{ "HM3: Hierarchical Multi-Objective Model Merging for Pretrained Models": { "paper_title": "HM3: Hierarchical Multi-Objective Model Merging for Pretrained Models", "arxiv_id": "2409.18893v1", "keyword": "reinforcement learning", "authors": [ "Yu Zhou", "Xingyu Wu", "Jibin Wu", "Liang Feng", "Kay Chen Tan" ], "references": [ { "title": "DELLA-Merging: Reducing Interference in Model Merging through Magnitude-Based Sampling", "abstract": "With the proliferation of domain-specific models, model merging has emerged as a set of techniques that combine the capabilities of multiple models into one that can multitask without the cost of additional training. In this paper, we propose a new model merging technique, Drop and rEscaLe via sampLing with mAgnitude (DELLA-Merging), that employs a novel pruning technique, MAGPRUNE, which shows significant advantages over DARE and TIES. MAGPRUNE first ranks the parameters in order of their magnitude and assigns higher dropout probabilities (p) to parameters with lower ranks corresponding to lower magnitudes. To approximate the original embeddings, MAGPRUNE employs a rescaling operation on the parameters that survive the random dropping by 1/(1 - p). On three different expert models considered for merging (LM, Math, Code) and corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA shows an average improvement of 2.4 points over baseline methods employing delta parameter pruning (an improvement of 3.6 points over TIES, 1.2 points over DARE), and 11.1 points over the no-pruning baseline (TA). We release the source code at: https://github.com/declare-lab/della.", "year": 2024, "venue": "arXiv.org", "authors": [ "Pala Tej Deep", "Rishabh Bhardwaj", "Soujanya Poria" ], "externalIds": { "DBLP": "journals/corr/abs-2406-11617", "ArXiv": "2406.11617", "DOI": "10.48550/arXiv.2406.11617", "CorpusId": 270560921 }, "url": "https://www.semanticscholar.org/paper/90ba2852b80d440a5528f869515b6df6fe38fe36", "referenceCount": 34, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Twin-Merging: Dynamic Integration of Modular Expertise in Model Merging", "abstract": "In the era of large language models, model merging is a promising way to combine multiple task-specific models into a single multitask model without extra training. However, two challenges remain: (a) interference between different models and (b) heterogeneous data during testing. Traditional model merging methods often show significant performance gaps compared to fine-tuned models due to these issues. Additionally, a one-size-fits-all model lacks flexibility for diverse test data, leading to performance degradation. We show that both shared and exclusive task-specific knowledge are crucial for merging performance, but directly merging exclusive knowledge hinders overall performance. In view of this, we propose Twin-Merging, a method that encompasses two principal stages: (1) modularizing knowledge into shared and exclusive components, with compression to reduce redundancy and enhance efficiency; (2) dynamically merging shared and task-specific knowledge based on the input. This approach narrows the performance gap between merged and fine-tuned models and improves adaptability to heterogeneous data. 
Extensive experiments on $12$ datasets for both discriminative and generative tasks demonstrate the effectiveness of our method, showing an average improvement of $28.34\\%$ in absolute normalized score for discriminative tasks and even surpassing the fine-tuned upper bound on the generative tasks. (Our implementation is available in https://github.com/LZY-the-boys/Twin-Mergin.)", "year": 2024, "venue": "arXiv.org", "authors": [ "Zhenyi Lu", "Chenghao Fan", "Wei Wei", "Xiaoye Qu", "Dangyang Chen", "Yu Cheng" ], "externalIds": { "DBLP": "journals/corr/abs-2406-15479", "ArXiv": "2406.15479", "DOI": "10.48550/arXiv.2406.15479", "CorpusId": 270702345 }, "url": "https://www.semanticscholar.org/paper/84f4ed2029085d2e009e6f262537c4201738737e", "referenceCount": 51, "citationCount": 5, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Open-LLM-Leaderboard: From Multi-choice to Open-style Questions for LLMs Evaluation, Benchmark, and Arena", "abstract": "Multiple-choice questions (MCQ) are frequently used to assess large language models (LLMs). Typically, an LLM is given a question and selects the answer deemed most probable after adjustments for factors like length. Unfortunately, LLMs may inherently favor certain answer choice IDs, such as A/B/C/D, due to inherent biases of priori unbalanced probabilities, influencing the prediction of answers based on these IDs. Previous research has introduced methods to reduce this ''selection bias'' by simply permutating options on a few test samples and applying to new ones. Another problem of MCQ is the lottery ticket choice by ''random guessing''. The LLM does not learn particular knowledge, but the option is guessed correctly. This situation is especially serious for those small-scale LLMs. To address them, a more thorough approach involves shifting from MCQ to open-style questions, which can fundamentally eliminate selection bias and random guessing issues. However, transitioning causes its own set of challenges in (1) identifying suitable open-style questions and (2) validating the correctness of LLM open-style responses against human-annotated ground-truths. This work aims to tackle these significant difficulties, and establish a new LLM evaluation benchmark through entirely open-style questions. Consequently, we introduce the Open-LLM-Leaderboard to track various LLMs' performance and reflect true capability of them, such as GPT-4o/4/3.5, Claude 3, Gemini, etc. Our code and dataset are available at https://github.com/VILA-Lab/Open-LLM-Leaderboard.", "year": 2024, "venue": "arXiv.org", "authors": [ "Aidar Myrzakhan", "S. Mahmoud Bsharat", "Zhiqiang Shen" ], "externalIds": { "ArXiv": "2406.07545", "DBLP": "journals/corr/abs-2406-07545", "DOI": "10.48550/arXiv.2406.07545", "CorpusId": 270379906 }, "url": "https://www.semanticscholar.org/paper/9b8c2f2507c3aaf4edd450116d3c19573aafc4c5", "referenceCount": 41, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MAP: Low-compute Model Merging with Amortized Pareto Fronts via Quadratic Approximation", "abstract": "Model merging has emerged as an effective approach to combine multiple single-task models, fine-tuned from the same pre-trained model, into a multitask model. This process typically involves computing a weighted average of the model parameters without any additional training. Existing model-merging methods focus on enhancing average task accuracy. 
However, interference and conflicts between the objectives of different tasks can lead to trade-offs during model merging. In real-world applications, a set of solutions with various trade-offs can be more informative, helping practitioners make decisions based on diverse preferences. In this paper, we introduce a novel low-compute algorithm, Model Merging with Amortized Pareto Front (MAP). MAP identifies a Pareto set of scaling coefficients for merging multiple models to reflect the trade-offs. The core component of MAP is approximating the evaluation metrics of the various tasks using a quadratic approximation surrogate model derived from a pre-selected set of scaling coefficients, enabling amortized inference. Experimental results on vision and natural language processing tasks show that MAP can accurately identify the Pareto front. To further reduce the required computation of MAP, we propose (1) a Bayesian adaptive sampling algorithm and (2) a nested merging scheme with multiple stages.", "year": 2024, "venue": "arXiv.org", "authors": [ "Lu Li", "Tianyu Zhang", "Zhiqi Bu", "Suyuchen Wang", "Huan He", "Jie Fu", "Yonghui Wu", "Jiang Bian", "Yong Chen", "Y. Bengio" ], "externalIds": { "ArXiv": "2406.07529", "DBLP": "journals/corr/abs-2406-07529", "DOI": "10.48550/arXiv.2406.07529", "CorpusId": 270379651 }, "url": "https://www.semanticscholar.org/paper/c51c550cc3dbac1222799a7397bf4d26daf35a7a", "referenceCount": 72, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "EMR-Merging: Tuning-Free High-Performance Model Merging", "abstract": "The success of pretrain-finetune paradigm brings about the release of numerous model weights. In this case, merging models finetuned on different tasks to enable a single model with multi-task capabilities is gaining increasing attention for its practicability. Existing model merging methods usually suffer from (1) significant performance degradation or (2) requiring tuning by additional data or training. In this paper, we rethink and analyze the existing model merging paradigm. We discover that using a single model's weights can hardly simulate all the models' performance. To tackle this issue, we propose Elect, Mask&Rescale-Merging (EMR-Merging). We first (a) elect a unified model from all the model weights and then (b) generate extremely lightweight task-specific modulators, including masks and rescalers, to align the direction and magnitude between the unified model and each specific model, respectively. EMR-Merging is tuning-free, thus requiring no data availability or any additional training while showing impressive performance. We find that EMR-Merging shows outstanding performance compared to existing merging methods under different classical and newly-established settings, including merging different numbers of vision models (up to 30), NLP models, PEFT models, and multi-modal models.", "year": 2024, "venue": "arXiv.org", "authors": [ "Chenyu Huang", "Peng Ye", "Tao Chen", "Tong He", "Xiangyu Yue", "Wanli Ouyang" ], "externalIds": { "ArXiv": "2405.17461", "DBLP": "journals/corr/abs-2405-17461", "DOI": "10.48550/arXiv.2405.17461", "CorpusId": 270067773 }, "url": "https://www.semanticscholar.org/paper/a26013ad73804f983ffcea44a6ead683400012fb", "referenceCount": 90, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Have You Merged My Model? 
On The Robustness of Large Language Model IP Protection Methods Against Model Merging", "abstract": "Model merging is a promising lightweight model empowerment technique that does not rely on expensive computing devices (e.g., GPUs) or require the collection of specific training data. Instead, it involves editing different upstream model parameters to absorb their downstream task capabilities. However, uncertified model merging can infringe upon the Intellectual Property (IP) rights of the original upstream models. In this paper, we conduct the first study on the robustness of IP protection methods in model merging scenarios. We investigate two state-of-the-art IP protection techniques: Quantization Watermarking and Instructional Fingerprint, along with various advanced model merging technologies, such as Task Arithmetic, TIES-MERGING, and so on. Experimental results indicate that current Large Language Model (LLM) watermarking techniques cannot survive in the merged models, whereas model fingerprinting techniques can. Our research aims to highlight that model merging should be an indispensable consideration in the robustness assessment of model IP protection techniques, thereby promoting the healthy development of the open-source LLM community.", "year": 2024, "venue": "arXiv.org", "authors": [ "Tianshuo Cong", "Delong Ran", "Zesen Liu", "Xinlei He", "Jinyuan Liu", "Yichen Gong", "Qi Li", "Anyu Wang", "Xiaoyun Wang" ], "externalIds": { "ArXiv": "2404.05188", "DBLP": "journals/corr/abs-2404-05188", "DOI": "10.48550/arXiv.2404.05188", "CorpusId": 269005388 }, "url": "https://www.semanticscholar.org/paper/def1312b504778a26736c3dc5d7405bf634e7929", "referenceCount": 24, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Model Stock: All we need is just a few fine-tuned models", "abstract": "This paper introduces an efficient fine-tuning method for large pre-trained models, offering strong in-distribution (ID) and out-of-distribution (OOD) performance. Breaking away from traditional practices that need a multitude of fine-tuned models for averaging, our approach employs significantly fewer models to achieve final weights yet yield superior accuracy. Drawing from key insights in the weight space of fine-tuned weights, we uncover a strong link between the performance and proximity to the center of weight space. Based on this, we introduce a method that approximates a center-close weight using only two fine-tuned models, applicable during or after training. Our innovative layer-wise weight averaging technique surpasses state-of-the-art model methods such as Model Soup, utilizing only two fine-tuned models. This strategy can be aptly coined Model Stock, highlighting its reliance on selecting a minimal number of models to draw a more optimized-averaged model. We demonstrate the efficacy of Model Stock with fine-tuned models based upon pre-trained CLIP architectures, achieving remarkable performance on both ID and OOD tasks on the standard benchmarks, all while barely bringing extra computational demands. 
Our code and pre-trained models are available at https://github.com/naver-ai/model-stock.", "year": 2024, "venue": "arXiv.org", "authors": [ "Dong-Hwan Jang", "Sangdoo Yun", "Dongyoon Han" ], "externalIds": { "DBLP": "journals/corr/abs-2403-19522", "ArXiv": "2403.19522", "DOI": "10.48550/arXiv.2403.19522", "CorpusId": 268733341 }, "url": "https://www.semanticscholar.org/paper/04dcc984785f7c7b201acad6507c3fd06e552990", "referenceCount": 35, "citationCount": 10, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Evolutionary Optimization of Model Merging Recipes", "abstract": "We present a novel application of evolutionary algorithms to automate the creation of powerful foundation models. While model merging has emerged as a promising approach for LLM development due to its cost-effectiveness, it currently relies on human intuition and domain knowledge, limiting its potential. Here, we propose an evolutionary approach that overcomes this limitation by automatically discovering effective combinations of diverse open-source models, harnessing their collective intelligence without requiring extensive additional training data or compute. Our approach operates in both parameter space and data flow space, allowing for optimization beyond just the weights of the individual models. This approach even facilitates cross-domain merging, generating models like a Japanese LLM with Math reasoning capabilities. Surprisingly, our Japanese Math LLM achieved state-of-the-art performance on a variety of established Japanese LLM benchmarks, even surpassing models with significantly more parameters, despite not being explicitly trained for such tasks. Furthermore, a culturally-aware Japanese VLM generated through our approach demonstrates its effectiveness in describing Japanese culture-specific content, outperforming previous Japanese VLMs. This work not only contributes new state-of-the-art models back to the open-source community, but also introduces a new paradigm for automated model composition, paving the way for exploring alternative, efficient approaches to foundation model development.", "year": 2024, "venue": "arXiv.org", "authors": [ "Takuya Akiba", "Makoto Shing", "Yujin Tang", "Qi Sun", "David Ha" ], "externalIds": { "DBLP": "journals/corr/abs-2403-13187", "ArXiv": "2403.13187", "DOI": "10.48550/arXiv.2403.13187", "CorpusId": 268537290 }, "url": "https://www.semanticscholar.org/paper/828f98e0feba2baa55a5486f354fd074cca0880c", "referenceCount": 43, "citationCount": 29, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Large Language Models \"Ad Referendum\": How Good Are They at Machine Translation in the Legal Domain?", "abstract": "This study evaluates the machine translation (MT) quality of two state-of-the-art large language models (LLMs) against a traditional neural machine translation (NMT) system across four language pairs in the legal domain. It combines automatic evaluation metrics (AEMs) and human evaluation (HE) by professional translators to assess translation ranking, fluency and adequacy. The results indicate that while Google Translate generally outperforms LLMs in AEMs, human evaluators rate LLMs, especially GPT-4, comparably or slightly better in terms of producing contextually adequate and fluent translations. 
This discrepancy suggests LLMs' potential in handling specialized legal terminology and context, highlighting the importance of human evaluation methods in assessing MT quality. The study underscores the evolving capabilities of LLMs in specialized domains and calls for reevaluation of traditional AEMs to better capture the nuances of LLM-generated translations.", "year": 2024, "venue": "MonTI : monographs in translation and interpreting", "authors": [ "Vicent Briva-Iglesias", "Joao Lucas Cavalheiro Camargo", "Gokhan Dogru" ], "externalIds": { "ArXiv": "2402.07681", "DBLP": "journals/corr/abs-2402-07681", "DOI": "10.48550/arXiv.2402.07681", "CorpusId": 267627712 }, "url": "https://www.semanticscholar.org/paper/67fc653f060fa2de78d1801f6fcc539f5bcb3308", "referenceCount": 62, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Evolutionary Computation in the Era of Large Language Model: Survey and Roadmap", "abstract": "Large language models (LLMs) have not only revolutionized natural language processing but also extended their prowess to various domains, marking a significant stride towards artificial general intelligence. The interplay between LLMs and evolutionary algorithms (EAs), despite differing in objectives and methodologies, share a common pursuit of applicability in complex problems. Meanwhile, EA can provide an optimization framework for LLM's further enhancement under black-box settings, empowering LLM with flexible global search capacities. On the other hand, the abundant domain knowledge inherent in LLMs could enable EA to conduct more intelligent searches. Furthermore, the text processing and generative capabilities of LLMs would aid in deploying EAs across a wide range of tasks. Based on these complementary advantages, this paper provides a thorough review and a forward-looking roadmap, categorizing the reciprocal inspiration into two main avenues: LLM-enhanced EA and EA-enhanced LLM. Some integrated synergy methods are further introduced to exemplify the complementarity between LLMs and EAs in diverse scenarios, including code generation, software engineering, neural architecture search, and various generation tasks. As the first comprehensive review focused on the EA research in the era of LLMs, this paper provides a foundational stepping stone for understanding the collaborative potential of LLMs and EAs. The identified challenges and future directions offer guidance for researchers and practitioners to unlock the full potential of this innovative collaboration in propelling advancements in optimization and artificial intelligence. We have created a GitHub repository to index the relevant papers: https://github.com/wuxingyu-ai/LLM4EC.", "year": 2024, "venue": "arXiv.org", "authors": [ "Xingyu Wu", "Sheng-hao Wu", "Jibin Wu", "Liang Feng", "Kay Chen Tan" ], "externalIds": { "DBLP": "journals/corr/abs-2401-10034", "ArXiv": "2401.10034", "DOI": "10.48550/arXiv.2401.10034", "CorpusId": 267035000 }, "url": "https://www.semanticscholar.org/paper/478f71f1cd9bad0435560544a9dce7ca49d97766", "referenceCount": 184, "citationCount": 31, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PPO-Clip Attains Global Optimality: Towards Deeper Understandings of Clipping", "abstract": "Proximal Policy Optimization algorithm employing a clipped surrogate objective (PPO-Clip) is a prominent exemplar of the policy optimization methods. 
However, despite its remarkable empirical success, PPO-Clip lacks theoretical substantiation to date. In this paper, we contribute to the field by establishing the first global convergence results of a PPO-Clip variant in both tabular and neural function approximation settings. Our findings highlight the O(1/√T ) min-iterate convergence rate specifically in the context of neural function approximation. We tackle the inherent challenges in analyzing PPO-Clip through three central concepts: (i) We introduce a generalized version of the PPO-Clip objective, illuminated by its connection with the hinge loss. (ii) Employing entropic mirror descent, we establish asymptotic convergence for tabular PPO-Clip with direct policy parameterization. (iii) Inspired by the tabular analysis, we streamline convergence analysis by introducing a two-step policy improvement approach. This decouples policy search from complex neural policy parameterization using a regression-based update scheme. Furthermore, we gain deeper insights into the efficacy of PPO-Clip by interpreting these generalized objectives. Our theoretical findings also mark the first characterization of the influence of the clipping mechanism on PPO-Clip convergence. Importantly, the clipping range affects only the\npre-constant of the convergence rate.", "year": 2023, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Nai-Chieh Huang", "Ping-Chun Hsieh", "Kuo-Hao Ho", "I-Chen Wu" ], "externalIds": { "DBLP": "conf/aaai/HuangHHW24", "ArXiv": "2312.12065", "DOI": "10.48550/arXiv.2312.12065", "CorpusId": 266362674 }, "url": "https://www.semanticscholar.org/paper/daa625386ce9aa6039556d77cb28942bb17898e4", "referenceCount": 33, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Model Breadcrumbs: Scaling Multi-Task Model Merging with Sparse Masks", "abstract": "The rapid development of AI systems has been greatly influenced by the emergence of foundation models. A common approach for targeted problems involves fine-tuning these pre-trained foundation models for specific target tasks, resulting in a rapid spread of models fine-tuned across a diverse array of tasks. This work focuses on the problem of merging multiple fine-tunings of the same foundation model derived from a spectrum of auxiliary tasks. We introduce a new simple method, Model Breadcrumbs, which consists of a sparsely defined weight set that guides model adaptation within the weight space of a pre-trained model. These breadcrumbs are constructed by subtracting the weights from a pre-trained model before and after fine-tuning, followed by a sparsification process that eliminates weight outliers and negligible perturbations. Our experiments demonstrate the effectiveness of Model Breadcrumbs to simultaneously improve performance across multiple tasks. This contribution aligns with the evolving paradigm of updatable machine learning, reminiscent of the collaborative principles underlying open-source software development, fostering a community-driven effort to reliably update machine learning models. Our method is shown to be more efficient and unlike previous proposals does not require hyperparameter tuning for each new task added. 
Through extensive experimentation involving various models, tasks, and modalities we establish that integrating Model Breadcrumbs offers a simple, efficient, and highly effective approach for constructing multi-task models and facilitating updates to foundation models.", "year": 2023, "venue": "arXiv.org", "authors": [ "Mohammad-Javad Davari", "Eugene Belilovsky" ], "externalIds": { "DBLP": "journals/corr/abs-2312-06795", "ArXiv": "2312.06795", "DOI": "10.48550/arXiv.2312.06795", "CorpusId": 266174505 }, "url": "https://www.semanticscholar.org/paper/9a9cb8f557d381b7959510caee5f73dd3245db49", "referenceCount": 60, "citationCount": 14, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Language Models are Super Mario: Absorbing Abilities from Homologous Models as a Free Lunch", "abstract": "In this paper, we unveil that Language Models (LMs) can acquire new capabilities by assimilating parameters from homologous models without retraining or GPUs. We first introduce DARE to set most delta parameters (i.e., the disparity between fine-tuned and pre-trained parameters) to zeros without affecting the abilities of Supervised Fine-Tuning (SFT) LMs, which randomly Drops delta parameters with a ratio $p$ And REscales the remaining ones by $1 / (1 - p)$ to approximate the original embeddings. Then, we use DARE as a versatile plug-in to sparsify delta parameters of multiple SFT homologous models for mitigating parameter interference and merge them into a single model by parameter fusing. We experiment with encoder- and decoder-based LMs, showing that: (1) SFT delta parameter value ranges are typically small (within 0.002) with extreme redundancy, and DARE can effortlessly eliminate 90% or even 99% of them; (2) DARE can merge multiple task-specific LMs into one LM with diverse capabilities. Notably, this phenomenon is more pronounced in large-scale LMs, where the merged LM reveals the potential to surpass the performance of any source LM, providing a new discovery. We also utilize DARE to create a merged LM that ranks first among models with 7 billion parameters on the Open LLM Leaderboard.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Le Yu", "Yu Bowen", "Haiyang Yu", "Fei Huang", "Yongbin Li" ], "externalIds": { "DBLP": "conf/icml/Yu0Y0L24", "ArXiv": "2311.03099", "DOI": "10.48550/arXiv.2311.03099", "CorpusId": 265034087 }, "url": "https://www.semanticscholar.org/paper/c0230760f644f6b7538d93e4296a5e9aa7028e45", "referenceCount": 75, "citationCount": 100, "influentialCitationCount": 22, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Code Llama: Open Foundation Models for Code", "abstract": "We release Code Llama, a family of large language models for code based on Llama 2 providing state-of-the-art performance among open models, infilling capabilities, support for large input contexts, and zero-shot instruction following ability for programming tasks. We provide multiple flavors to cover a wide range of applications: foundation models (Code Llama), Python specializations (Code Llama - Python), and instruction-following models (Code Llama - Instruct) with 7B, 13B, 34B and 70B parameters each. All models are trained on sequences of 16k tokens and show improvements on inputs with up to 100k tokens. 7B, 13B and 70B Code Llama and Code Llama - Instruct variants support infilling based on surrounding content. 
Code Llama reaches state-of-the-art performance among open models on several code benchmarks, with scores of up to 67% and 65% on HumanEval and MBPP, respectively. Notably, Code Llama - Python 7B outperforms Llama 2 70B on HumanEval and MBPP, and all our models outperform every other publicly available model on MultiPL-E. We release Code Llama under a permissive license that allows for both research and commercial use.", "year": 2023, "venue": "arXiv.org", "authors": [ "Baptiste Rozière", "Jonas Gehring", "Fabian Gloeckle", "Sten Sootla", "Itai Gat", "Xiaoqing Tan", "Yossi Adi", "Jingyu Liu", "Tal Remez", "J. Rapin", "Artyom Kozhevnikov", "I. Evtimov", "Joanna Bitton", "Manish P Bhatt", "Cristian Cantón Ferrer", "Aaron Grattafiori", "Wenhan Xiong", "Alexandre D'efossez", "Jade Copet", "F. Azhar", "Hugo Touvron", "Louis Martin", "Nicolas Usunier", "Thomas Scialom", "Gabriel Synnaeve" ], "externalIds": { "DBLP": "journals/corr/abs-2308-12950", "ArXiv": "2308.12950", "DOI": "10.48550/arXiv.2308.12950", "CorpusId": 261100919 }, "url": "https://www.semanticscholar.org/paper/0b0debb710366cdff461938c80763eace1651af6", "referenceCount": 92, "citationCount": 1137, "influentialCitationCount": 161, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "WizardMath: Empowering Mathematical Reasoning for Large Language Models via Reinforced Evol-Instruct", "abstract": "Large language models (LLMs), such as GPT-4, have shown remarkable performance in natural language processing (NLP) tasks, including challenging mathematical reasoning. However, most existing open-source models are only pre-trained on large-scale internet data and without math-related optimization. In this paper, we present WizardMath, which enhances the mathematical reasoning abilities of Llama-2, by applying our proposed Reinforcement Learning from Evol-Instruct Feedback (RLEIF) method to the domain of math. Through extensive experiments on two mathematical reasoning benchmarks, namely GSM8k and MATH, we reveal the extraordinary capabilities of our model. WizardMath surpasses all other open-source LLMs by a substantial margin. Furthermore, our model even outperforms ChatGPT-3.5, Claude Instant-1, PaLM-2 and Minerva on GSM8k, simultaneously surpasses Text-davinci-002, PaLM-1 and GPT-3 on MATH. More details and model weights are public at https://github.com/nlpxucan/WizardLM and https://huggingface.co/WizardLM.", "year": 2023, "venue": "arXiv.org", "authors": [ "Haipeng Luo", "Qingfeng Sun", "Can Xu", "Pu Zhao", "Jian-Guang Lou", "Chongyang Tao", "Xiubo Geng", "Qingwei Lin", "Shifeng Chen", "Dongmei Zhang" ], "externalIds": { "ArXiv": "2308.09583", "DBLP": "journals/corr/abs-2308-09583", "DOI": "10.48550/arXiv.2308.09583", "CorpusId": 261030818 }, "url": "https://www.semanticscholar.org/paper/dd18782960f9ee4c66b79e1518b342ad3f8d19e7", "referenceCount": 107, "citationCount": 250, "influentialCitationCount": 47, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Llama 2: Open Foundation and Fine-Tuned Chat Models", "abstract": "In this work, we develop and release Llama 2, a collection of pretrained and fine-tuned large language models (LLMs) ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama 2-Chat, are optimized for dialogue use cases. Our models outperform open-source chat models on most benchmarks we tested, and based on our human evaluations for helpfulness and safety, may be a suitable substitute for closed-source models. 
We provide a detailed description of our approach to fine-tuning and safety improvements of Llama 2-Chat in order to enable the community to build on our work and contribute to the responsible development of LLMs.", "year": 2023, "venue": "arXiv.org", "authors": [ "Hugo Touvron", "Louis Martin", "Kevin R. Stone", "Peter Albert", "Amjad Almahairi", "Yasmine Babaei", "Nikolay Bashlykov", "Soumya Batra", "Prajjwal Bhargava", "Shruti Bhosale", "D. Bikel", "Lukas Blecher", "Cristian Cantón Ferrer", "Moya Chen", "Guillem Cucurull", "David Esiobu", "Jude Fernandes", "Jeremy Fu", "Wenyin Fu", "Brian Fuller", "Cynthia Gao", "Vedanuj Goswami", "Naman Goyal", "A. Hartshorn", "Saghar Hosseini", "Rui Hou", "Hakan Inan", "Marcin Kardas", "Viktor Kerkez", "Madian Khabsa", "Isabel M. Kloumann", "A. Korenev", "Punit Singh Koura", "Marie-Anne Lachaux", "Thibaut Lavril", "Jenya Lee", "Diana Liskovich", "Yinghai Lu", "Yuning Mao", "Xavier Martinet", "Todor Mihaylov", "Pushkar Mishra", "Igor Molybog", "Yixin Nie", "Andrew Poulton", "Jeremy Reizenstein", "Rashi Rungta", "Kalyan Saladi", "Alan Schelten", "Ruan Silva", "Eric Michael Smith", "R. Subramanian", "Xia Tan", "Binh Tang", "Ross Taylor", "Adina Williams", "Jian Xiang Kuan", "Puxin Xu", "Zhengxu Yan", "Iliyan Zarov", "Yuchen Zhang", "Angela Fan", "Melanie Kambadur", "Sharan Narang", "Aurelien Rodriguez", "Robert Stojnic", "Sergey Edunov", "Thomas Scialom" ], "externalIds": { "ArXiv": "2307.09288", "DBLP": "journals/corr/abs-2307-09288", "CorpusId": 259950998 }, "url": "https://www.semanticscholar.org/paper/104b0bb1da562d53cbda87aec79ef6a2827d191a", "referenceCount": 131, "citationCount": 7142, "influentialCitationCount": 1096, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "TIES-Merging: Resolving Interference When Merging Models", "abstract": "Transfer learning - i.e., further fine-tuning a pre-trained model on a downstream task - can confer significant advantages, including improved downstream performance, faster convergence, and better sample efficiency. These advantages have led to a proliferation of task-specific fine-tuned models, which typically can only perform a single task and do not benefit from one another. Recently, model merging techniques have emerged as a solution to combine multiple task-specific models into a single multitask model without performing additional training. However, existing merging methods often ignore the interference between parameters of different models, resulting in large performance drops when merging multiple models. In this paper, we demonstrate that prior merging techniques inadvertently lose valuable information due to two major sources of interference: (a) interference due to redundant parameter values and (b) disagreement on the sign of a given parameter's values across models. To address this, we propose our method, TRIM, ELECT SIGN&MERGE (TIES-Merging), which introduces three novel steps when merging models: (1) resetting parameters that only changed a small amount during fine-tuning, (2) resolving sign conflicts, and (3) merging only the parameters that are in alignment with the final agreed-upon sign. We find that TIES-Merging outperforms several existing methods in diverse settings covering a range of modalities, domains, number of tasks, model sizes, architectures, and fine-tuning settings. We further analyze the impact of different types of interference on model parameters, and highlight the importance of resolving sign interference. 
Our code is available at https://github.com/prateeky2806/ties-merging", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Prateek Yadav", "Derek Tam", "Leshem Choshen", "Colin Raffel", "Mohit Bansal" ], "externalIds": { "DBLP": "conf/nips/YadavTCRB23", "ArXiv": "2306.01708", "CorpusId": 259064039 }, "url": "https://www.semanticscholar.org/paper/2651f0179874bd010f58d2c9fa7d118807c80977", "referenceCount": 90, "citationCount": 87, "influentialCitationCount": 25, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "VisionLLM: Large Language Model is also an Open-Ended Decoder for Vision-Centric Tasks", "abstract": "Large language models (LLMs) have notably accelerated progress towards artificial general intelligence (AGI), with their impressive zero-shot capacity for user-tailored tasks, endowing them with immense potential across a range of applications. However, in the field of computer vision, despite the availability of numerous powerful vision foundation models (VFMs), they are still restricted to tasks in a pre-defined form, struggling to match the open-ended task capabilities of LLMs. In this work, we present an LLM-based framework for vision-centric tasks, termed VisionLLM. This framework provides a unified perspective for vision and language tasks by treating images as a foreign language and aligning vision-centric tasks with language tasks that can be flexibly defined and managed using language instructions. An LLM-based decoder can then make appropriate predictions based on these instructions for open-ended tasks. Extensive experiments show that the proposed VisionLLM can achieve different levels of task customization through language instructions, from fine-grained object-level to coarse-grained task-level customization, all with good results. It's noteworthy that, with a generalist LLM-based framework, our model can achieve over 60\\% mAP on COCO, on par with detection-specific models. We hope this model can set a new baseline for generalist vision and language models. The demo shall be released based on https://github.com/OpenGVLab/InternGPT. The code shall be released at https://github.com/OpenGVLab/VisionLLM.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Wen Wang", "Zhe Chen", "Xiaokang Chen", "Jiannan Wu", "Xizhou Zhu", "Gang Zeng", "Ping Luo", "Tong Lu", "Jie Zhou", "Y. Qiao", "Jifeng Dai" ], "externalIds": { "ArXiv": "2305.11175", "DBLP": "conf/nips/WangCCWZZLLZQD23", "DOI": "10.48550/arXiv.2305.11175", "CorpusId": 258762579 }, "url": "https://www.semanticscholar.org/paper/42a30dc5470f54ec249f25d3c31e05d7c376c8e3", "referenceCount": 81, "citationCount": 295, "influentialCitationCount": 15, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Patching open-vocabulary models by interpolating weights", "abstract": "Open-vocabulary models like CLIP achieve high accuracy across many image classification tasks. However, there are still settings where their zero-shot performance is far from optimal. We study model patching, where the goal is to improve accuracy on specific tasks without degrading accuracy on tasks where performance is already adequate. Towards this goal, we introduce PAINT, a patching method that uses interpolations between the weights of a model before fine-tuning and the weights after fine-tuning on a task to be patched. 
On nine tasks where zero-shot CLIP performs poorly, PAINT increases accuracy by 15 to 60 percentage points while preserving accuracy on ImageNet within one percentage point of the zero-shot model. PAINT also allows a single model to be patched on multiple tasks and improves with model scale. Furthermore, we identify cases of broad transfer, where patching on one task increases accuracy on other tasks even when the tasks have disjoint classes. Finally, we investigate applications beyond common benchmarks such as counting or reducing the impact of typographic attacks on CLIP. Our findings demonstrate that it is possible to expand the set of tasks on which open-vocabulary models achieve high accuracy without re-training them from scratch.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Gabriel Ilharco", "Mitchell Wortsman", "S. Gadre", "Shuran Song", "Hannaneh Hajishirzi", "Simon Kornblith", "Ali Farhadi", "Ludwig Schmidt" ], "externalIds": { "DBLP": "journals/corr/abs-2208-05592", "ArXiv": "2208.05592", "DOI": "10.48550/arXiv.2208.05592", "CorpusId": 251493208 }, "url": "https://www.semanticscholar.org/paper/65f056d32dac701240a52a5daf8cedb611b04ceb", "referenceCount": 97, "citationCount": 118, "influentialCitationCount": 20, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Survey on Learnable Evolutionary Algorithms for Scalable Multiobjective Optimization", "abstract": "Recent decades have witnessed great advancements in multiobjective evolutionary algorithms (MOEAs) for multiobjective optimization problems (MOPs). However, these progressively improved MOEAs have not necessarily been equipped with scalable and learnable problem-solving strategies for new and grand challenges brought by the scaling-up MOPs with continuously increasing complexity from diverse aspects, mainly, including expensive cost of function evaluations, many objectives, large-scale search space, time-varying environments, and multitask. Under different scenarios, divergent thinking is required in designing new powerful MOEAs for solving them effectively. In this context, research studies on learnable MOEAs with machine learning techniques have received extensive attention in the field of evolutionary computation. This article begins with a general taxonomy of scaling-up MOPs and learnable MOEAs, followed by an analysis of the challenges that these MOPs pose to traditional MOEAs. Then, we synthetically overview recent advances of learnable MOEAs in solving various scaling-up MOPs, focusing primarily on four attractive directions (i.e., learnable evolutionary discriminators for environmental selection, learnable evolutionary generators for reproduction, learnable evolutionary evaluators for function evaluations, and learnable evolutionary transfer modules for sharing or reusing optimization experience). The insight of learnable MOEAs is offered to readers as a reference to the general track of the efforts in this field.", "year": 2022, "venue": "IEEE Transactions on Evolutionary Computation", "authors": [ "Songbai Liu", "Qiuzhen Lin", "Jianqiang Li", "K. 
Tan" ], "externalIds": { "ArXiv": "2206.11526", "DBLP": "journals/tec/LiuLLT23", "DOI": "10.1109/TEVC.2023.3250350", "CorpusId": 249953938 }, "url": "https://www.semanticscholar.org/paper/00c4e1abbc489cfeee9e3ec55fa23ad8e9fd06db", "referenceCount": 320, "citationCount": 31, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Model soups: averaging weights of multiple fine-tuned models improves accuracy without increasing inference time", "abstract": "The conventional recipe for maximizing model accuracy is to (1) train multiple models with various hyperparameters and (2) pick the individual model which performs best on a held-out validation set, discarding the remainder. In this paper, we revisit the second step of this procedure in the context of fine-tuning large pre-trained models, where fine-tuned models often appear to lie in a single low error basin. We show that averaging the weights of multiple models fine-tuned with different hyperparameter configurations often improves accuracy and robustness. Unlike a conventional ensemble, we may average many models without incurring any additional inference or memory costs -- we call the results\"model soups.\"When fine-tuning large pre-trained models such as CLIP, ALIGN, and a ViT-G pre-trained on JFT, our soup recipe provides significant improvements over the best model in a hyperparameter sweep on ImageNet. The resulting ViT-G model, which attains 90.94% top-1 accuracy on ImageNet, achieved a new state of the art. Furthermore, we show that the model soup approach extends to multiple image classification and natural language processing tasks, improves out-of-distribution performance, and improves zero-shot performance on new downstream tasks. Finally, we analytically relate the performance similarity of weight-averaging and logit-ensembling to flatness of the loss and confidence of the predictions, and validate this relation empirically. Code is available at https://github.com/mlfoundations/model-soups.", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "Mitchell Wortsman", "Gabriel Ilharco", "S. Gadre", "R. Roelofs", "Raphael Gontijo-Lopes", "Ari S. Morcos", "Hongseok Namkoong", "Ali Farhadi", "Y. Carmon", "Simon Kornblith", "Ludwig Schmidt" ], "externalIds": { "DBLP": "conf/icml/WortsmanIGRLMNF22", "ArXiv": "2203.05482", "DOI": "10.48550/arXiv.2203.05482", "CorpusId": 247362886 }, "url": "https://www.semanticscholar.org/paper/54020e5fe48ebb250f27d744e20a63cac2988a84", "referenceCount": 119, "citationCount": 638, "influentialCitationCount": 87, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Merging Models with Fisher-Weighted Averaging", "abstract": "Averaging the parameters of models that have the same architecture and initialization can provide a means of combining their respective capabilities. In this paper, we take the perspective that this\"merging\"operation can be seen as choosing parameters that approximately maximize the joint likelihood of the posteriors of the models' parameters. Computing a simple average of the models' parameters therefore corresponds to making an isotropic Gaussian approximation to their posteriors. We develop an alternative merging procedure based on the Laplace approximation where we approximate each model's posterior as a Gaussian distribution whose precision matrix corresponds to its Fisher information. 
We first show that our\"Fisher merging\"technique provides a performance boost in settings where simple parameter averaging is currently used -- specifically, robust fine-tuning and model ensembling. Then, we compare merging to standard gradient-based transfer learning and demonstrate that merging enables a fundamentally different method for transferring capabilities across models. Specifically, we show that Fisher merging is competitive with gradient-based transfer learning approaches (while being significantly cheaper) in intermediate-task training and domain-adaptive pre-training. We also show that our merging procedure makes it possible to combine models in previously unexplored ways. We release our code to facilitate future research into methods for merging models.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Michael Matena", "Colin Raffel" ], "externalIds": { "ArXiv": "2111.09832", "DBLP": "journals/corr/abs-2111-09832", "CorpusId": 244345933 }, "url": "https://www.semanticscholar.org/paper/06b20a1c6883464fcb2855adc146874fe7937c41", "referenceCount": 73, "citationCount": 206, "influentialCitationCount": 32, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Training Verifiers to Solve Math Word Problems", "abstract": "State-of-the-art language models can match human performance on many tasks, but they still struggle to robustly perform multi-step mathematical reasoning. To diagnose the failures of current models and support research, we introduce GSM8K, a dataset of 8.5K high quality linguistically diverse grade school math word problems. We find that even the largest transformer models fail to achieve high test performance, despite the conceptual simplicity of this problem distribution. To increase performance, we propose training verifiers to judge the correctness of model completions. At test time, we generate many candidate solutions and select the one ranked highest by the verifier. We demonstrate that verification significantly improves performance on GSM8K, and we provide strong empirical evidence that verification scales more effectively with increased data than a finetuning baseline.", "year": 2021, "venue": "arXiv.org", "authors": [ "K. Cobbe", "V. Kosaraju", "Mohammad Bavarian", "Mark Chen", "Heewoo Jun", "Lukasz Kaiser", "Matthias Plappert", "Jerry Tworek", "Jacob Hilton", "Reiichiro Nakano", "Christopher Hesse", "John Schulman" ], "externalIds": { "ArXiv": "2110.14168", "DBLP": "journals/corr/abs-2110-14168", "CorpusId": 239998651 }, "url": "https://www.semanticscholar.org/paper/d6045d2ccc9c09ca1671348de86d07da6bc28eea", "referenceCount": 31, "citationCount": 2150, "influentialCitationCount": 564, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Evaluating Large Language Models Trained on Code", "abstract": "We introduce Codex, a GPT language model fine-tuned on publicly available code from GitHub, and study its Python code-writing capabilities. A distinct production version of Codex powers GitHub Copilot. On HumanEval, a new evaluation set we release to measure functional correctness for synthesizing programs from docstrings, our model solves 28.8% of the problems, while GPT-3 solves 0% and GPT-J solves 11.4%. Furthermore, we find that repeated sampling from the model is a surprisingly effective strategy for producing working solutions to difficult prompts. Using this method, we solve 70.2% of our problems with 100 samples per problem. 
Careful investigation of our model reveals its limitations, including difficulty with docstrings describing long chains of operations and with binding operations to variables. Finally, we discuss the potential broader impacts of deploying powerful code generation technologies, covering safety, security, and economics.", "year": 2021, "venue": "arXiv.org", "authors": [ "Mark Chen", "Jerry Tworek", "Heewoo Jun", "Qiming Yuan", "Henrique Pondé", "Jared Kaplan", "Harrison Edwards", "Yura Burda", "Nicholas Joseph", "Greg Brockman", "Alex Ray", "Raul Puri", "Gretchen Krueger", "Michael Petrov", "Heidy Khlaaf", "Girish Sastry", "Pamela Mishkin", "Brooke Chan", "Scott Gray", "Nick Ryder", "Mikhail Pavlov", "Alethea Power", "Lukasz Kaiser", "Mohammad Bavarian", "Clemens Winter", "Philippe Tillet", "F. Such", "D. Cummings", "Matthias Plappert", "Fotios Chantzis", "Elizabeth Barnes", "Ariel Herbert-Voss", "William H. Guss", "Alex Nichol", "Igor Babuschkin", "S. Balaji", "Shantanu Jain", "A. Carr", "J. Leike", "Joshua Achiam", "Vedant Misra", "Evan Morikawa", "Alec Radford", "M. Knight", "Miles Brundage", "Mira Murati", "Katie Mayer", "P. Welinder", "Bob McGrew", "Dario Amodei", "Sam McCandlish", "I. Sutskever", "Wojciech Zaremba" ], "externalIds": { "DBLP": "journals/corr/abs-2107-03374", "ArXiv": "2107.03374", "CorpusId": 235755472 }, "url": "https://www.semanticscholar.org/paper/acbdbf49f9bc3f151b93d9ca9a06009f4f6eb269", "referenceCount": 127, "citationCount": 3533, "influentialCitationCount": 700, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Survey on Evolutionary Construction of Deep Neural Networks", "abstract": "Automated construction of deep neural networks (DNNs) has become a research hot spot nowadays because DNN’s performance is heavily influenced by its architecture and parameters, which are highly task-dependent, but it is notoriously difficult to find the most appropriate DNN in terms of architecture and parameters to best solve a given task. In this work, we provide an insight into the automated DNN construction process by formulating it into a multilevel multiobjective large-scale optimization problem with constraints, where the nonconvex, nondifferentiable, and black-box nature of this problem make evolutionary algorithms (EAs) to stand out as a promising solver. Then, we give a systematical review of existing evolutionary DNN construction techniques from different aspects of this optimization problem and analyze the pros and cons of using EA-based methods in each aspect. This work aims to help DNN researchers to better understand why, where, and how to utilize EAs for automated DNN construction and meanwhile, help EA researchers to better understand the task of automated DNN construction so that they may focus more on EA-favored optimization scenarios to devise more effective techniques.", "year": 2021, "venue": "IEEE Transactions on Evolutionary Computation", "authors": [ "Xun Zhou", "A. K. Qin", "Maoguo Gong", "K. 
Tan" ], "externalIds": { "MAG": "3160536906", "DBLP": "journals/tec/ZhouQGT21", "DOI": "10.1109/TEVC.2021.3079985", "CorpusId": 236553617 }, "url": "https://www.semanticscholar.org/paper/dc6d4a4a6f8f30072381f7c00028eeaaa628bfa9", "referenceCount": 210, "citationCount": 61, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Transferable Visual Models From Natural Language Supervision", "abstract": "State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at https://github.com/OpenAI/CLIP.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Alec Radford", "Jong Wook Kim", "Chris Hallacy", "A. Ramesh", "Gabriel Goh", "Sandhini Agarwal", "Girish Sastry", "Amanda Askell", "Pamela Mishkin", "Jack Clark", "Gretchen Krueger", "I. Sutskever" ], "externalIds": { "ArXiv": "2103.00020", "DBLP": "conf/icml/RadfordKHRGASAM21", "CorpusId": 231591445 }, "url": "https://www.semanticscholar.org/paper/6f870f7f02a8c59c3e23f407f3ef00dd1dcf8fc4", "referenceCount": 220, "citationCount": 18886, "influentialCitationCount": 5013, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale", "abstract": "While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. 
When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Alexey Dosovitskiy", "Lucas Beyer", "Alexander Kolesnikov", "Dirk Weissenborn", "Xiaohua Zhai", "Thomas Unterthiner", "Mostafa Dehghani", "Matthias Minderer", "G. Heigold", "S. Gelly", "Jakob Uszkoreit", "N. Houlsby" ], "externalIds": { "MAG": "3094502228", "ArXiv": "2010.11929", "DBLP": "conf/iclr/DosovitskiyB0WZ21", "CorpusId": 225039882 }, "url": "https://www.semanticscholar.org/paper/268d347e8a55b5eb82fb5e7d2f800e33c75ab18a", "referenceCount": 65, "citationCount": 28233, "influentialCitationCount": 4121, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "NATS-Bench: Benchmarking NAS Algorithms for Architecture Topology and Size", "abstract": "Neural architecture search (NAS) has attracted a lot of attention and has been illustrated to bring tangible benefits in a large number of applications in the past few years. Architecture topology and architecture size have been regarded as two of the most important aspects for the performance of deep learning models and the community has spawned lots of searching algorithms for both of those aspects of the neural architectures. However, the performance gain from these searching algorithms is achieved under different search spaces and training setups. This makes the overall performance of the algorithms incomparable and the improvement from a sub-module of the searching model unclear. In this paper, we propose NATS-Bench, a unified benchmark on searching for both topology and size, for (almost) any up-to-date NAS algorithm. NATS-Bench includes the search space of 15,625 neural cell candidates for architecture topology and 32,768 for architecture size on three datasets. We analyze the validity of our benchmark in terms of various criteria and performance comparison of all candidates in the search space. We also show the versatility of NATS-Bench by benchmarking 13 recent state-of-the-art NAS algorithms on it. All logs and diagnostic information trained using the same setup for each candidate are provided. This facilitates a much larger community of researchers to focus on developing better NAS algorithms in a more comparable and computationally effective environment. All codes are publicly available at: https://xuanyidong.com/assets/projects/NATS-Bench.", "year": 2020, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Xuanyi Dong", "Lu Liu", "Katarzyna Musial", "B. Gabrys" ], "externalIds": { "ArXiv": "2009.00437", "DBLP": "journals/pami/DongLMG22", "MAG": "3082154327", "DOI": "10.1109/TPAMI.2021.3054824", "CorpusId": 221397774, "PubMed": "33497330" }, "url": "https://www.semanticscholar.org/paper/3259c9ab1714a4cfdf6439cca6bdc5f78d78fda3", "referenceCount": 54, "citationCount": 140, "influentialCitationCount": 33, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics", "Medicine" ] }, { "title": "Neural Architecture Search without Training", "abstract": "The time and effort involved in hand-designing deep neural networks is immense. This has prompted the development of Neural Architecture Search (NAS) techniques to automate this design. 
However, NAS algorithms tend to be extremely slow and expensive; they need to train vast numbers of candidate networks to inform the search process. This could be remedied if we could infer a network's trained accuracy from its initial state. In this work, we examine how the linear maps induced by data points correlate for untrained network architectures in the NAS-Bench-201 search space, and motivate how this can be used to give a measure of modelling flexibility which is highly indicative of a network's trained performance. We incorporate this measure into a simple algorithm that allows us to search for powerful networks without any training in a matter of seconds on a single GPU. Code to reproduce our experiments is available at this https URL.", "year": 2020, "venue": "International Conference on Machine Learning", "authors": [ "J. Mellor", "Jack Turner", "A. Storkey", "Elliot J. Crowley" ], "externalIds": { "MAG": "3033395418", "DBLP": "journals/corr/abs-2006-04647", "ArXiv": "2006.04647", "CorpusId": 219531078 }, "url": "https://www.semanticscholar.org/paper/25c371d565b387dbf22207a954a9549557698c21", "referenceCount": 44, "citationCount": 316, "influentialCitationCount": 81, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Learning Transferable Architectures for Scalable Image Recognition", "abstract": "Developing neural network image classification models often requires significant architecture engineering. In this paper, we study a method to learn the model architectures directly on the dataset of interest. As this approach is expensive when the dataset is large, we propose to search for an architectural building block on a small dataset and then transfer the block to a larger dataset. The key contribution of this work is the design of a new search space (which we call the \"NASNet search space\") which enables transferability. In our experiments, we search for the best convolutional layer (or \"cell\") on the CIFAR-10 dataset and then apply this cell to the ImageNet dataset by stacking together more copies of this cell, each with their own parameters to design a convolutional architecture, which we name a \"NASNet architecture\". We also introduce a new regularization technique called ScheduledDropPath that significantly improves generalization in the NASNet models. On CIFAR-10 itself, a NASNet found by our method achieves 2.4% error rate, which is state-of-the-art. Although the cell is not searched for directly on ImageNet, a NASNet constructed from the best cell achieves, among the published works, state-of-the-art accuracy of 82.7% top-1 and 96.2% top-5 on ImageNet. Our model is 1.2% better in top-1 accuracy than the best human-invented architectures while having 9 billion fewer FLOPS - a reduction of 28% in computational demand from the previous state-of-the-art model. When evaluated at different levels of computational cost, accuracies of NASNets exceed those of the state-of-the-art human-designed models. For instance, a small version of NASNet also achieves 74% top-1 accuracy, which is 3.1% better than equivalently-sized, state-of-the-art models for mobile platforms. Finally, the image features learned from image classification are generically useful and can be transferred to other computer vision problems. 
On the task of object detection, the learned features by NASNet used with the Faster-RCNN framework surpass state-of-the-art by 4.0% achieving 43.1% mAP on the COCO dataset.", "year": 2017, "venue": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition", "authors": [ "Barret Zoph", "Vijay Vasudevan", "Jonathon Shlens", "Quoc V. Le" ], "externalIds": { "MAG": "2964081807", "DBLP": "journals/corr/ZophVSL17", "ArXiv": "1707.07012", "DOI": "10.1109/CVPR.2018.00907", "CorpusId": 12227989 }, "url": "https://www.semanticscholar.org/paper/d0611891b9e8a7c5731146097b6f201578f47b2f", "referenceCount": 77, "citationCount": 5251, "influentialCitationCount": 824, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Proximal Policy Optimization Algorithms", "abstract": "We propose a new family of policy gradient methods for reinforcement learning, which alternate between sampling data through interaction with the environment, and optimizing a \"surrogate\" objective function using stochastic gradient ascent. Whereas standard policy gradient methods perform one gradient update per data sample, we propose a novel objective function that enables multiple epochs of minibatch updates. The new methods, which we call proximal policy optimization (PPO), have some of the benefits of trust region policy optimization (TRPO), but they are much simpler to implement, more general, and have better sample complexity (empirically). Our experiments test PPO on a collection of benchmark tasks, including simulated robotic locomotion and Atari game playing, and we show that PPO outperforms other online policy gradient methods, and overall strikes a favorable balance between sample complexity, simplicity, and wall-time.", "year": 2017, "venue": "arXiv.org", "authors": [ "John Schulman", "Filip Wolski", "Prafulla Dhariwal", "Alec Radford", "Oleg Klimov" ], "externalIds": { "MAG": "2736601468", "ArXiv": "1707.06347", "DBLP": "journals/corr/SchulmanWDRK17", "CorpusId": 28695052 }, "url": "https://www.semanticscholar.org/paper/dce6f9d4017b1785979e7520fd0834ef8cf02f4b", "referenceCount": 14, "citationCount": 14872, "influentialCitationCount": 3164, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Remote Sensing Image Scene Classification: Benchmark and State of the Art", "abstract": "Remote sensing image scene classification plays an important role in a wide range of applications and hence has been receiving remarkable attention. During the past years, significant efforts have been made to develop various data sets or present a variety of approaches for scene classification from remote sensing images. However, a systematic review of the literature concerning data sets and methods for scene classification is still lacking. In addition, almost all existing data sets have a number of limitations, including the small scale of scene classes and the image numbers, the lack of image variations and diversity, and the saturation of accuracy. These limitations severely limit the development of new approaches especially deep learning-based methods. This paper first provides a comprehensive review of the recent progress. Then, we propose a large-scale data set, termed “NWPU-RESISC45,” which is a publicly available benchmark for REmote Sensing Image Scene Classification (RESISC), created by Northwestern Polytechnical University (NWPU). This data set contains 31 500 images, covering 45 scene classes with 700 images in each class. 
The proposed NWPU-RESISC45 1) is large-scale on the scene classes and the total image number; 2) holds big variations in translation, spatial resolution, viewpoint, object pose, illumination, background, and occlusion; and 3) has high within-class diversity and between-class similarity. The creation of this data set will enable the community to develop and evaluate various data-driven algorithms. Finally, several representative methods are evaluated using the proposed data set, and the results are reported as a useful baseline for future research.", "year": 2017, "venue": "Proceedings of the IEEE", "authors": [ "Gong Cheng", "Junwei Han", "Xiaoqiang Lu" ], "externalIds": { "MAG": "3103856189", "ArXiv": "1703.00121", "DBLP": "journals/pieee/ChengHL17", "DOI": "10.1109/JPROC.2017.2675998", "CorpusId": 3046524 }, "url": "https://www.semanticscholar.org/paper/179765729fc1e269393617795507607c29a66a8e", "referenceCount": 178, "citationCount": 1879, "influentialCitationCount": 309, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Communication-Efficient Learning of Deep Networks from Decentralized Data", "abstract": "Modern mobile devices have access to a wealth of data suitable for learning models, which in turn can greatly improve the user experience on the device. For example, language models can improve speech recognition and text entry, and image models can automatically select good photos. However, this rich data is often privacy sensitive, large in quantity, or both, which may preclude logging to the data center and training there using conventional approaches. We advocate an alternative that leaves the training data distributed on the mobile devices, and learns a shared model by aggregating locally-computed updates. We term this decentralized approach Federated Learning. \nWe present a practical method for the federated learning of deep networks based on iterative model averaging, and conduct an extensive empirical evaluation, considering five different model architectures and four datasets. These experiments demonstrate the approach is robust to the unbalanced and non-IID data distributions that are a defining characteristic of this setting. Communication costs are the principal constraint, and we show a reduction in required communication rounds by 10-100x as compared to synchronized stochastic gradient descent.", "year": 2016, "venue": "International Conference on Artificial Intelligence and Statistics", "authors": [ "H. B. McMahan", "Eider Moore", "Daniel Ramage", "S. Hampson", "B. A. Y. Arcas" ], "externalIds": { "MAG": "2950745363", "DBLP": "conf/aistats/McMahanMRHA17", "ArXiv": "1602.05629", "CorpusId": 14955348 }, "url": "https://www.semanticscholar.org/paper/d1dbf643447405984eeef098b1b320dee0b3b8a7", "referenceCount": 50, "citationCount": 13806, "influentialCitationCount": 3346, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Human-level control through deep reinforcement learning", "abstract": null, "year": 2015, "venue": "Nature", "authors": [ "Volodymyr Mnih", "K. Kavukcuoglu", "David Silver", "Andrei A. Rusu", "J. Veness", "Marc G. Bellemare", "Alex Graves", "Martin A. Riedmiller", "A. Fidjeland", "Georg Ostrovski", "Stig Petersen", "Charlie Beattie", "Amir Sadik", "Ioannis Antonoglou", "Helen King", "D. Kumaran", "Daan Wierstra", "S. Legg", "D. 
Hassabis" ], "externalIds": { "DBLP": "journals/nature/MnihKSRVBGRFOPB15", "MAG": "2145339207", "DOI": "10.1038/nature14236", "CorpusId": 205242740, "PubMed": "25719670" }, "url": "https://www.semanticscholar.org/paper/340f48901f72278f6bf78a04ee5b01df208cc508", "referenceCount": 37, "citationCount": 25075, "influentialCitationCount": 3030, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "SUN Database: Exploring a Large Collection of Scene Categories", "abstract": null, "year": 2014, "venue": "International Journal of Computer Vision", "authors": [ "Jianxiong Xiao", "Krista A. Ehinger", "James Hays", "A. Torralba", "A. Oliva" ], "externalIds": { "DBLP": "journals/ijcv/XiaoEHTO16", "MAG": "1977766639", "DOI": "10.1007/s11263-014-0748-y", "CorpusId": 10224573 }, "url": "https://www.semanticscholar.org/paper/c9a6c7bfe831f2b154deac4409c35633c63ef326", "referenceCount": 57, "citationCount": 349, "influentialCitationCount": 35, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Describing Textures in the Wild", "abstract": "Patterns and textures are key characteristics of many natural objects: a shirt can be striped, the wings of a butterfly can be veined, and the skin of an animal can be scaly. Aiming at supporting this dimension in image understanding, we address the problem of describing textures with semantic attributes. We identify a vocabulary of forty-seven texture terms and use them to describe a large dataset of patterns collected \"in the wild\". The resulting Describable Textures Dataset (DTD) is a basis to seek the best representation for recognizing describable texture attributes in images. We port from object recognition to texture recognition the Improved Fisher Vector (IFV) and Deep Convolutional-network Activation Features (DeCAF), and show that surprisingly, they both outperform specialized texture descriptors not only on our problem, but also in established material recognition datasets. We also show that our describable attributes are excellent texture descriptors, transferring between datasets and tasks, in particular, combined with IFV and DeCAF, they significantly outperform the state-of-the-art by more than 10% on both FMD and KTH-TIPS-2b benchmarks. We also demonstrate that they produce intuitive descriptions of materials and Internet images.", "year": 2013, "venue": "2014 IEEE Conference on Computer Vision and Pattern Recognition", "authors": [ "Mircea Cimpoi", "Subhransu Maji", "Iasonas Kokkinos", "S. Mohamed", "A. Vedaldi" ], "externalIds": { "MAG": "2918062363", "ArXiv": "1311.3618", "DBLP": "journals/corr/CimpoiMKMV13", "DOI": "10.1109/CVPR.2014.461", "CorpusId": 4309276 }, "url": "https://www.semanticscholar.org/paper/18c125ce0f64e85577f7d30132cf0e92ec664bf4", "referenceCount": 45, "citationCount": 2176, "influentialCitationCount": 321, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The German Traffic Sign Recognition Benchmark: A multi-class classification competition", "abstract": "The “German Traffic Sign Recognition Benchmark” is a multi-category classification competition held at IJCNN 2011. Automatic recognition of traffic signs is required in advanced driver assistance systems and constitutes a challenging real-world computer vision and pattern recognition problem. A comprehensive, lifelike dataset of more than 50,000 traffic sign images has been collected. 
It reflects the strong variations in visual appearance of signs due to distance, illumination, weather conditions, partial occlusions, and rotations. The images are complemented by several precomputed feature sets to allow for applying machine learning algorithms without background knowledge in image processing. The dataset comprises 43 classes with unbalanced class frequencies. Participants have to classify two test sets of more than 12,500 images each. Here, the results on the first of these sets, which was used in the first evaluation stage of the two-fold challenge, are reported. The methods employed by the participants who achieved the best results are briefly described and compared to human traffic sign recognition performance and baseline results.", "year": 2011, "venue": "The 2011 International Joint Conference on Neural Networks", "authors": [ "J. Stallkamp", "Marc Schlipsing", "J. Salmen", "C. Igel" ], "externalIds": { "DBLP": "conf/ijcnn/StallkampSSI11", "MAG": "2117876524", "DOI": "10.1109/IJCNN.2011.6033395", "CorpusId": 15926837 }, "url": "https://www.semanticscholar.org/paper/22fe619996b59c09cb73be40103a123d2e328111", "referenceCount": 22, "citationCount": 1000, "influentialCitationCount": 166, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MOEA/D: A Multiobjective Evolutionary Algorithm Based on Decomposition", "abstract": "Decomposition is a basic strategy in traditional multiobjective optimization. However, it has not yet been widely used in multiobjective evolutionary optimization. This paper proposes a multiobjective evolutionary algorithm based on decomposition (MOEA/D). It decomposes a multiobjective optimization problem into a number of scalar optimization subproblems and optimizes them simultaneously. Each subproblem is optimized by only using information from its several neighboring subproblems, which makes MOEA/D have lower computational complexity at each generation than MOGLS and nondominated sorting genetic algorithm II (NSGA-II). Experimental results have demonstrated that MOEA/D with simple decomposition methods outperforms or performs similarly to MOGLS and NSGA-II on multiobjective 0-1 knapsack problems and continuous multiobjective optimization problems. It has been shown that MOEA/D using objective normalization can deal with disparately-scaled objectives, and MOEA/D with an advanced decomposition method can generate a set of very evenly distributed solutions for 3-objective test instances. The ability of MOEA/D with small population, the scalability and sensitivity of MOEA/D have also been experimentally investigated in this paper.", "year": 2007, "venue": "IEEE Transactions on Evolutionary Computation", "authors": [ "Qingfu Zhang", "Hui Li" ], "externalIds": { "DBLP": "journals/tec/ZhangL07", "MAG": "2143381319", "DOI": "10.1109/TEVC.2007.892759", "CorpusId": 7312933 }, "url": "https://www.semanticscholar.org/paper/b76be6707fa5858cc5378bc11f10ec6f6a97d85c", "referenceCount": 45, "citationCount": 7126, "influentialCitationCount": 1100, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Multiobjective Evolutionary Algorithms and Applications", "abstract": null, "year": 2005, "venue": "Advanced Information and Knowledge Processing", "authors": [ "K. Tan", "E. F. 
Khor", "Tong-heng Lee" ], "externalIds": { "DBLP": "series/aikp/TanKL05", "MAG": "1581487892", "DOI": "10.1007/1-84628-132-6", "CorpusId": 43754276 }, "url": "https://www.semanticscholar.org/paper/8b11ce20ca427c4e3eeaa61edf4f35a4a7d3e58d", "referenceCount": 0, "citationCount": 273, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An evolution strategy with probabilistic mutation for multi-objective optimisation", "abstract": "Evolutionary algorithms have been applied with great success to the difficult field of multiobjective optimisation. Nevertheless, the need for improvements in this field is still strong. We present a new evolutionary algorithm, ESP (the Evolution Strategy with Probabilistic mutation). ESP extends traditional evolution strategies in two principal ways: it applies mutation probabilistically in a GA-like fashion, and it uses a new hyper-volume based, parameterless, scaling independent measure for resolving ties during the selection process. ESP outperforms the state-of-the-art algorithms on a suite of benchmark multiobjective test functions using a range of popular metrics.", "year": 2003, "venue": "The 2003 Congress on Evolutionary Computation, 2003. CEC '03.", "authors": [ "S. Huband", "P. Hingston", "R. L. While", "L. Barone" ], "externalIds": { "MAG": "2129392074", "DBLP": "conf/cec/HubandHWB03", "DOI": "10.1109/CEC.2003.1299373", "CorpusId": 12031609 }, "url": "https://www.semanticscholar.org/paper/13915f3491500ad6fa8d1c1c1bf7fb86cda7547a", "referenceCount": 27, "citationCount": 103, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multi-Objective Optimization for Bandwidth-Limited Federated Learning in Wireless Edge Systems", "abstract": "This paper studies a bandwidth-limited federated learning (FL) system where the access point is a central server for aggregation and the energy-constrained user equipments (UEs) with limited computation capabilities (e.g., Internet of Things devices) perform local training. Limited by the bandwidth in wireless edge systems, only a part of UEs can participate in each FL training round. Selecting different UEs could affect the FL performance, and selected UEs need to allocate their computing resource effectively. In wireless edge FL systems, simultaneously accelerating FL training and reducing computing-communication energy consumption are of importance. To this end, we formulate a multi-objective optimization problem (MOP). In MOP, the model training convergence is difficult to calculate accurately. Meanwhile, MOP is a combinatorial optimization problem, with the high-dimension mix-integer variables, which is proved to be NP-hard. To address these challenges, a multi-objective evolutionary algorithm for the bandwidth-limited FL system (MOEA-FL) is proposed to obtain a Pareto optimal solution set. In MOEA-FL, an age-of-update-loss method is first proposed to transform the original global loss function into a convergence reference function. Then, MOEA-FL divides MOP into $N$ single objective subproblems by the Tchebycheff approach and optimizes the subproblems simultaneously by evolving a population. Extensive experiments have been carried out on MNIST dataset and a medical case called TissueMNIST dataset for both the i.i.d and non-i.i.d data setting. 
Experimental results demonstrate that MOEA-FL performs better than other algorithms and verify the robustness and scalability of MOEA-FL.", "year": 2023, "venue": "IEEE Open Journal of the Communications Society", "authors": [ "Yujie Zhou", "Xuemei Liu", "Lei Lei" ], "externalIds": { "DBLP": "journals/ojcs/ZhouLL23", "DOI": "10.1109/OJCOMS.2023.3266389", "CorpusId": 258094008 }, "url": "https://www.semanticscholar.org/paper/9658a28ba10b3e53fd1121d6c25037003ab0d1bb", "referenceCount": 52, "citationCount": 6, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Overview of the IWSLT 2017 Evaluation Campaign", "abstract": "The IWSLT 2017 evaluation campaign has organised three tasks. The Multilingual task, which is about training machine translation systems handling many-to-many language directions, including so-called zero-shot directions. The Dialogue task, which calls for the integration of context information in machine translation, in order to resolve anaphoric references that typically occur in human-human dialogue turns. And, finally, the Lecture task, which offers the challenge of automatically transcribing and translating real-life university lectures. Following the tradition of these reports, we will describe all tasks in detail and present the results of all runs submitted by their participants.", "year": 2017, "venue": "International Workshop on Spoken Language Translation", "authors": [ "M. Cettolo", "Marcello Federico", "L. Bentivogli", "Niehues Jan", "Stüker Sebastian", "Sudoh Katsuitho", "Yoshino Koichiro", "Federmann Christian" ], "externalIds": { "MAG": "3005389111", "ACL": "2017.iwslt-1.1", "DBLP": "conf/iwslt/CettoloFBNSSYF17", "CorpusId": 21675165 }, "url": "https://www.semanticscholar.org/paper/cb0ab255c4079e2082ba6e3a807529527d96687c", "referenceCount": 29, "citationCount": 183, "influentialCitationCount": 16, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Neural Machine Translation Systems for WMT 16", "abstract": "We participated in the WMT 2016 shared news translation task by building neural translation systems for four language pairs, each trained in both directions: English↔Czech, English↔German, English↔Romanian and English↔Russian. Our systems are based on an attentional encoder-decoder, using BPE subword segmentation for open-vocabulary translation with a fixed vocabulary. We experimented with using automatic back-translations of the monolingual News corpus as additional training data, pervasive dropout, and target-bidirectional models. All reported methods give substantial improvements, and we see improvements of 4.3–11.2 BLEU over our baseline systems. In the human evaluation, our systems were the (tied) best constrained system for 7 out of 8 translation directions in which we participated.", "year": 2016, "venue": "", "authors": [ "Rico Sennrich", "B. 
Haddow", "Alexandra Birch" ], "externalIds": { "CorpusId": 14919987 }, "url": "https://www.semanticscholar.org/paper/e3ce71a26872c7755e6d8b8fc45bf00c8be64193", "referenceCount": 13, "citationCount": 526, "influentialCitationCount": 126, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Sampling generative networks", "abstract": null, "year": 2016, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Reading Digits in Natural Images with Unsupervised Feature Learning", "abstract": "Detecting and reading text from natural images is a hard computer vision task that is central to a variety of emerging applications. Related problems like document character recognition have been widely studied by computer vision and machine learning researchers and are virtually solved for practical applications like reading handwritten digits. Reliably recognizing characters in more complex scenes like photographs, however, is far more difficult: the best existing methods lag well behind human performance on the same tasks. In this paper we attack the problem of recognizing digits in a real application using unsupervised feature learning methods: reading house numbers from street level photos. To this end, we introduce a new benchmark dataset for research use containing over 600,000 labeled digits cropped from Street View images. We then demonstrate the difficulty of recognizing these digits when the problem is approached with hand-designed features. Finally, we employ variants of two recently proposed unsupervised feature learning methods and find that they are convincingly superior on our benchmarks.", "year": 2011, "venue": "", "authors": [ "Yuval Netzer", "Tao Wang", "Adam Coates", "A. Bissacco", "Bo Wu", "A. Ng" ], "externalIds": { "MAG": "2335728318", "CorpusId": 16852518 }, "url": "https://www.semanticscholar.org/paper/02227c94dd41fe0b439e050d377b0beb5d427cda", "referenceCount": 29, "citationCount": 6261, "influentialCitationCount": 1597, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "2023 A framework for few-shot language model evaluation", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2022. A framework for the evaluation of code generation models", "abstract": null, "year": null, "venue": "github", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. Arcee’s MergeKit: A toolkit for merging large language models", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Input: A set of weight vectors { λλλ 1 ,λλλ 2 , . . . ,λλλ N } and their corresponding optimal merged models at the parameter space", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024b. 
CausalBench: A comprehensive benchmark for causal learning capability of large language models", "abstract": null, "year": 2024, "venue": "arXiv", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "It’s morphing time: Unleashing the potential of multiple LLMs via multi-objective optimization", "abstract": null, "year": 2024, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Autonomous Network Defence using Reinforcement Learning": { "paper_title": "Autonomous Network Defence using Reinforcement Learning", "arxiv_id": "2409.18197v1", "keyword": "reinforcement learning", "authors": [ "Myles Foley", "Chris Hicks", "Kate Highnam", "Vasilios Mavroudis" ], "references": [ { "title": "CybORG: A Gym for the Development of Autonomous Cyber Agents", "abstract": "Autonomous Cyber Operations (ACO) involves the development of blue team (defender) and red team (attacker) decision-making agents in adversarial scenarios. To support the application of machine learning algorithms to solve this problem, and to encourage researchers in this field to attend to problems in the ACO setting, we introduce CybORG, a work-in-progress gym for ACO research. CybORG features a simulation and emulation environment with a common interface to facilitate the rapid training of autonomous agents that can then be tested on real-world systems. Initial testing demonstrates the feasibility of this approach.", "year": 2021, "venue": "arXiv.org", "authors": [ "Maxwell Standen", "Martin Lucas", "David Bowman", "Toby J. Richer", "Junae Kim", "Damian A. Marriott" ], "externalIds": { "DBLP": "journals/corr/abs-2108-09118", "ArXiv": "2108.09118", "CorpusId": 237259783 }, "url": "https://www.semanticscholar.org/paper/28465528c31336905bbdf2bd7f80f1806b6f03c8", "referenceCount": 31, "citationCount": 54, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Automated Penetration Testing Using Deep Reinforcement Learning", "abstract": "At present, penetration testing is done mostly manually, and relies heavily on the experience of the ethical hackers that are performing it, called \"pentesters\". This paper presents an automated penetration testing framework that employs deep reinforcement learning to automate the penetration testing process. We plan to use this framework mainly as a component of cybersecurity training activities, to provide guided learning for attack training by making use of the framework to suggest possible strategies. When adding support for actual penetration testing tools, the framework could also be used in defense training, by automatically recreating attacks in the training environment. In this paper we present our approach for automated penetration testing, which has two stages. 
First we use the Shodan search engine to collect relevant server data in order to build a realistic network topology, and employ multi-host multi-stage vulnerability analysis (MulVAL) to generate an attack tree for that topology; traditional search algorithms are used to find all the possible attack paths in that tree and to build a matrix representation as needed by deep reinforcement learning algorithms. As a second stage, we employ the Deep Q-Learning Network (DQN) method to discover the most easy to exploit attack path from the possible candidates. This approach was evaluated by generating thousands of input scenarios, and DQN was able to find the optimal path with an accuracy of 0.86, while also providing valid solutions in the other cases.", "year": 2020, "venue": "2020 IEEE European Symposium on Security and Privacy Workshops (EuroS&PW)", "authors": [ "Zhenguo Hu", "R. Beuran", "Yasuo Tan" ], "externalIds": { "MAG": "3094485976", "DBLP": "conf/eurosp/HuBT20", "DOI": "10.1109/EuroSPW51379.2020.00010", "CorpusId": 225079494 }, "url": "https://www.semanticscholar.org/paper/7e0a789e7255687f4907ebe647921498219ba77a", "referenceCount": 27, "citationCount": 66, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Dota 2 with Large Scale Deep Reinforcement Learning", "abstract": "On April 13th, 2019, OpenAI Five became the first AI system to defeat the world champions at an esports game. The game of Dota 2 presents novel challenges for AI systems such as long time horizons, imperfect information, and complex, continuous state-action spaces, all challenges which will become increasingly central to more capable AI systems. OpenAI Five leveraged existing reinforcement learning techniques, scaled to learn from batches of approximately 2 million frames every 2 seconds. We developed a distributed training system and tools for continual training which allowed us to train OpenAI Five for 10 months. By defeating the Dota 2 world champion (Team OG), OpenAI Five demonstrates that self-play reinforcement learning can achieve superhuman performance on a difficult task.", "year": 2019, "venue": "arXiv.org", "authors": [ "Christopher Berner", "Greg Brockman", "Brooke Chan", "Vicki Cheung", "Przemyslaw Debiak", "Christy Dennison", "David Farhi", "Quirin Fischer", "Shariq Hashme", "Christopher Hesse", "R. Józefowicz", "Scott Gray", "Catherine Olsson", "J. Pachocki", "Michael Petrov", "Henrique Pondé de Oliveira Pinto", "Jonathan Raiman", "Tim Salimans", "Jeremy Schlatter", "Jonas Schneider", "Szymon Sidor", "I. Sutskever", "Jie Tang", "Filip Wolski", "Susan Zhang" ], "externalIds": { "MAG": "2996037775", "DBLP": "journals/corr/abs-1912-06680", "ArXiv": "1912.06680", "CorpusId": 209376771 }, "url": "https://www.semanticscholar.org/paper/b19729b27a1b4c24b52f87308c907653300afa7f", "referenceCount": 50, "citationCount": 1591, "influentialCitationCount": 86, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Deep Reinforcement Learning for Cyber Security", "abstract": "The scale of Internet-connected systems has increased considerably, and these systems are being exposed to cyberattacks more than ever. The complexity and dynamics of cyberattacks require protecting mechanisms to be responsive, adaptive, and scalable. Machine learning, or more specifically deep reinforcement learning (DRL), methods have been proposed widely to address these issues. 
By incorporating deep learning into traditional RL, DRL is highly capable of solving complex, dynamic, and especially high-dimensional cyber defense problems. This article presents a survey of DRL approaches developed for cyber security. We touch on different vital aspects, including DRL-based security methods for cyber–physical systems, autonomous intrusion detection techniques, and multiagent DRL-based game theory simulations for defense strategies against cyberattacks. Extensive discussions and future research directions on DRL-based cyber security are also given. We expect that this comprehensive review provides the foundations for and facilitates future studies on exploring the potential of emerging DRL to cope with increasingly complex cyber security problems.", "year": 2019, "venue": "IEEE Transactions on Neural Networks and Learning Systems", "authors": [ "T. Nguyen", "V. Reddi" ], "externalIds": { "DBLP": "journals/corr/abs-1906-05799", "MAG": "2952298682", "ArXiv": "1906.05799", "DOI": "10.1109/TNNLS.2021.3121870", "CorpusId": 189762438, "PubMed": "34723814" }, "url": "https://www.semanticscholar.org/paper/a9b901fcd68a3715d5ef186a398476bc1e762b0e", "referenceCount": 218, "citationCount": 240, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine", "Mathematics" ] }, { "title": "Distributed Prioritized Experience Replay", "abstract": "We propose a distributed architecture for deep reinforcement learning at scale, that enables agents to learn effectively from orders of magnitude more data than previously possible. The algorithm decouples acting from learning: the actors interact with their own instances of the environment by selecting actions according to a shared neural network, and accumulate the resulting experience in a shared experience replay memory; the learner replays samples of experience and updates the neural network. The architecture relies on prioritized experience replay to focus only on the most significant data generated by the actors. Our architecture substantially improves the state of the art on the Arcade Learning Environment, achieving better final performance in a fraction of the wall-clock training time.", "year": 2018, "venue": "International Conference on Learning Representations", "authors": [ "Dan Horgan", "John Quan", "D. Budden", "Gabriel Barth-Maron", "Matteo Hessel", "H. V. Hasselt", "David Silver" ], "externalIds": { "MAG": "2963296584", "DBLP": "conf/iclr/HorganQBBHHS18", "ArXiv": "1803.00933", "CorpusId": 3463260 }, "url": "https://www.semanticscholar.org/paper/601a2d349fc26d7b82f905e924e2f91b0ac4b310", "referenceCount": 41, "citationCount": 682, "influentialCitationCount": 86, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "IMPALA: Scalable Distributed Deep-RL with Importance Weighted Actor-Learner Architectures", "abstract": "In this work we aim to solve a large collection of tasks using a single reinforcement learning agent with a single set of parameters. A key challenge is to handle the increased amount of data and extended training time. We have developed a new distributed agent IMPALA (Importance Weighted Actor-Learner Architecture) that not only uses resources more efficiently in single-machine training but also scales to thousands of machines without sacrificing data efficiency or resource utilisation. We achieve stable learning at high throughput by combining decoupled acting and learning with a novel off-policy correction method called V-trace. 
We demonstrate the effectiveness of IMPALA for multi-task reinforcement learning on DMLab-30 (a set of 30 tasks from the DeepMind Lab environment (Beattie et al., 2016)) and Atari-57 (all available Atari games in Arcade Learning Environment (Bellemare et al., 2013a)). Our results show that IMPALA is able to achieve better performance than previous agents with less data, and crucially exhibits positive transfer between tasks as a result of its multi-task approach.", "year": 2018, "venue": "International Conference on Machine Learning", "authors": [ "L. Espeholt", "Hubert Soyer", "R. Munos", "K. Simonyan", "Volodymyr Mnih", "Tom Ward", "Yotam Doron", "Vlad Firoiu", "Tim Harley", "Iain Dunning", "S. Legg", "K. Kavukcuoglu" ], "externalIds": { "MAG": "2950708659", "DBLP": "conf/icml/EspeholtSMSMWDF18", "ArXiv": "1802.01561", "CorpusId": 3645060 }, "url": "https://www.semanticscholar.org/paper/80196cdfcd0c6ce2953bf65a7f019971e2026386", "referenceCount": 42, "citationCount": 1455, "influentialCitationCount": 231, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "RLlib: Abstractions for Distributed Reinforcement Learning", "abstract": "Reinforcement learning (RL) algorithms involve the deep nesting of highly irregular computation patterns, each of which typically exhibits opportunities for distributed computation. We argue for distributing RL components in a composable way by adapting algorithms for top-down hierarchical control, thereby encapsulating parallelism and resource requirements within short-running compute tasks. We demonstrate the benefits of this principle through RLlib: a library that provides scalable software primitives for RL. These primitives enable a broad range of algorithms to be implemented with high performance, scalability, and substantial code reuse. RLlib is available at this https URL.", "year": 2017, "venue": "International Conference on Machine Learning", "authors": [ "Eric Liang", "Richard Liaw", "Robert Nishihara", "Philipp Moritz", "Roy Fox", "Ken Goldberg", "Joseph E. Gonzalez", "Michael I. Jordan", "Ion Stoica" ], "externalIds": { "MAG": "2949532984", "DBLP": "conf/icml/LiangLNMFGGJS18", "CorpusId": 49546141 }, "url": "https://www.semanticscholar.org/paper/82a262a2034b349abaa720c7f8229a0ef19e87cd", "referenceCount": 42, "citationCount": 771, "influentialCitationCount": 87, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep reinforecement learning based optimal defense for cyber-physical system in presence of unknown cyber-attack", "abstract": "In this paper, the online optimal cyber-defense problem has been investigated for Cyber-Physical Systems (CPS) with unknown cyber-attacks. Firstly, a novel cyber state dynamics has been generated that can evaluate the real-time impacts from current cyber-attack and defense strategies effectively and dynamically. Next, adopting game theory technique, the idea optimal defense design can be obtained by using the full knowledge of cyber-state dynamics. To relax the requirement about cyber-state dynamics, a game-theoretical actor-critic neural network (NN) structure was developed to efficiently learn the optimal cyber defense strategy online. Moreover, to further improve the practicality of developed scheme, a novel deep reinforcement learning algorithm have been designed and implemented into actor-critic NN structure. 
Eventually, the numerical simulation demonstrate that proposed deep reinforcement learning based optimal defense strategy cannot only online defend the CPS even in presence of unknown cyber-attacks, and also learn the optimal defense policy more accurate and timely.", "year": 2017, "venue": "IEEE Symposium Series on Computational Intelligence", "authors": [ "Ming Feng", "Hao Xu" ], "externalIds": { "MAG": "2787262922", "DBLP": "conf/ssci/FengX17", "DOI": "10.1109/SSCI.2017.8285298", "CorpusId": 44244847 }, "url": "https://www.semanticscholar.org/paper/96305a429019a1ec87d0f3b5e4a9fda9b57b724f", "referenceCount": 33, "citationCount": 29, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Proximal Policy Optimization Algorithms", "abstract": "We propose a new family of policy gradient methods for reinforcement learning, which alternate between sampling data through interaction with the environment, and optimizing a \"surrogate\" objective function using stochastic gradient ascent. Whereas standard policy gradient methods perform one gradient update per data sample, we propose a novel objective function that enables multiple epochs of minibatch updates. The new methods, which we call proximal policy optimization (PPO), have some of the benefits of trust region policy optimization (TRPO), but they are much simpler to implement, more general, and have better sample complexity (empirically). Our experiments test PPO on a collection of benchmark tasks, including simulated robotic locomotion and Atari game playing, and we show that PPO outperforms other online policy gradient methods, and overall strikes a favorable balance between sample complexity, simplicity, and wall-time.", "year": 2017, "venue": "arXiv.org", "authors": [ "John Schulman", "Filip Wolski", "Prafulla Dhariwal", "Alec Radford", "Oleg Klimov" ], "externalIds": { "MAG": "2736601468", "ArXiv": "1707.06347", "DBLP": "journals/corr/SchulmanWDRK17", "CorpusId": 28695052 }, "url": "https://www.semanticscholar.org/paper/dce6f9d4017b1785979e7520fd0834ef8cf02f4b", "referenceCount": 14, "citationCount": 14872, "influentialCitationCount": 3164, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Towards automated network mitigation analysis", "abstract": "Penetration testing is a well-established practical concept for the identification of potentially exploitable security weaknesses and an important component of a security audit. Providing a holistic security assessment for networks consisting of several hundreds hosts is hardly feasible though without some sort of mechanization. Mitigation, prioritizing counter-measures subject to a given budget, currently lacks a solid theoretical understanding and is hence more art than science. In this work, we propose the first approach for conducting comprehensive what-if analyses in order to reason about mitigation in a conceptually well-founded manner. To evaluate and compare mitigation strategies, we use simulated penetration testing, i.e., automated attack-finding, based on a network model to which a subset of a given set of mitigation actions, e.g., changes to the network topology, system updates, configuration changes etc. is applied. Using Stackelberg planning, we determine optimal combinations that minimize the maximal attacker success (similar to a Stackelberg game), and thus provide a well-founded basis for a holistic mitigation strategy. 
We show that these Stackelberg planning models can largely be derived from network scan, public vulnerability databases and manual inspection with various degrees of automation and detail, and we simulate mitigation analysis on networks of different size and vulnerability.", "year": 2017, "venue": "ACM Symposium on Applied Computing", "authors": [ "Patrick Speicher", "Marcel Steinmetz", "J. Hoffmann", "M. Backes", "R. Künnemann" ], "externalIds": { "DBLP": "conf/sac/SpeicherS00K19", "MAG": "2908511069", "DOI": "10.1145/3297280.3297473", "CorpusId": 142503844 }, "url": "https://www.semanticscholar.org/paper/b54f3cf3b70a2f550e7b97f7416474e7601f88d7", "referenceCount": 45, "citationCount": 21, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Curiosity-Driven Exploration by Self-Supervised Prediction", "abstract": "In many real-world scenarios, rewards extrinsic to the agent are extremely sparse, or absent altogether. In such cases, curiosity can serve as an intrinsic reward signal to enable the agent to explore its environment and learn skills that might be useful later in its life. We formulate curiosity as the error in an agent's ability to predict the consequence of its own actions in a visual feature space learned by a self-supervised inverse dynamics model. Our formulation scales to high-dimensional continuous state spaces like images, bypasses the difficulties of directly predicting pixels, and, critically, ignores the aspects of the environment that cannot affect the agent. The proposed approach is evaluated in two environments: VizDoom and Super Mario Bros. Three broad settings are investigated: 1) sparse extrinsic reward; 2) exploration with no extrinsic reward; and 3) generalization to unseen scenarios (e.g. new levels of the same game).", "year": 2017, "venue": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "authors": [ "Deepak Pathak", "Pulkit Agrawal", "Alexei A. Efros", "Trevor Darrell" ], "externalIds": { "MAG": "2963523627", "ArXiv": "1705.05363", "DBLP": "journals/corr/PathakAED17", "DOI": "10.1109/CVPRW.2017.70", "CorpusId": 20045336 }, "url": "https://www.semanticscholar.org/paper/225ab689f41cef1dc18237ef5dab059a49950abf", "referenceCount": 49, "citationCount": 2200, "influentialCitationCount": 299, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Cyber Autonomy Gym for Experimentation Challenge 1. https://github.com/cage-challenge/cage-challenge-1", "abstract": null, "year": 2021, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "M-Trends2021:CyberSecurityInsights", "abstract": null, "year": 2021, "venue": "TechnicalReport", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Reinforcement Learning: An Introduction (2nd ed.). 
", "abstract": null, "year": 2018, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "LoopSR: Looping Sim-and-Real for Lifelong Policy Adaptation of Legged Robots": { "paper_title": "LoopSR: Looping Sim-and-Real for Lifelong Policy Adaptation of Legged Robots", "arxiv_id": "2409.17992v1", "keyword": "reinforcement learning", "authors": [ "Peilin Wu", "Weiji Xie", "Jiahang Cao", "Hang Lai", "Weinan Zhang" ], "references": [ { "title": "A Comprehensive Survey on Retrieval Methods in Recommender Systems", "abstract": "In an era dominated by information overload, effective recommender systems are essential for managing the deluge of data across digital platforms. Multi-stage cascade ranking systems are widely used in the industry, with retrieval and ranking being two typical stages. Retrieval methods sift through vast candidates to filter out irrelevant items, while ranking methods prioritize these candidates to present the most relevant items to users. Unlike studies focusing on the ranking stage, this survey explores the critical yet often overlooked retrieval stage of recommender systems. To achieve precise and efficient personalized retrieval, we summarize existing work in three key areas: improving similarity computation between user and item, enhancing indexing mechanisms for efficient retrieval, and optimizing training methods of retrieval. We also provide a comprehensive set of benchmarking experiments on three public datasets. Furthermore, we highlight current industrial applications through a case study on retrieval practices at a specific company, covering the entire retrieval process and online serving, along with practical implications and challenges. By detailing the retrieval stage, which is fundamental for effective recommendation, this survey aims to bridge the existing knowledge gap and serve as a cornerstone for researchers interested in optimizing this critical component of cascade recommender systems.", "year": 2024, "venue": "arXiv.org", "authors": [ "Junjie Huang", "Jizheng Chen", "Jianghao Lin", "Jiarui Qin", "Ziming Feng", "Weinan Zhang", "Yong Yu" ], "externalIds": { "DBLP": "journals/corr/abs-2407-21022", "ArXiv": "2407.21022", "DOI": "10.48550/arXiv.2407.21022", "CorpusId": 271571620 }, "url": "https://www.semanticscholar.org/paper/2b572f973cac02b36e31a6619565beed1b983cfd", "referenceCount": 185, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Skill Transfer and Discovery for Sim-to-Real Learning: A Representation-Based Viewpoint", "abstract": "We study sim-to-real skill transfer and discovery in the context of robotics control using representation learning. We draw inspiration from spectral decomposition of Markov decision processes. The spectral decomposition brings about representation that can linearly represent the state-action value function induced by any policies, thus can be regarded as skills. The skill representations are transferable across arbitrary tasks with the same transition dynamics. Moreover, to handle the sim-to-real gap in the dynamics, we propose a skill discovery algorithm that learns new skills caused by the sim-to-real gap from real-world data. 
We promote the discovery of new skills by enforcing orthogonal constraints between the skills to learn and the skills from simulators, and then synthesize the policy using the enlarged skill sets. We demonstrate our methodology by transferring quadrotor controllers from simulators to Crazyflie 2.1 quadrotors. We show that we can learn the skill representations from a single simulator task and transfer these to multiple different real-world tasks including hovering, taking off, landing and trajectory tracking. Our skill discovery approach helps narrow the sim-to-real gap and improve the real-world controller performance by up to 30.2%.", "year": 2024, "venue": "arXiv.org", "authors": [ "Haitong Ma", "Zhaolin Ren", "Bo Dai", "Na Li" ], "externalIds": { "DBLP": "journals/corr/abs-2404-05051", "ArXiv": "2404.05051", "DOI": "10.48550/arXiv.2404.05051", "CorpusId": 269005006 }, "url": "https://www.semanticscholar.org/paper/37888bd7334b063cefcbd58094366799e3a665cd", "referenceCount": 36, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Reset & Distill: A Recipe for Overcoming Negative Transfer in Continual Reinforcement Learning", "abstract": "We argue that the negative transfer problem occurring when the new task to learn arrives is an important problem that needs not be overlooked when developing effective Continual Reinforcement Learning (CRL) algorithms. Through comprehensive experimental validation, we demonstrate that such issue frequently exists in CRL and cannot be effectively addressed by several recent work on mitigating plasticity loss of RL agents. To that end, we develop Reset&Distill (R&D), a simple yet highly effective method, to overcome the negative transfer problem in CRL. R&D combines a strategy of resetting the agent's online actor and critic networks to learn a new task and an offline learning step for distilling the knowledge from the online actor and previous expert's action probabilities. We carried out extensive experiments on long sequence of Meta World tasks and show that our method consistently outperforms recent baselines, achieving significantly higher success rates across a range of tasks. Our findings highlight the importance of considering negative transfer in CRL and emphasize the need for robust strategies like R&D to mitigate its detrimental effects.", "year": 2024, "venue": "arXiv.org", "authors": [ "Hongjoon Ahn", "Jinu Hyeon", "Youngmin Oh", "Bosun Hwang", "Taesup Moon" ], "externalIds": { "ArXiv": "2403.05066", "DBLP": "journals/corr/abs-2403-05066", "DOI": "10.48550/arXiv.2403.05066", "CorpusId": 268297137 }, "url": "https://www.semanticscholar.org/paper/6e3e807b79f30e8ac749a62a37aaafb91b2c0caf", "referenceCount": 49, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Unsupervised Zero-Shot Reinforcement Learning via Functional Reward Encodings", "abstract": "Can we pre-train a generalist agent from a large amount of unlabeled offline trajectories such that it can be immediately adapted to any new downstream tasks in a zero-shot manner? In this work, we present a functional reward encoding (FRE) as a general, scalable solution to this zero-shot RL problem. Our main idea is to learn functional representations of any arbitrary tasks by encoding their state-reward samples using a transformer-based variational auto-encoder. 
This functional encoding not only enables the pre-training of an agent from a wide diversity of general unsupervised reward functions, but also provides a way to solve any new downstream tasks in a zero-shot manner, given a small number of reward-annotated samples. We empirically show that FRE agents trained on diverse random unsupervised reward functions can generalize to solve novel tasks in a range of simulated robotic benchmarks, often outperforming previous zero-shot RL and offline RL methods. Code for this project is provided at: https://github.com/kvfrans/fre", "year": 2024, "venue": "International Conference on Machine Learning", "authors": [ "Kevin Frans", "Seohong Park", "Pieter Abbeel", "Sergey Levine" ], "externalIds": { "ArXiv": "2402.17135", "DBLP": "journals/corr/abs-2402-17135", "DOI": "10.48550/arXiv.2402.17135", "CorpusId": 268033694 }, "url": "https://www.semanticscholar.org/paper/d27da1ba65fa958e45837120fad1c25e7017d80c", "referenceCount": 59, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Foundation Policies with Hilbert Representations", "abstract": "Unsupervised and self-supervised objectives, such as next token prediction, have enabled pre-training generalist models from large amounts of unlabeled data. In reinforcement learning (RL), however, finding a truly general and scalable unsupervised pre-training objective for generalist policies from offline data remains a major open question. While a number of methods have been proposed to enable generic self-supervised RL, based on principles such as goal-conditioned RL, behavioral cloning, and unsupervised skill learning, such methods remain limited in terms of either the diversity of the discovered behaviors, the need for high-quality demonstration data, or the lack of a clear adaptation mechanism for downstream tasks. In this work, we propose a novel unsupervised framework to pre-train generalist policies that capture diverse, optimal, long-horizon behaviors from unlabeled offline data such that they can be quickly adapted to any arbitrary new tasks in a zero-shot manner. Our key insight is to learn a structured representation that preserves the temporal structure of the underlying environment, and then to span this learned latent space with directional movements, which enables various zero-shot policy\"prompting\"schemes for downstream tasks. Through our experiments on simulated robotic locomotion and manipulation benchmarks, we show that our unsupervised policies can solve goal-conditioned and general RL tasks in a zero-shot fashion, even often outperforming prior methods designed specifically for each setting. 
Our code and videos are available at https://seohong.me/projects/hilp/.", "year": 2024, "venue": "International Conference on Machine Learning", "authors": [ "Seohong Park", "Tobias Kreiman", "Sergey Levine" ], "externalIds": { "DBLP": "conf/icml/ParkKL24", "ArXiv": "2402.15567", "DOI": "10.48550/arXiv.2402.15567", "CorpusId": 267938433 }, "url": "https://www.semanticscholar.org/paper/c4e31cad004cb6eda60dd3e042f9b7d60ddee6ea", "referenceCount": 80, "citationCount": 9, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Generalizable Task Representation Learning for Offline Meta-Reinforcement Learning with Data Limitations", "abstract": "Generalization and sample efficiency have been long-standing issues concerning reinforcement learning, and thus the field of Offline Meta-Reinforcement Learning (OMRL) has gained increasing attention due to its potential of solving a wide range of problems with static and limited offline data. Existing OMRL methods often assume sufficient training tasks and data coverage to apply contrastive learning to extract task representations. However, such assumptions are not applicable in several real-world applications and thus undermine the generalization ability of the representations. In this paper, we consider OMRL with two types of data limitations: limited training tasks and limited behavior diversity and propose a novel algorithm called GENTLE for learning generalizable task representations in the face of data limitations. GENTLE employs Task Auto-Encoder (TAE), which is an encoder-decoder architecture to extract the characteristics of the tasks. Unlike existing methods, TAE is optimized solely by reconstruction of the state transition and reward, which captures the generative structure of the task models and produces generalizable representations when training tasks are limited. To alleviate the effect of limited behavior diversity, we consistently construct pseudo-transitions to align the data distribution used to train TAE with the data distribution encountered during testing. Empirically, GENTLE significantly outperforms existing OMRL methods on both in-distribution tasks and out-of-distribution tasks across both the given-context protocol and the one-shot protocol.", "year": 2023, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Renzhe Zhou", "Chenxiao Gao", "Zongzhang Zhang", "Yang Yu" ], "externalIds": { "DBLP": "conf/aaai/ZhouGZ024", "ArXiv": "2312.15909", "DOI": "10.48550/arXiv.2312.15909", "CorpusId": 266550885 }, "url": "https://www.semanticscholar.org/paper/feb2255dbad1c829fff48922fbf0169d1b3ee4df", "referenceCount": 30, "citationCount": 5, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Retrieval-Augmented Generation for Large Language Models: A Survey", "abstract": "Large Language Models (LLMs) showcase impressive capabilities but encounter challenges like hallucination, outdated knowledge, and non-transparent, untraceable reasoning processes. Retrieval-Augmented Generation (RAG) has emerged as a promising solution by incorporating knowledge from external databases. This enhances the accuracy and credibility of the generation, particularly for knowledge-intensive tasks, and allows for continuous knowledge updates and integration of domain-specific information. RAG synergistically merges LLMs' intrinsic knowledge with the vast, dynamic repositories of external databases. 
This comprehensive review paper offers a detailed examination of the progression of RAG paradigms, encompassing the Naive RAG, the Advanced RAG, and the Modular RAG. It meticulously scrutinizes the tripartite foundation of RAG frameworks, which includes the retrieval, the generation and the augmentation techniques. The paper highlights the state-of-the-art technologies embedded in each of these critical components, providing a profound understanding of the advancements in RAG systems. Furthermore, this paper introduces up-to-date evaluation framework and benchmark. At the end, this article delineates the challenges currently faced and points out prospective avenues for research and development.", "year": 2023, "venue": "arXiv.org", "authors": [ "Yunfan Gao", "Yun Xiong", "Xinyu Gao", "Kangxiang Jia", "Jinliu Pan", "Yuxi Bi", "Yi Dai", "Jiawei Sun", "Qianyu Guo", "Meng Wang", "Haofen Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2312-10997", "ArXiv": "2312.10997", "DOI": "10.48550/arXiv.2312.10997", "CorpusId": 266359151 }, "url": "https://www.semanticscholar.org/paper/46f9f7b8f88f72e12cbdb21e3311f995eb6e65c5", "referenceCount": 229, "citationCount": 573, "influentialCitationCount": 36, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Grow Your Limits: Continuous Improvement with Real-World RL for Robotic Locomotion", "abstract": "Deep reinforcement learning can enable robots to autonomously acquire complex behaviors such as legged locomotion. However, RL in the real world is complicated by constraints on efficiency, safety, and overall training stability, which limits its practical applicability. We present APRL, a policy regularization framework that modulates the robot’s exploration throughout training, striking a balance between flexible improvement potential and focused, efficient exploration. APRL enables a quadrupedal robot to efficiently learn to walk entirely in the real world within minutes and continue to improve with more training where prior work saturates in performance. We demonstrate that continued training with APRL results in a policy that is substantially more capable of navigating challenging situations and adapts to changes in dynamics. Videos and code to reproduce our results are available at: https://sites.google.com/berkeley.edu/aprl", "year": 2023, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Laura M. Smith", "Yunhao Cao", "Sergey Levine" ], "externalIds": { "DBLP": "conf/icra/SmithCL24", "ArXiv": "2310.17634", "DOI": "10.1109/ICRA57147.2024.10610485", "CorpusId": 264491092 }, "url": "https://www.semanticscholar.org/paper/665ca0f0e7abc7cb0fd28281dfa820251cf76153", "referenceCount": 64, "citationCount": 11, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Multiple Gaits within Latent Space for Quadruped Robots", "abstract": "Learning multiple gaits is non-trivial for legged robots, especially when encountering different terrains and velocity commands. In this work, we present an end-to-end training framework for learning multiple gaits for quadruped robots, tailored to the needs of robust locomotion, agile locomotion, and user's commands. A latent space is constructed concurrently by a gait encoder and a gait generator, which helps the agent to reuse multiple gait skills to achieve adaptive gait behaviors. 
To learn natural behaviors for multiple gaits, we design gait-dependent rewards that are constructed explicitly from gait parameters and implicitly from conditional adversarial motion priors (CAMP). We demonstrate such multiple gaits control on a quadruped robot Go1 with only proprioceptive sensors.", "year": 2023, "venue": "arXiv.org", "authors": [ "Jinze Wu", "Yufei Xue", "Chenkun Qi" ], "externalIds": { "DBLP": "journals/corr/abs-2308-03014", "ArXiv": "2308.03014", "DOI": "10.48550/arXiv.2308.03014", "CorpusId": 260682916 }, "url": "https://www.semanticscholar.org/paper/53bf8e5c182f007d7327323424d6e9c7d9413d8e", "referenceCount": 31, "citationCount": 11, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Robust and Agile Legged Locomotion Using Adversarial Motion Priors", "abstract": "Developing both robust and agile locomotion skills for legged robots is non-trivial. In this work, we present the first blind locomotion system capable of traversing challenging terrains robustly while moving rapidly over natural terrains. Our approach incorporates the Adversarial Motion Priors (AMP) in locomotion policy training and demonstrates zero-shot generalization from the motion dataset on flat terrains to challenging terrains in the real world. We show this result on a quadruped robot Go1 using only proprioceptive sensors consisting of the IMU and joint encoders. Experiments on the Go1 demonstrate the robust and natural motion generated by the proposed method for traversing challenging terrains while moving rapidly over natural terrains.", "year": 2023, "venue": "IEEE Robotics and Automation Letters", "authors": [ "Jinze Wu", "Guiyang Xin", "Chenkun Qi", "Yufei Xue" ], "externalIds": { "DBLP": "journals/ral/WuXQX23", "DOI": "10.1109/LRA.2023.3290509", "CorpusId": 259363534 }, "url": "https://www.semanticscholar.org/paper/d8ad8305d0a31ff507577036331c32a087dac4f1", "referenceCount": 38, "citationCount": 34, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Definition of Continual Reinforcement Learning", "abstract": "In a standard view of the reinforcement learning problem, an agent's goal is to efficiently identify a policy that maximizes long-term reward. However, this perspective is based on a restricted view of learning as finding a solution, rather than treating learning as endless adaptation. In contrast, continual reinforcement learning refers to the setting in which the best agents never stop learning. Despite the importance of continual reinforcement learning, the community lacks a simple definition of the problem that highlights its commitments and makes its primary concepts precise and clear. To this end, this paper is dedicated to carefully defining the continual reinforcement learning problem. We formalize the notion of agents that\"never stop learning\"through a new mathematical language for analyzing and cataloging agents. Using this new language, we define a continual learning agent as one that can be understood as carrying out an implicit search process indefinitely, and continual reinforcement learning as the setting in which the best agents are all continual learning agents. We provide two motivating examples, illustrating that traditional views of multi-task reinforcement learning and continual supervised learning are special cases of our definition. 
Collectively, these definitions and perspectives formalize many intuitive concepts at the heart of learning, and open new research pathways surrounding continual learning agents.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "David Abel", "André Barreto", "Benjamin Van Roy", "Doina Precup", "H. V. Hasselt", "Satinder Singh" ], "externalIds": { "DBLP": "conf/nips/Abel0RPHS23", "ArXiv": "2307.11046", "DOI": "10.48550/arXiv.2307.11046", "CorpusId": 259991454 }, "url": "https://www.semanticscholar.org/paper/da1766e02346e7eb238249d06643459450ffdde6", "referenceCount": 69, "citationCount": 37, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Demonstrating A Walk in the Park: Learning to Walk in 20 Minutes With Model-Free Reinforcement Learning", "abstract": "—Deep reinforcement learning is a promising approach to learning policies in unstructured environments. Due to its sample inefficiency, though, deep RL applications have primarily focused on simulated environments. In this work, we demonstrate that the recent advancements in machine learning algorithms and libraries combined with careful MDP formulation lead to learning quadruped locomotion in only 20 minutes in the real world. We evaluate our approach on several indoor and outdoor terrains that are known to be challenging for classical, model-based controllers and observe that the robot consistently learns a walking gait on all of these terrains. Finally, we evaluate our design decisions in a simulated environment. We provide videos of all real-world training and code to reproduce our results on our website: https://sites.google.com/berkeley.", "year": 2023, "venue": "Robotics: Science and Systems", "authors": [ "Laura M. Smith", "Ilya Kostrikov", "S. Levine" ], "externalIds": { "DBLP": "conf/rss/KostrikovSL23", "DOI": "10.15607/RSS.2023.XIX.056", "CorpusId": 251594552 }, "url": "https://www.semanticscholar.org/paper/de7473c817c4ebf0c22c09b3c0a5e983b27523c8", "referenceCount": 71, "citationCount": 19, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DreamWaQ: Learning Robust Quadrupedal Locomotion With Implicit Terrain Imagination via Deep Reinforcement Learning", "abstract": "Quadrupedal robots resemble the physical ability of legged animals to walk through unstructured terrains. However, designing a controller for quadrupedal robots poses a significant challenge due to their functional complexity and requires adaptation to various terrains. Recently, deep reinforcement learning, inspired by how legged animals learn to walk from their experiences, has been utilized to synthesize natural quadrupedal locomotion. However, state-of-the-art methods strongly depend on a complex and reliable sensing framework. Furthermore, prior works that rely only on proprioception have shown a limited demonstration for overcoming challenging terrains, especially for a long distance. This work proposes a novel quadrupedal locomotion learning framework that allows quadrupedal robots to walk through challenging terrains, even with limited sensing modalities. The proposed framework was validated in real-world outdoor environments with varying conditions within a single run for a long distance.", "year": 2023, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "I. 
Made", "Aswin Nahrendra", "Byeong-Uk Yu", "Hyunsam Myung" ], "externalIds": { "DBLP": "journals/corr/abs-2301-10602", "ArXiv": "2301.10602", "DOI": "10.1109/ICRA48891.2023.10161144", "CorpusId": 256231406 }, "url": "https://www.semanticscholar.org/paper/980b07368e08932b8d3e089c7509039cb30edce6", "referenceCount": 39, "citationCount": 37, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Walk These Ways: Tuning Robot Control for Generalization with Multiplicity of Behavior", "abstract": "Learned locomotion policies can rapidly adapt to diverse environments similar to those experienced during training but lack a mechanism for fast tuning when they fail in an out-of-distribution test environment. This necessitates a slow and iterative cycle of reward and environment redesign to achieve good performance on a new task. As an alternative, we propose learning a single policy that encodes a structured family of locomotion strategies that solve training tasks in different ways, resulting in Multiplicity of Behavior (MoB). Different strategies generalize differently and can be chosen in real-time for new tasks or environments, bypassing the need for time-consuming retraining. We release a fast, robust open-source MoB locomotion controller, Walk These Ways, that can execute diverse gaits with variable footswing, posture, and speed, unlocking diverse downstream tasks: crouching, hopping, high-speed running, stair traversal, bracing against shoves, rhythmic dance, and more. Video and code release: https://gmargo11.github.io/walk-these-ways/", "year": 2022, "venue": "Conference on Robot Learning", "authors": [ "G. Margolis" ], "externalIds": { "DBLP": "journals/corr/abs-2212-03238", "ArXiv": "2212.03238", "DOI": "10.48550/arXiv.2212.03238", "CorpusId": 254192949 }, "url": "https://www.semanticscholar.org/paper/039cac821166586d4e9bebf1070941f3939e0d71", "referenceCount": 29, "citationCount": 94, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Legged Locomotion in Challenging Terrains using Egocentric Vision", "abstract": "Animals are capable of precise and agile locomotion using vision. Replicating this ability has been a long-standing goal in robotics. The traditional approach has been to decompose this problem into elevation mapping and foothold planning phases. The elevation mapping, however, is susceptible to failure and large noise artifacts, requires specialized hardware, and is biologically implausible. In this paper, we present the first end-to-end locomotion system capable of traversing stairs, curbs, stepping stones, and gaps. We show this result on a medium-sized quadruped robot using a single front-facing depth camera. The small size of the robot necessitates discovering specialized gait patterns not seen elsewhere. The egocentric camera requires the policy to remember past information to estimate the terrain under its hind feet. We train our policy in simulation. Training has two phases - first, we train a policy using reinforcement learning with a cheap-to-compute variant of depth image and then in phase 2 distill it into the final policy that uses depth using supervised learning. The resulting policy transfers to the real world and is able to run in real-time on the limited compute of the robot. It can traverse a large variety of terrain while being robust to perturbations like pushes, slippery surfaces, and rocky terrain. 
Videos are at https://vision-locomotion.github.io", "year": 2022, "venue": "Conference on Robot Learning", "authors": [ "Ananye Agarwal", "Ashish Kumar", "Jitendra Malik", "Deepak Pathak" ], "externalIds": { "DBLP": "journals/corr/abs-2211-07638", "ArXiv": "2211.07638", "DOI": "10.48550/arXiv.2211.07638", "CorpusId": 252733339 }, "url": "https://www.semanticscholar.org/paper/c25933713708bca9a7a2c1379e9783c88a3afdd3", "referenceCount": 92, "citationCount": 149, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "VIP: Towards Universal Visual Reward and Representation via Value-Implicit Pre-Training", "abstract": "Reward and representation learning are two long-standing challenges for learning an expanding set of robot manipulation skills from sensory observations. Given the inherent cost and scarcity of in-domain, task-specific robot data, learning from large, diverse, offline human videos has emerged as a promising path towards acquiring a generally useful visual representation for control; however, how these human videos can be used for general-purpose reward learning remains an open question. We introduce $\\textbf{V}$alue-$\\textbf{I}$mplicit $\\textbf{P}$re-training (VIP), a self-supervised pre-trained visual representation capable of generating dense and smooth reward functions for unseen robotic tasks. VIP casts representation learning from human videos as an offline goal-conditioned reinforcement learning problem and derives a self-supervised dual goal-conditioned value-function objective that does not depend on actions, enabling pre-training on unlabeled human videos. Theoretically, VIP can be understood as a novel implicit time contrastive objective that generates a temporally smooth embedding, enabling the value function to be implicitly defined via the embedding distance, which can then be used to construct the reward for any goal-image specified downstream task. Trained on large-scale Ego4D human videos and without any fine-tuning on in-domain, task-specific data, VIP's frozen representation can provide dense visual reward for an extensive set of simulated and $\\textbf{real-robot}$ tasks, enabling diverse reward-based visual control methods and significantly outperforming all prior pre-trained representations. Notably, VIP can enable simple, $\\textbf{few-shot}$ offline RL on a suite of real-world robot tasks with as few as 20 trajectories.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Yecheng Jason Ma", "Shagun Sodhani", "Dinesh Jayaraman", "O. Bastani", "Vikash Kumar", "Amy Zhang" ], "externalIds": { "DBLP": "journals/corr/abs-2210-00030", "ArXiv": "2210.00030", "DOI": "10.48550/arXiv.2210.00030", "CorpusId": 252683397 }, "url": "https://www.semanticscholar.org/paper/3fbe2e8413df0207c26ff393c9aaa8488e3ca4c3", "referenceCount": 71, "citationCount": 190, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Disentangling Transfer in Continual Reinforcement Learning", "abstract": "The ability of continual learning systems to transfer knowledge from previously seen tasks in order to maximize performance on new tasks is a significant challenge for the field, limiting the applicability of continual learning solutions to realistic scenarios. Consequently, this study aims to broaden our understanding of transfer and its driving forces in the specific case of continual reinforcement learning. 
We adopt SAC as the underlying RL algorithm and Continual World as a suite of continuous control tasks. We systematically study how different components of SAC (the actor and the critic, exploration, and data) affect transfer efficacy, and we provide recommendations regarding various modeling options. The best set of choices, dubbed ClonEx-SAC, is evaluated on the recent Continual World benchmark. ClonEx-SAC achieves 87% final success rate compared to 80% of PackNet, the best method in the benchmark. Moreover, the transfer grows from 0.18 to 0.54 according to the metric provided by Continual World.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Maciej Wołczyk", "Michał Zając", "Razvan Pascanu", "Łukasz Kuciński", "Piotr Miłoś" ], "externalIds": { "DBLP": "journals/corr/abs-2209-13900", "ArXiv": "2209.13900", "DOI": "10.48550/arXiv.2209.13900", "CorpusId": 252567887 }, "url": "https://www.semanticscholar.org/paper/fc3b5dc5528e24dbbc8f4ec273e622ce40eec855", "referenceCount": 57, "citationCount": 22, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DayDreamer: World Models for Physical Robot Learning", "abstract": "To solve tasks in complex environments, robots need to learn from experience. Deep reinforcement learning is a common approach to robot learning but requires a large amount of trial and error to learn, limiting its deployment in the physical world. As a consequence, many advances in robot learning rely on simulators. On the other hand, learning inside of simulators fails to capture the complexity of the real world, is prone to simulator inaccuracies, and the resulting behaviors do not adapt to changes in the world. The Dreamer algorithm has recently shown great promise for learning from small amounts of interaction by planning within a learned world model, outperforming pure reinforcement learning in video games. Learning a world model to predict the outcomes of potential actions enables planning in imagination, reducing the amount of trial and error needed in the real environment. However, it is unknown whether Dreamer can facilitate faster learning on physical robots. In this paper, we apply Dreamer to 4 robots to learn online and directly in the real world, without simulators. Dreamer trains a quadruped robot to roll off its back, stand up, and walk from scratch and without resets in only 1 hour. We then push the robot and find that Dreamer adapts within 10 minutes to withstand perturbations or quickly roll over and stand back up. On two different robotic arms, Dreamer learns to pick and place multiple objects directly from camera images and sparse rewards, approaching human performance. On a wheeled robot, Dreamer learns to navigate to a goal position purely from camera images, automatically resolving ambiguity about the robot orientation. Using the same hyperparameters across all experiments, we find that Dreamer is capable of online learning in the real world, establishing a strong baseline. We release our infrastructure for future applications of world models to robot learning.", "year": 2022, "venue": "Conference on Robot Learning", "authors": [ "Philipp Wu", "Alejandro Escontrela", "Danijar Hafner", "Ken Goldberg", "P. 
Abbeel" ], "externalIds": { "DBLP": "conf/corl/WuEHAG22", "ArXiv": "2206.14176", "CorpusId": 250088882 }, "url": "https://www.semanticscholar.org/paper/25bc06b508b2c63b9faf77881e528530b147b988", "referenceCount": 74, "citationCount": 196, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Contrastive Learning as Goal-Conditioned Reinforcement Learning", "abstract": "In reinforcement learning (RL), it is easier to solve a task if given a good representation. While deep RL should automatically acquire such good representations, prior work often finds that learning representations in an end-to-end fashion is unstable and instead equip RL algorithms with additional representation learning parts (e.g., auxiliary losses, data augmentation). How can we design RL algorithms that directly acquire good representations? In this paper, instead of adding representation learning parts to an existing RL algorithm, we show (contrastive) representation learning methods can be cast as RL algorithms in their own right. To do this, we build upon prior work and apply contrastive representation learning to action-labeled trajectories, in such a way that the (inner product of) learned representations exactly corresponds to a goal-conditioned value function. We use this idea to reinterpret a prior RL method as performing contrastive learning, and then use the idea to propose a much simpler method that achieves similar performance. Across a range of goal-conditioned RL tasks, we demonstrate that contrastive RL methods achieve higher success rates than prior non-contrastive methods, including in the offline RL setting. We also show that contrastive RL outperforms prior methods on image-based tasks, without using data augmentation or auxiliary objectives.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Benjamin Eysenbach", "Tianjun Zhang", "R. Salakhutdinov", "S. Levine" ], "externalIds": { "ArXiv": "2206.07568", "DBLP": "journals/corr/abs-2206-07568", "DOI": "10.48550/arXiv.2206.07568", "CorpusId": 249674522 }, "url": "https://www.semanticscholar.org/paper/53dcf467fbded741dd08902d4203a9b57e889c87", "referenceCount": 151, "citationCount": 103, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Adversarial Motion Priors Make Good Substitutes for Complex Reward Functions", "abstract": "Training a high-dimensional simulated agent with an under-specified reward function often leads the agent to learn physically infeasible strategies that are ineffective when deployed in the real world. To mitigate these unnatural behaviors, reinforcement learning practitioners often utilize complex reward functions that encourage physically plausible behaviors. However, a tedious labor-intensive tuning process is often required to create hand-designed rewards which might not easily generalize across platforms and tasks. We propose substituting complex reward functions with “style rewards” learned from a dataset of motion capture demonstrations. A learned style reward can be combined with an arbitrary task reward to train policies that perform tasks using naturalistic strategies. These natural strategies can also facilitate transfer to the real world. 
We build upon Adversarial Motion Priors - an approach from the computer graphics domain that encodes a style reward from a dataset of reference motions - to demonstrate that an adversarial approach to training policies can produce behaviors that transfer to a real quadrupedal robot without requiring complex reward functions. We also demonstrate that an effective style reward can be learned from a few seconds of motion capture data gathered from a German Shepherd and leads to energy-efficient locomotion strategies with natural gait transitions.", "year": 2022, "venue": "IEEE/RJS International Conference on Intelligent RObots and Systems", "authors": [ "Alejandro Escontrela", "X. B. Peng", "Wenhao Yu", "Tingnan Zhang", "Atil Iscen", "Ken Goldberg", "P. Abbeel" ], "externalIds": { "DBLP": "journals/corr/abs-2203-15103", "ArXiv": "2203.15103", "DOI": "10.1109/IROS47612.2022.9981973", "CorpusId": 247778924 }, "url": "https://www.semanticscholar.org/paper/c14605e00dc35886a5767ccc15a5882209a9d1f9", "referenceCount": 57, "citationCount": 80, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "R3M: A Universal Visual Representation for Robot Manipulation", "abstract": "We study how visual representations pre-trained on diverse human video data can enable data-efficient learning of downstream robotic manipulation tasks. Concretely, we pre-train a visual representation using the Ego4D human video dataset using a combination of time-contrastive learning, video-language alignment, and an L1 penalty to encourage sparse and compact representations. The resulting representation, R3M, can be used as a frozen perception module for downstream policy learning. Across a suite of 12 simulated robot manipulation tasks, we find that R3M improves task success by over 20% compared to training from scratch and by over 10% compared to state-of-the-art visual representations like CLIP and MoCo. Furthermore, R3M enables a Franka Emika Panda arm to learn a range of manipulation tasks in a real, cluttered apartment given just 20 demonstrations. Code and pre-trained models are available at https://tinyurl.com/robotr3m.", "year": 2022, "venue": "Conference on Robot Learning", "authors": [ "Suraj Nair", "A. Rajeswaran", "Vikash Kumar", "Chelsea Finn", "Abhi Gupta" ], "externalIds": { "DBLP": "conf/corl/NairRKF022", "ArXiv": "2203.12601", "DOI": "10.48550/arXiv.2203.12601", "CorpusId": 247618840 }, "url": "https://www.semanticscholar.org/paper/c9bdc9ad2c3cf3230ba9aac7b5783ab411f0d204", "referenceCount": 74, "citationCount": 385, "influentialCitationCount": 60, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Legged Robots that Keep on Learning: Fine-Tuning Locomotion Policies in the Real World", "abstract": "Legged robots are physically capable of traversing a wide range of challenging environments, but designing controllers that are sufficiently robust to handle this diversity has been a long-standing challenge in robotics. Reinforcement learning presents an appealing approach for automating the controller design process and has been able to produce remarkably robust controllers when trained in a suitable range of environments. However, it is difficult to predict all likely conditions the robot will encounter during deployment and enumerate them at training-time. What if instead of training controllers that are robust enough to handle any eventuality, we enable the robot to continually learn in any setting it finds itself in? 
This kind of real-world reinforcement learning poses a number of challenges, including efficiency, safety, and autonomy. To address these challenges, we propose a practical robot reinforcement learning system for fine-tuning locomotion policies in the real world. We demonstrate that a modest amount of real-world training can substantially improve performance during deployment, and this enables a real A1 quadrupedal robot to autonomously fine-tune multiple locomotion skills in a range of environments, including an outdoor lawn and a variety of indoor terrains. (Videos and code: https://sites.google.com/berkeley.edu/fine-tuning-locomotion)", "year": 2021, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Laura M. Smith", "J. Kew", "X. B. Peng", "Sehoon Ha", "Jie Tan", "S. Levine" ], "externalIds": { "ArXiv": "2110.05457", "DBLP": "conf/icra/SmithKPHTL22", "DOI": "10.1109/icra46639.2022.9812166", "CorpusId": 238583756 }, "url": "https://www.semanticscholar.org/paper/35efc3a4c5f64d96ded6daea692f3935c96f0415", "referenceCount": 45, "citationCount": 78, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning to Walk in Minutes Using Massively Parallel Deep Reinforcement Learning", "abstract": "In this work, we present and study a training set-up that achieves fast policy generation for real-world robotic tasks by using massive parallelism on a single workstation GPU. We analyze and discuss the impact of different training algorithm components in the massively parallel regime on the final policy performance and training times. In addition, we present a novel game-inspired curriculum that is well suited for training with thousands of simulated robots in parallel. We evaluate the approach by training the quadrupedal robot ANYmal to walk on challenging terrain. The parallel approach allows training policies for flat terrain in under four minutes, and in twenty minutes for uneven terrain. This represents a speedup of multiple orders of magnitude compared to previous work. Finally, we transfer the policies to the real robot to validate the approach. We open-source our training code to help accelerate further research in the field of learned legged locomotion.", "year": 2021, "venue": "Conference on Robot Learning", "authors": [ "N. Rudin", "David Hoeller", "Philipp Reist", "Marco Hutter" ], "externalIds": { "DBLP": "journals/corr/abs-2109-11978", "ArXiv": "2109.11978", "CorpusId": 237635100 }, "url": "https://www.semanticscholar.org/paper/ca6096142016a2ba8133f6cb2c04ad30f5eae730", "referenceCount": 29, "citationCount": 379, "influentialCitationCount": 61, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Isaac Gym: High Performance GPU-Based Physics Simulation For Robot Learning", "abstract": "Isaac Gym offers a high performance learning platform to train policies for wide variety of robotics tasks directly on GPU. Both physics simulation and the neural network policy training reside on GPU and communicate by directly passing data from physics buffers to PyTorch tensors without ever going through any CPU bottlenecks. This leads to blazing fast training times for complex robotics tasks on a single GPU with 2-3 orders of magnitude improvements compared to conventional RL training that uses a CPU based simulator and GPU for neural networks. 
We host the results and videos at \\url{https://sites.google.com/view/isaacgym-nvidia} and isaac gym can be downloaded at \\url{https://developer.nvidia.com/isaac-gym}.", "year": 2021, "venue": "NeurIPS Datasets and Benchmarks", "authors": [ "Viktor Makoviychuk", "Lukasz Wawrzyniak", "Yunrong Guo", "Michelle Lu", "Kier Storey", "M. Macklin", "David Hoeller", "N. Rudin", "Arthur Allshire", "Ankur Handa", "Gavriel State" ], "externalIds": { "DBLP": "conf/nips/MakoviychukWGLS21", "ArXiv": "2108.10470", "CorpusId": 237277983 }, "url": "https://www.semanticscholar.org/paper/49142e3e381c0dc7fee0049ea41d2ef02c0340d7", "referenceCount": 42, "citationCount": 712, "influentialCitationCount": 65, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RMA: Rapid Motor Adaptation for Legged Robots", "abstract": "Successful real-world deployment of legged robots would require them to adapt in real-time to unseen scenarios like changing terrains, changing payloads, wear and tear. This paper presents Rapid Motor Adaptation (RMA) algorithm to solve this problem of real-time online adaptation in quadruped robots. RMA consists of two components: a base policy and an adaptation module. The combination of these components enables the robot to adapt to novel situations in fractions of a second. RMA is trained completely in simulation without using any domain knowledge like reference trajectories or predefined foot trajectory generators and is deployed on the A1 robot without any fine-tuning. We train RMA on a varied terrain generator using bioenergetics-inspired rewards and deploy it on a variety of difficult terrains including rocky, slippery, deformable surfaces in environments with grass, long vegetation, concrete, pebbles, stairs, sand, etc. RMA shows state-of-the-art performance across diverse real-world as well as simulation experiments. Video results at https://ashish-kmr.github.io/rma-legged-robots/", "year": 2021, "venue": "Robotics: Science and Systems", "authors": [ "Ashish Kumar", "Zipeng Fu", "Deepak Pathak", "Jitendra Malik" ], "externalIds": { "DBLP": "journals/corr/abs-2107-04034", "ArXiv": "2107.04034", "DOI": "10.15607/RSS.2021.XVII.011", "CorpusId": 235650916 }, "url": "https://www.semanticscholar.org/paper/1ca5ff6555d9fc634d3858d1fda9b3de2a91b13a", "referenceCount": 70, "citationCount": 413, "influentialCitationCount": 48, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Decision Transformer: Reinforcement Learning via Sequence Modeling", "abstract": "We introduce a framework that abstracts Reinforcement Learning (RL) as a sequence modeling problem. This allows us to draw upon the simplicity and scalability of the Transformer architecture, and associated advances in language modeling such as GPT-x and BERT. In particular, we present Decision Transformer, an architecture that casts the problem of RL as conditional sequence modeling. Unlike prior approaches to RL that fit value functions or compute policy gradients, Decision Transformer simply outputs the optimal actions by leveraging a causally masked Transformer. By conditioning an autoregressive model on the desired return (reward), past states, and actions, our Decision Transformer model can generate future actions that achieve the desired return. 
Despite its simplicity, Decision Transformer matches or exceeds the performance of state-of-the-art model-free offline RL baselines on Atari, OpenAI Gym, and Key-to-Door tasks.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Lili Chen", "Kevin Lu", "A. Rajeswaran", "Kimin Lee", "Aditya Grover", "M. Laskin", "P. Abbeel", "A. Srinivas", "Igor Mordatch" ], "externalIds": { "DBLP": "journals/corr/abs-2106-01345", "MAG": "3169291081", "ArXiv": "2106.01345", "CorpusId": 235294299 }, "url": "https://www.semanticscholar.org/paper/c1ad5f9b32d80f1c65d67894e5b8c2fdf0ae4500", "referenceCount": 91, "citationCount": 1220, "influentialCitationCount": 225, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Understanding the Behaviour of Contrastive Loss", "abstract": "Unsupervised contrastive learning has achieved out-standing success, while the mechanism of contrastive loss has been less studied. In this paper, we concentrate on the understanding of the behaviours of unsupervised contrastive loss. We will show that the contrastive loss is a hardness-aware loss function, and the temperature τ controls the strength of penalties on hard negative samples. The previous study has shown that uniformity is a key property of contrastive learning. We build relations between the uniformity and the temperature τ. We will show that uniformity helps the contrastive learning to learn separable features, however excessive pursuit to the uniformity makes the contrastive loss not tolerant to semantically similar samples, which may break the underlying semantic structure and be harmful to the formation of features useful for downstream tasks. This is caused by the inherent defect of the instance discrimination objective. Specifically, instance discrimination objective tries to push all different instances apart, ignoring the underlying relations between samples. Pushing semantically consistent samples apart has no positive effect for acquiring a prior informative to general downstream tasks. A well-designed contrastive loss should have some extents of tolerance to the closeness of semantically similar samples. Therefore, we find that the contrastive loss meets a uniformity-tolerance dilemma, and a good choice of temperature can compromise these two properties properly to both learn separable features and tolerant to semantically similar samples, improving the feature qualities and the downstream performances.", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Feng Wang", "Huaping Liu" ], "externalIds": { "MAG": "3113202745", "DBLP": "conf/cvpr/WangL21a", "ArXiv": "2012.09740", "DOI": "10.1109/CVPR46437.2021.00252", "CorpusId": 229297730 }, "url": "https://www.semanticscholar.org/paper/57eaad10369de402d3363c1d99c93810463eb03c", "referenceCount": 39, "citationCount": 545, "influentialCitationCount": 47, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Exploring Simple Siamese Representation Learning", "abstract": "Siamese networks have become a common structure in various recent models for unsupervised visual representation learning. These models maximize the similarity between two augmentations of one image, subject to certain conditions for avoiding collapsing solutions. In this paper, we report surprising empirical results that simple Siamese networks can learn meaningful representations even using none of the following: (i) negative sample pairs, (ii) large batches, (iii) momentum encoders. 
Our experiments show that collapsing solutions do exist for the loss and structure, but a stop-gradient operation plays an essential role in preventing collapsing. We provide a hypothesis on the implication of stop-gradient, and further show proof-of-concept experiments verifying it. Our \"SimSiam\" method achieves competitive results on ImageNet and downstream tasks. We hope this simple baseline will motivate people to rethink the roles of Siamese architectures for unsupervised representation learning. Code is made available.1", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Xinlei Chen", "Kaiming He" ], "externalIds": { "MAG": "3107668149", "DBLP": "conf/cvpr/ChenH21", "ArXiv": "2011.10566", "DOI": "10.1109/CVPR46437.2021.01549", "CorpusId": 227118869 }, "url": "https://www.semanticscholar.org/paper/0e23d2f14e7e56e81538f4a63e11689d8ac1eb9d", "referenceCount": 44, "citationCount": 3389, "influentialCitationCount": 639, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Dynamics Randomization Revisited: A Case Study for Quadrupedal Locomotion", "abstract": "Understanding the gap between simulation and reality is critical for reinforcement learning with legged robots, which are largely trained in simulation. However, recent work has resulted in sometimes conflicting conclusions with regard to which factors are important for success, including the role of dynamics randomization. In this paper, we aim to provide clarity and understanding on the role of dynamics randomization in learning robust locomotion policies for the Laikago quadruped robot. Surprisingly, in contrast to prior work with the same robot model, we find that direct sim-to-real transfer is possible without dynamics randomization or on-robot adaptation schemes. We conduct extensive ablation studies in a sim-to-sim setting to understand the key issues underlying successful policy transfer, including other design decisions that can impact policy robustness. We further ground our conclusions via sim-to-real experiments with various gaits, speeds, and stepping frequencies. Additional Details: pair.toronto.edu/understanding-dr/", "year": 2020, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Zhaoming Xie", "Xingye Da", "M. V. D. Panne", "Buck Babich", "Animesh Garg" ], "externalIds": { "DBLP": "conf/icra/XieDPBG21", "ArXiv": "2011.02404", "MAG": "3095999515", "DOI": "10.1109/ICRA48506.2021.9560837", "CorpusId": 226246191 }, "url": "https://www.semanticscholar.org/paper/903f344a59bf6f738368b15c0c19912ac171085b", "referenceCount": 33, "citationCount": 72, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning quadrupedal locomotion over challenging terrain", "abstract": "A learning-based locomotion controller enables a quadrupedal ANYmal robot to traverse challenging natural environments. Legged locomotion can extend the operational domain of robots to some of the most challenging environments on Earth. However, conventional controllers for legged locomotion are based on elaborate state machines that explicitly trigger the execution of motion primitives and reflexes. These designs have increased in complexity but fallen short of the generality and robustness of animal locomotion. Here, we present a robust controller for blind quadrupedal locomotion in challenging natural environments. 
Our approach incorporates proprioceptive feedback in locomotion control and demonstrates zero-shot generalization from simulation to natural environments. The controller is trained by reinforcement learning in simulation. The controller is driven by a neural network policy that acts on a stream of proprioceptive signals. The controller retains its robustness under conditions that were never encountered during training: deformable terrains such as mud and snow, dynamic footholds such as rubble, and overground impediments such as thick vegetation and gushing water. The presented work indicates that robust locomotion in natural environments can be achieved by training in simple domains.", "year": 2020, "venue": "Science Robotics", "authors": [ "Joonho Lee", "Jemin Hwangbo", "Lorenz Wellhausen", "V. Koltun", "Marco Hutter" ], "externalIds": { "ArXiv": "2010.11251", "DBLP": "journals/scirobotics/HwangboWK020", "MAG": "3093922502", "DOI": "10.1126/scirobotics.abc5986", "CorpusId": 224828219, "PubMed": "33087482" }, "url": "https://www.semanticscholar.org/paper/eadbe2e4f9de47dd357589cf59e3d1f0199e5075", "referenceCount": 56, "citationCount": 874, "influentialCitationCount": 53, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Medicine" ] }, { "title": "Contrastive Representation Learning: A Framework and Review", "abstract": "Contrastive Learning has recently received interest due to its success in self-supervised representation learning in the computer vision domain. However, the origins of Contrastive Learning date as far back as the 1990s and its development has spanned across many fields and domains including Metric Learning and natural language processing. In this paper, we provide a comprehensive literature review and we propose a general Contrastive Representation Learning framework that simplifies and unifies many different contrastive learning methods. We also provide a taxonomy for each of the components of contrastive learning in order to summarise it and distinguish it from other forms of machine learning. We then discuss the inductive biases which are present in any contrastive learning system and we analyse our framework under different views from various sub-fields of Machine Learning. Examples of how contrastive learning has been applied in computer vision, natural language processing, audio processing, and others, as well as in Reinforcement Learning are also presented. Finally, we discuss the challenges and some of the most promising future research directions ahead.", "year": 2020, "venue": "IEEE Access", "authors": [ "Phúc H. Lê Khắc", "G. Healy", "A. Smeaton" ], "externalIds": { "MAG": "3096655658", "DBLP": "journals/access/Le-KhacHS20", "ArXiv": "2010.05113", "DOI": "10.1109/ACCESS.2020.3031549", "CorpusId": 222291214 }, "url": "https://www.semanticscholar.org/paper/4f0a8cad6d6a8d0397ad1bd35acce6458aa7164c", "referenceCount": 143, "citationCount": 555, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Mastering Atari with Discrete World Models", "abstract": "Intelligent agents need to generalize from past experience to achieve goals in complex environments. World models facilitate such generalization and allow learning behaviors from imagined outcomes to increase sample-efficiency. 
While learning world models from image inputs has recently become feasible for some tasks, modeling Atari games accurately enough to derive successful behaviors has remained an open challenge for many years. We introduce DreamerV2, a reinforcement learning agent that learns behaviors purely from predictions in the compact latent space of a powerful world model. The world model uses discrete representations and is trained separately from the policy. DreamerV2 constitutes the first agent that achieves human-level performance on the Atari benchmark of 55 tasks by learning behaviors inside a separately trained world model. With the same computational budget and wall-clock time, DreamerV2 reaches 200M frames and exceeds the final performance of the top single-GPU agents IQN and Rainbow.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Danijar Hafner", "T. Lillicrap", "Mohammad Norouzi", "Jimmy Ba" ], "externalIds": { "ArXiv": "2010.02193", "DBLP": "journals/corr/abs-2010-02193", "MAG": "3091507139", "CorpusId": 222133157 }, "url": "https://www.semanticscholar.org/paper/b44bb1762640ed72091fd5f5fdc20719a6dc24af", "referenceCount": 81, "citationCount": 664, "influentialCitationCount": 122, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Transfer Learning in Deep Reinforcement Learning: A Survey", "abstract": "Reinforcement learning is a learning paradigm for solving sequential decision-making problems. Recent years have witnessed remarkable progress in reinforcement learning upon the fast development of deep neural networks. Along with the promising prospects of reinforcement learning in numerous domains such as robotics and game-playing, transfer learning has arisen to tackle various challenges faced by reinforcement learning, by transferring knowledge from external expertise to facilitate the efficiency and effectiveness of the learning process. In this survey, we systematically investigate the recent progress of transfer learning approaches in the context of deep reinforcement learning. Specifically, we provide a framework for categorizing the state-of-the-art transfer learning approaches, under which we analyze their goals, methodologies, compatible reinforcement learning backbones, and practical applications. We also draw connections between transfer learning and other relevant topics from the reinforcement learning perspective and explore their potential challenges that await future research progress.", "year": 2020, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Zhuangdi Zhu", "Kaixiang Lin", "Anil K. Jain", "Jiayu Zhou" ], "externalIds": { "ArXiv": "2009.07888", "DBLP": "journals/corr/abs-2009-07888", "MAG": "3085267010", "DOI": "10.1109/TPAMI.2023.3292075", "CorpusId": 221761694, "PubMed": "37402188" }, "url": "https://www.semanticscholar.org/paper/f8492a321d66c381637b693a24af994af41b3cdf", "referenceCount": 235, "citationCount": 386, "influentialCitationCount": 22, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics", "Medicine" ] }, { "title": "Decoupling Representation Learning from Reinforcement Learning", "abstract": "In an effort to overcome limitations of reward-driven feature learning in deep reinforcement learning (RL) from images, we propose decoupling representation learning from policy learning. 
To this end, we introduce a new unsupervised learning (UL) task, called Augmented Temporal Contrast (ATC), which trains a convolutional encoder to associate pairs of observations separated by a short time difference, under image augmentations and using a contrastive loss. In online RL experiments, we show that training the encoder exclusively using ATC matches or outperforms end-to-end RL in most environments. Additionally, we benchmark several leading UL algorithms by pre-training encoders on expert demonstrations and using them, with weights frozen, in RL agents; we find that agents using ATC-trained encoders outperform all others. We also train multi-task encoders on data from multiple environments and show generalization to different downstream RL tasks. Finally, we ablate components of ATC, and introduce a new data augmentation to enable replay of (compressed) latent images from pre-trained encoders when RL requires augmentation. Our experiments span visually diverse RL benchmarks in DeepMind Control, DeepMind Lab, and Atari, and our complete code is available at this https URL.", "year": 2020, "venue": "International Conference on Machine Learning", "authors": [ "Adam Stooke", "Kimin Lee", "P. Abbeel", "M. Laskin" ], "externalIds": { "MAG": "3085605093", "DBLP": "journals/corr/abs-2009-08319", "ArXiv": "2009.08319", "CorpusId": 221761383 }, "url": "https://www.semanticscholar.org/paper/17985b57240bfaea02a6098a7a34e71e780180eb", "referenceCount": 42, "citationCount": 296, "influentialCitationCount": 32, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Supervised Contrastive Learning", "abstract": "Cross entropy is the most widely used loss function for supervised training of image classification models. In this paper, we propose a novel training methodology that consistently outperforms cross entropy on supervised learning tasks across different architectures and data augmentations. We modify the batch contrastive loss, which has recently been shown to be very effective at learning powerful representations in the self-supervised setting. We are thus able to leverage label information more effectively than cross entropy. Clusters of points belonging to the same class are pulled together in embedding space, while simultaneously pushing apart clusters of samples from different classes. In addition to this, we leverage key ingredients such as large batch sizes and normalized embeddings, which have been shown to benefit self-supervised learning. On both ResNet-50 and ResNet-200, we outperform cross entropy by over 1%, setting a new state of the art number of 78.8% among methods that use AutoAugment data augmentation. The loss also shows clear benefits for robustness to natural corruptions on standard benchmarks on both calibration and accuracy. 
Compared to cross entropy, our supervised contrastive loss is more stable to hyperparameter settings such as optimizers or data augmentations.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Prannay Khosla", "Piotr Teterwak", "Chen Wang", "Aaron Sarna", "Yonglong Tian", "Phillip Isola", "Aaron Maschinot", "Ce Liu", "Dilip Krishnan" ], "externalIds": { "MAG": "3018378048", "ArXiv": "2004.11362", "DBLP": "journals/corr/abs-2004-11362", "CorpusId": 216080787 }, "url": "https://www.semanticscholar.org/paper/38643c2926b10f6f74f122a7037e2cd20d77c0f1", "referenceCount": 75, "citationCount": 3662, "influentialCitationCount": 619, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Continual Reinforcement Learning with Multi-Timescale Replay", "abstract": "In this paper, we propose a multi-timescale replay (MTR) buffer for improving continual learning in RL agents faced with environments that are changing continuously over time at timescales that are unknown to the agent. The basic MTR buffer comprises a cascade of sub-buffers that accumulate experiences at different timescales, enabling the agent to improve the trade-off between adaptation to new data and retention of old knowledge. We also combine the MTR framework with invariant risk minimization, with the idea of encouraging the agent to learn a policy that is robust across the various environments it encounters over time. The MTR methods are evaluated in three different continual learning settings on two continuous control tasks and, in many cases, show improvement over the baselines.", "year": 2020, "venue": "arXiv.org", "authors": [ "Christos Kaplanis", "C. Clopath", "M. Shanahan" ], "externalIds": { "ArXiv": "2004.07530", "MAG": "3017258201", "DBLP": "journals/corr/abs-2004-07530", "CorpusId": 215786063 }, "url": "https://www.semanticscholar.org/paper/244b87a37da4a763baa703db462eb74e51f560d2", "referenceCount": 34, "citationCount": 13, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "CURL: Contrastive Unsupervised Representations for Reinforcement Learning", "abstract": "We present CURL: Contrastive Unsupervised Representations for Reinforcement Learning. CURL extracts high-level features from raw pixels using contrastive learning and performs off-policy control on top of the extracted features. CURL outperforms prior pixel-based methods, both model-based and model-free, on complex tasks in the DeepMind Control Suite and Atari Games showing 1.9x and 1.2x performance gains at the 100K environment and interaction steps benchmarks respectively. On the DeepMind Control Suite, CURL is the first image-based algorithm to nearly match the sample-efficiency of methods that use state-based features. Our code is open-sourced and available at this https URL.", "year": 2020, "venue": "International Conference on Machine Learning", "authors": [ "A. Srinivas", "M. Laskin", "P. 
Abbeel" ], "externalIds": { "MAG": "3015437096", "DBLP": "journals/corr/abs-2004-04136", "ArXiv": "2004.04136", "CorpusId": 215415964 }, "url": "https://www.semanticscholar.org/paper/9efb64f20ab1f157ca9f4050d4aaacf6c3f9b2b2", "referenceCount": 65, "citationCount": 951, "influentialCitationCount": 161, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Dream to Control: Learning Behaviors by Latent Imagination", "abstract": "To select effective actions in complex environments, intelligent agents need to generalize from past experience. World models can represent knowledge about the environment to facilitate such generalization. While learning world models from high-dimensional sensory inputs is becoming feasible through deep learning, there are many potential ways for deriving behaviors from them. We present Dreamer, a reinforcement learning agent that solves long-horizon tasks purely by latent imagination. We efficiently learn behaviors by backpropagating analytic gradients of learned state values through trajectories imagined in the compact state space of a learned world model. On 20 challenging visual control tasks, Dreamer exceeds existing approaches in data-efficiency, computation time, and final performance.", "year": 2019, "venue": "International Conference on Learning Representations", "authors": [ "Danijar Hafner", "T. Lillicrap", "Jimmy Ba", "Mohammad Norouzi" ], "externalIds": { "MAG": "2995298643", "ArXiv": "1912.01603", "DBLP": "conf/iclr/HafnerLB020", "CorpusId": 208547755 }, "url": "https://www.semanticscholar.org/paper/0cc956565c7d249d4197eeb1dbab6523c648b2c9", "referenceCount": 71, "citationCount": 1084, "influentialCitationCount": 194, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Multimodal Representation Learning: A Survey", "abstract": "Multimodal representation learning, which aims to narrow the heterogeneity gap among different modalities, plays an indispensable role in the utilization of ubiquitous multimodal data. Due to the powerful representation ability with multiple levels of abstraction, deep learning-based multimodal representation learning has attracted much attention in recent years. In this paper, we provided a comprehensive survey on deep multimodal representation learning which has never been concentrated entirely. To facilitate the discussion on how the heterogeneity gap is narrowed, according to the underlying structures in which different modalities are integrated, we category deep multimodal representation learning methods into three frameworks: joint representation, coordinated representation, and encoder-decoder. Additionally, we review some typical models in this area ranging from conventional models to newly developed technologies. This paper highlights on the key issues of newly developed technologies, such as encoder-decoder model, generative adversarial networks, and attention mechanism in a multimodal representation learning perspective, which, to the best of our knowledge, have never been reviewed previously, even though they have become the major focuses of much contemporary research. For each framework or model, we discuss its basic structure, learning objective, application scenes, key issues, advantages, and disadvantages, such that both novel and experienced researchers can benefit from this survey. 
Finally, we suggest some important directions for future work.", "year": 2019, "venue": "IEEE Access", "authors": [ "Wenzhong Guo", "Jianwen Wang", "Shiping Wang" ], "externalIds": { "DBLP": "journals/access/GuoWW19", "MAG": "2946165673", "DOI": "10.1109/ACCESS.2019.2916887", "CorpusId": 169032532 }, "url": "https://www.semanticscholar.org/paper/c192c7d1d94e7a64de7e18e2f2fdffbf2909fcff", "referenceCount": 168, "citationCount": 329, "influentialCitationCount": 19, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Visual Foresight: Model-Based Deep Reinforcement Learning for Vision-Based Robotic Control", "abstract": "Deep reinforcement learning (RL) algorithms can learn complex robotic skills from raw sensory inputs, but have yet to achieve the kind of broad generalization and applicability demonstrated by deep learning methods in supervised domains. We present a deep RL method that is practical for real-world robotics tasks, such as robotic manipulation, and generalizes effectively to never-before-seen tasks and objects. In these settings, ground truth reward signals are typically unavailable, and we therefore propose a self-supervised model-based approach, where a predictive model learns to directly predict the future from raw sensory readings, such as camera images. At test time, we explore three distinct goal specification methods: designated pixels, where a user specifies desired object manipulation tasks by selecting particular pixels in an image and corresponding goal positions, goal images, where the desired goal state is specified with an image, and image classifiers, which define spaces of goal states. Our deep predictive models are trained using data collected autonomously and continuously by a robot interacting with hundreds of objects, without human supervision. We demonstrate that visual MPC can generalize to never-before-seen objects---both rigid and deformable---and solve a range of user-defined object manipulation tasks using the same model.", "year": 2018, "venue": "arXiv.org", "authors": [ "F. Ebert", "Chelsea Finn", "Sudeep Dasari", "Annie Xie", "Alex X. Lee", "S. Levine" ], "externalIds": { "MAG": "2902125520", "ArXiv": "1812.00568", "DBLP": "journals/corr/abs-1812-00568", "CorpusId": 54437075 }, "url": "https://www.semanticscholar.org/paper/54cd5a5ddd286442fa94da7ec344a7e76b9a6ccd", "referenceCount": 55, "citationCount": 348, "influentialCitationCount": 19, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Representation Learning with Contrastive Predictive Coding", "abstract": "While supervised learning has enabled great progress in many applications, unsupervised learning has not seen such widespread adoption, and remains an important and challenging endeavor for artificial intelligence. In this work, we propose a universal unsupervised learning approach to extract useful representations from high-dimensional data, which we call Contrastive Predictive Coding. The key insight of our model is to learn such representations by predicting the future in latent space by using powerful autoregressive models. We use a probabilistic contrastive loss which induces the latent space to capture information that is maximally useful to predict future samples. It also makes the model tractable by using negative sampling. 
While most prior work has focused on evaluating representations for a particular modality, we demonstrate that our approach is able to learn useful representations achieving strong performance on four distinct domains: speech, images, text and reinforcement learning in 3D environments.", "year": 2018, "venue": "arXiv.org", "authors": [ "Aäron van den Oord", "Yazhe Li", "O. Vinyals" ], "externalIds": { "DBLP": "journals/corr/abs-1807-03748", "ArXiv": "1807.03748", "MAG": "2842511635", "CorpusId": 49670925 }, "url": "https://www.semanticscholar.org/paper/b227f3e4c0dc96e5ac5426b85485a70f2175a205", "referenceCount": 57, "citationCount": 8591, "influentialCitationCount": 1192, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Sim-to-Real: Learning Agile Locomotion For Quadruped Robots", "abstract": "Designing agile locomotion for quadruped robots often requires extensive expertise and tedious manual tuning. In this paper, we present a system to automate this process by leveraging deep reinforcement learning techniques. Our system can learn quadruped locomotion from scratch using simple reward signals. In addition, users can provide an open loop reference to guide the learning process when more control over the learned gait is needed. The control policies are learned in a physics simulator and then deployed on real robots. In robotics, policies trained in simulation often do not transfer to the real world. We narrow this reality gap by improving the physics simulator and learning robust policies. We improve the simulation using system identification, developing an accurate actuator model and simulating latency. We learn robust controllers by randomizing the physical environments, adding perturbations and designing a compact observation space. We evaluate our system on two agile locomotion gaits: trotting and galloping. After learning in simulation, a quadruped robot can successfully perform both gaits in the real world.", "year": 2018, "venue": "Robotics: Science and Systems", "authors": [ "Jie Tan", "Tingnan Zhang", "Erwin Coumans", "Atil Iscen", "Yunfei Bai", "Danijar Hafner", "Steven Bohez", "Vincent Vanhoucke" ], "externalIds": { "MAG": "2798273187", "DBLP": "journals/corr/abs-1804-10332", "ArXiv": "1804.10332", "DOI": "10.15607/RSS.2018.XIV.010", "CorpusId": 13750177 }, "url": "https://www.semanticscholar.org/paper/4d3b69bdcd1d325d29badc6a38f2d6cc504fe7d1", "referenceCount": 54, "citationCount": 720, "influentialCitationCount": 45, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Selective Experience Replay for Lifelong Learning", "abstract": "\n \n Deep reinforcement learning has emerged as a powerful tool for a variety of learning tasks, however deep nets typically exhibit forgetting when learning multiple tasks in sequence. To mitigate forgetting, we propose an experience replay process that augments the standard FIFO buffer and selectively stores experiences in a long-term memory. We explore four strategies for selecting which experiences will be stored: favoring surprise, favoring reward, matching the global training distribution, and maximizing coverage of the state space. We show that distribution matching successfully prevents catastrophic forgetting, and is consistently the best approach on all domains tested. 
While distribution matching has better and more consistent performance, we identify one case in which coverage maximization is beneficial---when tasks that receive less training are more important. Overall, our results show that selective experience replay, when suitable selection algorithms are employed, can prevent catastrophic forgetting.\n \n", "year": 2018, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "David Isele", "Akansel Cosgun" ], "externalIds": { "DBLP": "journals/corr/abs-1802-10269", "ArXiv": "1802.10269", "MAG": "2788994997", "DOI": "10.1609/aaai.v32i1.11595", "CorpusId": 3622700 }, "url": "https://www.semanticscholar.org/paper/8c1650cb7c313ca9134edff68952c3defd793d04", "referenceCount": 50, "citationCount": 396, "influentialCitationCount": 27, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Integrating State Representation Learning Into Deep Reinforcement Learning", "abstract": "Most deep reinforcement learning techniques are unsuitable for robotics, as they require too much interaction time to learn useful, general control policies. This problem can be largely attributed to the fact that a state representation needs to be learned as a part of learning control policies, which can only be done through fitting expected returns based on observed rewards. While the reward function provides information on the desirability of the state of the world, it does not necessarily provide information on how to distill a good, general representation of that state from the sensory observations. State representation learning objectives can be used to help learn such a representation. While many of these objectives have been proposed, they are typically not directly combined with reinforcement learning algorithms. We investigate several methods for integrating state representation learning into reinforcement learning. In these methods, the state representation learning objectives help regularize the state representation during the reinforcement learning, and the reinforcement learning itself is viewed as a crucial state representation learning objective and allowed to help shape the representation. Using autonomous racing tests in the TORCS simulator, we show how the integrated methods quickly learn policies that generalize to new environments much better than deep reinforcement learning without state representation learning.", "year": 2018, "venue": "IEEE Robotics and Automation Letters", "authors": [ "Tim de Bruin", "Jens Kober", "K. Tuyls", "Robert Babuška" ], "externalIds": { "DBLP": "journals/ral/BruinKTB18", "MAG": "2790924949", "DOI": "10.1109/LRA.2018.2800101", "CorpusId": 3549024 }, "url": "https://www.semanticscholar.org/paper/84bb62e3f40434a1e367d24783bd81432a5396d6", "referenceCount": 32, "citationCount": 103, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Stochastic Variational Video Prediction", "abstract": "Predicting the future in real-world settings, particularly from raw sensory observations such as images, is exceptionally challenging. Real-world events can be stochastic and unpredictable, and the high dimensionality and complexity of natural images requires the predictive model to build an intricate understanding of the natural world. Many existing methods tackle this problem by making simplifying assumptions about the environment. One common assumption is that the outcome is deterministic and there is only one plausible future. 
This can lead to low-quality predictions in real-world settings with stochastic dynamics. In this paper, we develop a stochastic variational video prediction (SV2P) method that predicts a different possible future for each sample of its latent variables. To the best of our knowledge, our model is the first to provide effective stochastic multi-frame prediction for real-world video. We demonstrate the capability of the proposed method in predicting detailed future frames of videos on multiple real-world datasets, both action-free and action-conditioned. We find that our proposed method produces substantially improved video predictions when compared to the same model without stochasticity, and to other stochastic video prediction methods. Our SV2P implementation will be open sourced upon publication.", "year": 2017, "venue": "International Conference on Learning Representations", "authors": [ "M. Babaeizadeh", "Chelsea Finn", "D. Erhan", "R. Campbell", "S. Levine" ], "externalIds": { "DBLP": "conf/iclr/BabaeizadehFECL18", "MAG": "2906780586", "ArXiv": "1710.11252", "CorpusId": 9128667 }, "url": "https://www.semanticscholar.org/paper/59d86da5c5936e7a236678bf5eaaa7753c226fb1", "referenceCount": 43, "citationCount": 509, "influentialCitationCount": 59, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Sim-to-Real Transfer of Robotic Control with Dynamics Randomization", "abstract": "Simulations are attractive environments for training agents as they provide an abundant source of data and alleviate certain safety concerns during the training process. But the behaviours developed by agents in simulation are often specific to the characteristics of the simulator. Due to modeling error, strategies that are successful in simulation may not transfer to their real world counterparts. In this paper, we demonstrate a simple method to bridge this “reality gap”. By randomizing the dynamics of the simulator during training, we are able to develop policies that are capable of adapting to very different dynamics, including ones that differ significantly from the dynamics on which the policies were trained. This adaptivity enables the policies to generalize to the dynamics of the real world without any training on the physical system. Our approach is demonstrated on an object pushing task using a robotic arm. Despite being trained exclusively in simulation, our policies are able to maintain a similar level of performance when deployed on a real robot, reliably moving an object to a desired location from random initial configurations. We explore the impact of various design decisions and show that the resulting policies are robust to significant calibration error.", "year": 2017, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "X. B. Peng", "Marcin Andrychowicz", "Wojciech Zaremba", "P. 
Abbeel" ], "externalIds": { "MAG": "2949612618", "DBLP": "conf/icra/PengAZA18", "ArXiv": "1710.06537", "DOI": "10.1109/ICRA.2018.8460528", "CorpusId": 3707478 }, "url": "https://www.semanticscholar.org/paper/0af8cdb71ce9e5bf37ad2a11f05af293cfe62172", "referenceCount": 40, "citationCount": 1197, "influentialCitationCount": 67, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Proximal Policy Optimization Algorithms", "abstract": "We propose a new family of policy gradient methods for reinforcement learning, which alternate between sampling data through interaction with the environment, and optimizing a \"surrogate\" objective function using stochastic gradient ascent. Whereas standard policy gradient methods perform one gradient update per data sample, we propose a novel objective function that enables multiple epochs of minibatch updates. The new methods, which we call proximal policy optimization (PPO), have some of the benefits of trust region policy optimization (TRPO), but they are much simpler to implement, more general, and have better sample complexity (empirically). Our experiments test PPO on a collection of benchmark tasks, including simulated robotic locomotion and Atari game playing, and we show that PPO outperforms other online policy gradient methods, and overall strikes a favorable balance between sample complexity, simplicity, and wall-time.", "year": 2017, "venue": "arXiv.org", "authors": [ "John Schulman", "Filip Wolski", "Prafulla Dhariwal", "Alec Radford", "Oleg Klimov" ], "externalIds": { "MAG": "2736601468", "ArXiv": "1707.06347", "DBLP": "journals/corr/SchulmanWDRK17", "CorpusId": 28695052 }, "url": "https://www.semanticscholar.org/paper/dce6f9d4017b1785979e7520fd0834ef8cf02f4b", "referenceCount": 14, "citationCount": 14872, "influentialCitationCount": 3164, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Attention is All you Need", "abstract": "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Ashish Vaswani", "Noam M. Shazeer", "Niki Parmar", "Jakob Uszkoreit", "Llion Jones", "Aidan N. 
Gomez", "Lukasz Kaiser", "Illia Polosukhin" ], "externalIds": { "MAG": "2963403868", "DBLP": "conf/nips/VaswaniSPUJGKP17", "ArXiv": "1706.03762", "CorpusId": 13756489 }, "url": "https://www.semanticscholar.org/paper/204e3073870fae3d05bcbc2f6a8e263d9b72e776", "referenceCount": 41, "citationCount": 104988, "influentialCitationCount": 15363, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Domain randomization for transferring deep neural networks from simulation to the real world", "abstract": "Bridging the ‘reality gap’ that separates simulated robotics from experiments on hardware could accelerate robotic research through improved data availability. This paper explores domain randomization, a simple technique for training models on simulated images that transfer to real images by randomizing rendering in the simulator. With enough variability in the simulator, the real world may appear to the model as just another variation. We focus on the task of object localization, which is a stepping stone to general robotic manipulation skills. We find that it is possible to train a real-world object detector that is accurate to 1.5 cm and robust to distractors and partial occlusions using only data from a simulator with non-realistic random textures. To demonstrate the capabilities of our detectors, we show they can be used to perform grasping in a cluttered environment. To our knowledge, this is the first successful transfer of a deep neural network trained only on simulated RGB images (without pre-training on real images) to the real world for the purpose of robotic control.", "year": 2017, "venue": "IEEE/RJS International Conference on Intelligent RObots and Systems", "authors": [ "Joshua Tobin", "Rachel Fong", "Alex Ray", "Jonas Schneider", "Wojciech Zaremba", "P. Abbeel" ], "externalIds": { "DBLP": "journals/corr/TobinFRSZA17", "ArXiv": "1703.06907", "MAG": "2605102758", "DOI": "10.1109/IROS.2017.8202133", "CorpusId": 2413610 }, "url": "https://www.semanticscholar.org/paper/32ceb28e45a445df4d89df281bb0e3ab5aab1a2a", "referenceCount": 65, "citationCount": 2610, "influentialCitationCount": 168, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning modular neural network policies for multi-task and multi-robot transfer", "abstract": "Reinforcement learning (RL) can automate a wide variety of robotic skills, but learning each new skill requires considerable real-world data collection and manual representation engineering to design policy classes or features. Using deep reinforcement learning to train general purpose neural network policies alleviates some of the burden of manual representation engineering by using expressive policy classes, but exacerbates the challenge of data collection, since such methods tend to be less efficient than RL with low-dimensional, hand-designed representations. Transfer learning can mitigate this problem by enabling us to transfer information from one skill to another and even from one robot to another. We show that neural network policies can be decomposed into “task-specific” and “robot-specific” modules, where the task-specific modules are shared across robots, and the robot-specific modules are shared across all tasks on that robot. This allows for sharing task information, such as perception, between robots and sharing robot information, such as dynamics and kinematics, between tasks. 
We exploit this decomposition to train mix-and-match modules that can solve new robot-task combinations that were not seen during training. Using a novel approach to train modular neural networks, we demonstrate the effectiveness of our transfer method for enabling zero-shot generalization with a variety of robots and tasks in simulation for both visual and non-visual tasks.", "year": 2016, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Coline Devin", "Abhishek Gupta", "Trevor Darrell", "P. Abbeel", "S. Levine" ], "externalIds": { "MAG": "2526379199", "ArXiv": "1609.07088", "DBLP": "journals/corr/DevinGDAL16", "DOI": "10.1109/ICRA.2017.7989250", "CorpusId": 18015872 }, "url": "https://www.semanticscholar.org/paper/8fab7d7dfd233fd5d19bc2641b4c1ca74fc7bc6a", "referenceCount": 34, "citationCount": 377, "influentialCitationCount": 18, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Successor Features for Transfer in Reinforcement Learning", "abstract": "Transfer in reinforcement learning refers to the notion that generalization should occur not only within a task but also across tasks. We propose a transfer framework for the scenario where the reward function changes between tasks but the environment's dynamics remain the same. Our approach rests on two key ideas: \"successor features\", a value function representation that decouples the dynamics of the environment from the rewards, and \"generalized policy improvement\", a generalization of dynamic programming's policy improvement operation that considers a set of policies rather than a single one. Put together, the two ideas lead to an approach that integrates seamlessly within the reinforcement learning framework and allows the free exchange of information across tasks. The proposed method also provides performance guarantees for the transferred policy even before any learning has taken place. We derive two theorems that set our approach in firm theoretical ground and present experiments that show that it successfully promotes transfer in practice, significantly outperforming alternative methods in a sequence of navigation tasks and in the control of a simulated robotic arm.", "year": 2016, "venue": "Neural Information Processing Systems", "authors": [ "André Barreto", "Will Dabney", "R. Munos", "Jonathan J. Hunt", "T. Schaul", "David Silver", "H. V. Hasselt" ], "externalIds": { "DBLP": "journals/corr/BarretoMSS16", "ArXiv": "1606.05312", "MAG": "2950426624", "CorpusId": 4650427 }, "url": "https://www.semanticscholar.org/paper/d8686b657b61a37da351af2952aabd8b281de408", "referenceCount": 41, "citationCount": 525, "influentialCitationCount": 81, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Recurrent Latent Variable Model for Sequential Data", "abstract": "In this paper, we explore the inclusion of latent random variables into the hidden state of a recurrent neural network (RNN) by combining the elements of the variational autoencoder. We argue that through the use of high-level latent random variables, the variational RNN (VRNN)1 can model the kind of variability observed in highly structured sequential data such as natural speech. We empirically evaluate the proposed model against other related sequential models on four speech datasets and one handwriting dataset. 
Our results show the important roles that latent random variables can play in the RNN dynamics.", "year": 2015, "venue": "Neural Information Processing Systems", "authors": [ "Junyoung Chung", "Kyle Kastner", "Laurent Dinh", "Kratarth Goel", "Aaron C. Courville", "Yoshua Bengio" ], "externalIds": { "MAG": "2950067852", "DBLP": "journals/corr/ChungKDGCB15", "ArXiv": "1506.02216", "CorpusId": 1594370 }, "url": "https://www.semanticscholar.org/paper/0c3b69b5247ef18fd5bab1109d87a04184ea8f4b", "referenceCount": 19, "citationCount": 1184, "influentialCitationCount": 201, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MuJoCo: A physics engine for model-based control", "abstract": "We describe a new physics engine tailored to model-based control. Multi-joint dynamics are represented in generalized coordinates and computed via recursive algorithms. Contact responses are computed via efficient new algorithms we have developed, based on the modern velocity-stepping approach which avoids the difficulties with spring-dampers. Models are specified using either a high-level C++ API or an intuitive XML file format. A built-in compiler transforms the user model into an optimized data structure used for runtime computation. The engine can compute both forward and inverse dynamics. The latter are well-defined even in the presence of contacts and equality constraints. The model can include tendon wrapping as well as actuator activation states (e.g. pneumatic cylinders or muscles). To facilitate optimal control applications and in particular sampling and finite differencing, the dynamics can be evaluated for different states and controls in parallel. Around 400,000 dynamics evaluations per second are possible on a 12-core machine, for a 3D humanoid with 18 dofs and 6 active contacts. We have already used the engine in a number of control applications. It will soon be made publicly available.", "year": 2012, "venue": "2012 IEEE/RSJ International Conference on Intelligent Robots and Systems", "authors": [ "E. Todorov", "Tom Erez", "Yuval Tassa" ], "externalIds": { "DBLP": "conf/iros/TodorovET12", "MAG": "2158782408", "DOI": "10.1109/IROS.2012.6386109", "CorpusId": 5230692 }, "url": "https://www.semanticscholar.org/paper/b354ee518bfc1ac0d8ac447eece9edb69e92eae1", "referenceCount": 17, "citationCount": 4908, "influentialCitationCount": 666, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Policy Invariance Under Reward Transformations: Theory and Application to Reward Shaping", "abstract": "This paper investigates conditions under which modifications to the reward function of a Markov decision process preserve the optimal policy. It is shown that, besides the positive linear transformation familiar from utility theory, one can add a reward for transitions between states that is expressible as the difference in value of an arbitrary potential function applied to those states. Furthermore, this is shown to be a necessary condition for invariance, in the sense that any other transformation may yield suboptimal policies unless further assumptions are made about the underlying MDP. These results shed light on the practice of reward shaping, a method used in reinforcement learning whereby additional training rewards are used to guide the learning agent. 
In particular, some well-known \"bugs\" in reward shaping procedures are shown to arise from non-potential-based rewards, and methods are given for constructing shaping potentials corresponding to distance-based and subgoal-based heuristics. We show that such potentials can lead to substantial reductions in learning time.", "year": 1999, "venue": "International Conference on Machine Learning", "authors": [ "A. Ng", "Daishi Harada", "Stuart J. Russell" ], "externalIds": { "DBLP": "conf/icml/NgHR99", "MAG": "1777239053", "CorpusId": 5730166 }, "url": "https://www.semanticscholar.org/paper/94066dc12fe31e96af7557838159bde598cb4f10", "referenceCount": 15, "citationCount": 2236, "influentialCitationCount": 235, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "No free lunch theorems for optimization", "abstract": "A framework is developed to explore the connection between effective optimization algorithms and the problems they are solving. A number of \"no free lunch\" (NFL) theorems are presented which establish that for any algorithm, any elevated performance over one class of problems is offset by performance over another class. These theorems result in a geometric interpretation of what it means for an algorithm to be well suited to an optimization problem. Applications of the NFL theorems to information-theoretic aspects of optimization and benchmark measures of performance are also presented. Other issues addressed include time-varying optimization problems and a priori \"head-to-head\" minimax distinctions between optimization algorithms, distinctions that result despite the NFL theorems' enforcing of a type of uniformity over all algorithms.", "year": 1997, "venue": "IEEE Transactions on Evolutionary Computation", "authors": [ "D. Wolpert", "W. Macready" ], "externalIds": { "MAG": "2151554678", "DBLP": "journals/tec/DolpertM97", "DOI": "10.1109/4235.585893", "CorpusId": 5553697 }, "url": "https://www.semanticscholar.org/paper/8315dff3d304baf47c025f4b33535b9d693350c1", "referenceCount": 26, "citationCount": 11921, "influentialCitationCount": 310, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "AUTO-ENCODING VARIATIONAL BAYES", "abstract": "To make decisions based on a model fit by Auto-Encoding Variational Bayes (AEVB), practitioners typically use importance sampling to estimate a functional of the posterior distribution. The variational distribution found by AEVB serves as the proposal distribution for importance sampling. However, this proposal distribution may give unreliable (high variance) importance sampling estimates, thus leading to poor decisions. We explore how changing the objective function for learning the variational distribution, while continuing to learn the generative model based on the ELBO, affects the quality of downstream decisions. For a particular model, we characterize the error of importance sampling as a function of posterior variance and show that proposal distributions learned with evidence upper bounds are better. Motivated by these theoretical results, we propose a novel variant of the VAE. In addition to experimenting with MNIST, we present a full-fledged application of the proposed method to single-cell RNA sequencing. In this challenging instance of multiple hypothesis testing, the proposed method surpasses the current state of the art.", "year": 2020, "venue": "", "authors": [ "Romain Lopez", "Pierre Boyeau", "N. Yosef", "Michael I. Jordan", "J. 
Regier" ], "externalIds": { "CorpusId": 211146177 }, "url": "https://www.semanticscholar.org/paper/ef4f5a50837a7c1b3e87b9300ffc7ba00d461a0f", "referenceCount": 53, "citationCount": 11952, "influentialCitationCount": 1706, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Improving Language Understanding by Generative Pre-Training", "abstract": "Natural language understanding comprises a wide range of diverse tasks such as textual entailment, question answering, semantic similarity assessment, and document classification. Although large unlabeled text corpora are abundant, labeled data for learning these specific tasks is scarce, making it challenging for discriminatively trained models to perform adequately. We demonstrate that large gains on these tasks can be realized by generative pre-training of a language model on a diverse corpus of unlabeled text, followed by discriminative fine-tuning on each specific task. In contrast to previous approaches, we make use of task-aware input transformations during fine-tuning to achieve effective transfer while requiring minimal changes to the model architecture. We demonstrate the effectiveness of our approach on a wide range of benchmarks for natural language understanding. Our general task-agnostic model outperforms discriminatively trained models that use architectures specifically crafted for each task, significantly improving upon the state of the art in 9 out of the 12 tasks studied. For instance, we achieve absolute improvements of 8.9% on commonsense reasoning (Stories Cloze Test), 5.7% on question answering (RACE), and 1.5% on textual entailment (MultiNLI).", "year": 2018, "venue": "", "authors": [ "Alec Radford", "Karthik Narasimhan" ], "externalIds": { "MAG": "2965425874", "CorpusId": 49313245 }, "url": "https://www.semanticscholar.org/paper/cd18800a0fe0b668a1cc19f2ec95b5003d0a5035", "referenceCount": 73, "citationCount": 9710, "influentialCitationCount": 1083, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Lack of A Priori Distinctions Between Learning Algorithms", "abstract": "Plutowski, M., et al. (1994). Cross-validation estimates integrated mean squared error. In Advances in neural information processing systems 6, Cowan et al. some constant set by k and m. It's also true that E(C OTS | φ, d X) is not drastically different if one considers d X 's with a different m'. Accordingly, our summand doesn't vary drastically between d X 's of one m' and d X 's of another. Since n >> m and π(x) is uniform though, almost all of the terms in the sum have m' = m.", "year": 1996, "venue": "", "authors": [ "M. Kauffman", "San Mateo", "E. 
Prescott" ], "externalIds": { "MAG": "2948781549", "CorpusId": 15897108 }, "url": "https://www.semanticscholar.org/paper/604954a3600f749b25a9f52317a42d13a8ec0339", "referenceCount": 0, "citationCount": 825, "influentialCitationCount": 38, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "“Unitree robotics,”", "abstract": null, "year": null, "venue": "unitree.com", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“Bridging the sim-to-real gap from the information bottleneck perspective,”", "abstract": null, "year": null, "venue": "8th Annual Conference on Robot Learning", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“Ase: large-scale reusable adversarial skill embeddings for physically simulated characters,”", "abstract": null, "year": null, "venue": "ACM Transactions on Graphics", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Uncertainty Representations in State-Space Layers for Deep Reinforcement Learning under Partial Observability": { "paper_title": "Uncertainty Representations in State-Space Layers for Deep Reinforcement Learning under Partial Observability", "arxiv_id": "2409.16824v1", "keyword": "reinforcement learning", "authors": [ "Carlos E. Luis", "Alessandro G. Bottero", "Julia Vinogradska", "Felix Berkenkamp", "Jan Peters" ], "references": [ { "title": "KalMamba: Towards Efficient Probabilistic State Space Models for RL under Uncertainty", "abstract": "Probabilistic State Space Models (SSMs) are essential for Reinforcement Learning (RL) from high-dimensional, partial information as they provide concise representations for control. Yet, they lack the computational efficiency of their recent deterministic counterparts such as S4 or Mamba. We propose KalMamba, an efficient architecture to learn representations for RL that combines the strengths of probabilistic SSMs with the scalability of deterministic SSMs. KalMamba leverages Mamba to learn the dynamics parameters of a linear Gaussian SSM in a latent space. Inference in this latent space amounts to standard Kalman filtering and smoothing. We realize these operations using parallel associative scanning, similar to Mamba, to obtain a principled, highly efficient, and scalable probabilistic SSM. Our experiments show that KalMamba competes with state-of-the-art SSM approaches in RL while significantly improving computational efficiency, especially on longer interaction sequences.", "year": 2024, "venue": "arXiv.org", "authors": [ "P. Becker", "Niklas Freymuth", "Gerhard Neumann" ], "externalIds": { "DBLP": "journals/corr/abs-2406-15131", "ArXiv": "2406.15131", "DOI": "10.48550/arXiv.2406.15131", "CorpusId": 270688347 }, "url": "https://www.semanticscholar.org/paper/b7fdd6f8b54a4702f7728abf14c725af034dd436", "referenceCount": 55, "citationCount": 1, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Mastering Memory Tasks with World Models", "abstract": "Current model-based reinforcement learning (MBRL) agents struggle with long-term dependencies. 
This limits their ability to effectively solve tasks involving extended time gaps between actions and outcomes, or tasks demanding the recalling of distant observations to inform current actions. To improve temporal coherence, we integrate a new family of state space models (SSMs) in world models of MBRL agents to present a new method, Recall to Imagine (R2I). This integration aims to enhance both long-term memory and long-horizon credit assignment. Through a diverse set of illustrative tasks, we systematically demonstrate that R2I not only establishes a new state-of-the-art for challenging memory and credit assignment RL tasks, such as BSuite and POPGym, but also showcases superhuman performance in the complex memory domain of Memory Maze. At the same time, it upholds comparable performance in classic RL tasks, such as Atari and DMC, suggesting the generality of our method. We also show that R2I is faster than the state-of-the-art MBRL method, DreamerV3, resulting in faster wall-time convergence.", "year": 2024, "venue": "International Conference on Learning Representations", "authors": [ "Mohammad Reza Samsami", "Artem Zholus", "Janarthanan Rajendran", "Sarath Chandar" ], "externalIds": { "DBLP": "journals/corr/abs-2403-04253", "ArXiv": "2403.04253", "DOI": "10.48550/arXiv.2403.04253", "CorpusId": 268264561 }, "url": "https://www.semanticscholar.org/paper/e34868427f607bd35f11576963c36b95673e5a75", "referenceCount": 64, "citationCount": 9, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Bridging State and History Representations: Understanding Self-Predictive RL", "abstract": "Representations are at the core of all deep reinforcement learning (RL) methods for both Markov decision processes (MDPs) and partially observable Markov decision processes (POMDPs). Many representation learning methods and theoretical frameworks have been developed to understand what constitutes an effective representation. However, the relationships between these methods and the shared properties among them remain unclear. In this paper, we show that many of these seemingly distinct methods and frameworks for state and history abstractions are, in fact, based on a common idea of self-predictive abstraction. Furthermore, we provide theoretical insights into the widely adopted objectives and optimization, such as the stop-gradient technique, in learning self-predictive representations. These findings together yield a minimalist algorithm to learn self-predictive representations for states and histories. We validate our theories by applying our algorithm to standard MDPs, MDPs with distractors, and POMDPs with sparse rewards. 
These findings culminate in a set of preliminary guidelines for RL practitioners.", "year": 2024, "venue": "International Conference on Learning Representations", "authors": [ "Tianwei Ni", "Benjamin Eysenbach", "Erfan Seyedsalehi", "Michel Ma", "Clement Gehring", "Aditya Mahajan", "Pierre-Luc Bacon" ], "externalIds": { "DBLP": "conf/iclr/NiESMGMB24", "ArXiv": "2401.08898", "DOI": "10.48550/arXiv.2401.08898", "CorpusId": 267027743 }, "url": "https://www.semanticscholar.org/paper/c3eed6435ae609ee800c6e9daee038535af2ef22", "referenceCount": 124, "citationCount": 9, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Mamba: Linear-Time Sequence Modeling with Selective State Spaces", "abstract": "Foundation models, now powering most of the exciting applications in deep learning, are almost universally based on the Transformer architecture and its core attention module. Many subquadratic-time architectures such as linear attention, gated convolution and recurrent models, and structured state space models (SSMs) have been developed to address Transformers' computational inefficiency on long sequences, but they have not performed as well as attention on important modalities such as language. We identify that a key weakness of such models is their inability to perform content-based reasoning, and make several improvements. First, simply letting the SSM parameters be functions of the input addresses their weakness with discrete modalities, allowing the model to selectively propagate or forget information along the sequence length dimension depending on the current token. Second, even though this change prevents the use of efficient convolutions, we design a hardware-aware parallel algorithm in recurrent mode. We integrate these selective SSMs into a simplified end-to-end neural network architecture without attention or even MLP blocks (Mamba). Mamba enjoys fast inference (5$\\times$ higher throughput than Transformers) and linear scaling in sequence length, and its performance improves on real data up to million-length sequences. As a general sequence model backbone, Mamba achieves state-of-the-art performance across several modalities such as language, audio, and genomics. On language modeling, our Mamba-3B model outperforms Transformers of the same size and matches Transformers twice its size, both in pretraining and downstream evaluation.", "year": 2023, "venue": "arXiv.org", "authors": [ "Albert Gu", "Tri Dao" ], "externalIds": { "ArXiv": "2312.00752", "DBLP": "journals/corr/abs-2312-00752", "DOI": "10.48550/arXiv.2312.00752", "CorpusId": 265551773 }, "url": "https://www.semanticscholar.org/paper/7bbc7595196a0606a07506c4fb1473e5e87f6082", "referenceCount": 0, "citationCount": 968, "influentialCitationCount": 281, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multi Time Scale World Models", "abstract": "Intelligent agents use internal world models to reason and make predictions about different courses of their actions at many scales. Devising learning paradigms and architectures that allow machines to learn world models that operate at multiple levels of temporal abstractions while dealing with complex uncertainty predictions is a major technical hurdle. In this work, we propose a probabilistic formalism to learn multi-time scale world models which we call the Multi Time Scale State Space (MTS3) model. 
Our model uses a computationally efficient inference scheme on multiple time scales for highly accurate long-horizon predictions and uncertainty estimates over several seconds into the future. Our experiments, which focus on action conditional long horizon future predictions, show that MTS3 outperforms recent methods on several system identification benchmarks including complex simulated and real-world dynamical systems. Code is available at this repository: https://github.com/ALRhub/MTS3.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Vaisakh Shaj", "Saleh Gholam Zadeh", "Ozan Demir", "L. R. Douat", "Gerhard Neumann" ], "externalIds": { "DBLP": "journals/corr/abs-2310-18534", "ArXiv": "2310.18534", "DOI": "10.48550/arXiv.2310.18534", "CorpusId": 264590603 }, "url": "https://www.semanticscholar.org/paper/f6dc0af5a7d38e8cb8adb944d8ecfbddc2ba77b1", "referenceCount": 37, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Value-Distributional Model-Based Reinforcement Learning", "abstract": "Quantifying uncertainty about a policy's long-term performance is important to solve sequential decision-making tasks. We study the problem from a model-based Bayesian reinforcement learning perspective, where the goal is to learn the posterior distribution over value functions induced by parameter (epistemic) uncertainty of the Markov decision process. Previous work restricts the analysis to a few moments of the distribution over values or imposes a particular distribution shape, e.g., Gaussians. Inspired by distributional reinforcement learning, we introduce a Bellman operator whose fixed-point is the value distribution function. Based on our theory, we propose Epistemic Quantile-Regression (EQR), a model-based algorithm that learns a value distribution function. We combine EQR with soft actor-critic (SAC) for policy optimization with an arbitrary differentiable objective function of the learned value distribution. Evaluation across several continuous-control tasks shows performance benefits with respect to both model-based and model-free algorithms. The code is available at https://github.com/boschresearch/dist-mbrl.", "year": 2023, "venue": "arXiv.org", "authors": [ "Carlos E. Luis", "A. Bottero", "Julia Vinogradska", "Felix Berkenkamp", "Jan Peters" ], "externalIds": { "DBLP": "journals/corr/abs-2308-06590", "ArXiv": "2308.06590", "DOI": "10.48550/arXiv.2308.06590", "CorpusId": 260886933 }, "url": "https://www.semanticscholar.org/paper/28aa4452c63da832a3e185475285044ba1496391", "referenceCount": 61, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "When Do Transformers Shine in RL? Decoupling Memory from Credit Assignment", "abstract": "Reinforcement learning (RL) algorithms face two distinct challenges: learning effective representations of past and present observations, and determining how actions influence future returns. Both challenges involve modeling long-term dependencies. The Transformer architecture has been very successful to solve problems that involve long-term dependencies, including in the RL domain. However, the underlying reason for the strong performance of Transformer-based RL methods remains unclear: is it because they learn effective memory, or because they perform effective credit assignment? 
After introducing formal definitions of memory length and credit assignment length, we design simple configurable tasks to measure these distinct quantities. Our empirical results reveal that Transformers can enhance the memory capability of RL algorithms, scaling up to tasks that require memorizing observations $1500$ steps ago. However, Transformers do not improve long-term credit assignment. In summary, our results provide an explanation for the success of Transformers in RL, while also highlighting an important area for future research and benchmark design. Our code is open-sourced at https://github.com/twni2016/Memory-RL", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Tianwei Ni", "Michel Ma", "Benjamin Eysenbach", "Pierre-Luc Bacon" ], "externalIds": { "DBLP": "conf/nips/NiMEB23", "ArXiv": "2307.03864", "DOI": "10.48550/arXiv.2307.03864", "CorpusId": 259501310 }, "url": "https://www.semanticscholar.org/paper/7177fcb8e0311eeebd4070b62312a4bec1432ea3", "referenceCount": 71, "citationCount": 22, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Structured State Space Models for In-Context Reinforcement Learning", "abstract": "Structured state space sequence (S4) models have recently achieved state-of-the-art performance on long-range sequence modeling tasks. These models also have fast inference speeds and parallelisable training, making them potentially useful in many reinforcement learning settings. We propose a modification to a variant of S4 that enables us to initialise and reset the hidden state in parallel, allowing us to tackle reinforcement learning tasks. We show that our modified architecture runs asymptotically faster than Transformers in sequence length and performs better than RNN's on a simple memory-based task. We evaluate our modified architecture on a set of partially-observable environments and find that, in practice, our model outperforms RNN's while also running over five times faster. Then, by leveraging the model's ability to handle long-range sequences, we achieve strong performance on a challenging meta-learning task in which the agent is given a randomly-sampled continuous control environment, combined with a randomly-sampled linear projection of the environment's observations and actions. Furthermore, we show the resulting model can adapt to out-of-distribution held-out tasks. Overall, the results presented in this paper show that structured state space models are fast and performant for in-context reinforcement learning tasks. We provide code at https://github.com/luchris429/popjaxrl.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Chris Xiaoxuan Lu", "Yannick Schroecker", "Albert Gu", "Emilio Parisotto", "J. Foerster", "Satinder Singh", "Feryal M. P. Behbahani" ], "externalIds": { "DBLP": "conf/nips/0001SGPF0B23", "ArXiv": "2303.03982", "DOI": "10.48550/arXiv.2303.03982", "CorpusId": 257378507 }, "url": "https://www.semanticscholar.org/paper/d98b5c1d0f9a4e39dc79ea7a3f74e54789df5e13", "referenceCount": 45, "citationCount": 54, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "POPGym: Benchmarking Partially Observable Reinforcement Learning", "abstract": "Real world applications of Reinforcement Learning (RL) are often partially observable, thus requiring memory. Despite this, partial observability is still largely ignored by contemporary RL benchmarks and libraries. 
We introduce Partially Observable Process Gym (POPGym), a two-part library containing (1) a diverse collection of 15 partially observable environments, each with multiple difficulties and (2) implementations of 13 memory model baselines -- the most in a single RL library. Existing partially observable benchmarks tend to fixate on 3D visual navigation, which is computationally expensive and only one type of POMDP. In contrast, POPGym environments are diverse, produce smaller observations, use less memory, and often converge within two hours of training on a consumer-grade GPU. We implement our high-level memory API and memory baselines on top of the popular RLlib framework, providing plug-and-play compatibility with various training algorithms, exploration strategies, and distributed training paradigms. Using POPGym, we execute the largest comparison across RL memory models to date. POPGym is available at https://github.com/proroklab/popgym.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Steven D. Morad", "Ryan Kortvelesy", "Matteo Bettini", "Stephan Liwicki", "Amanda Prorok" ], "externalIds": { "DBLP": "journals/corr/abs-2303-01859", "ArXiv": "2303.01859", "DOI": "10.48550/arXiv.2303.01859", "CorpusId": 257353458 }, "url": "https://www.semanticscholar.org/paper/c77112b5b2aff937ca071b349e47176b684c45b2", "referenceCount": 70, "citationCount": 27, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Hungry Hungry Hippos: Towards Language Modeling with State Space Models", "abstract": "State space models (SSMs) have demonstrated state-of-the-art sequence modeling performance in some modalities, but underperform attention in language modeling. Moreover, despite scaling nearly linearly in sequence length instead of quadratically, SSMs are still slower than Transformers due to poor hardware utilization. In this paper, we make progress on understanding the expressivity gap between SSMs and attention in language modeling, and on reducing the hardware barrier between SSMs and attention. First, we use synthetic language modeling tasks to understand the gap between SSMs and attention. We find that existing SSMs struggle with two capabilities: recalling earlier tokens in the sequence and comparing tokens across the sequence. To understand the impact on language modeling, we propose a new SSM layer, H3, that is explicitly designed for these abilities. H3 matches attention on the synthetic languages and comes within 0.4 PPL of Transformers on OpenWebText. Furthermore, a hybrid 125M-parameter H3-attention model that retains two attention layers surprisingly outperforms Transformers on OpenWebText by 1.0 PPL. Next, to improve the efficiency of training SSMs on modern hardware, we propose FlashConv. FlashConv uses a fused block FFT algorithm to improve efficiency on sequences up to 8K, and introduces a novel state passing algorithm that exploits the recurrent properties of SSMs to scale to longer sequences. FlashConv yields 2$\\times$ speedup on the long-range arena benchmark and allows hybrid language models to generate text 2.4$\\times$ faster than Transformers. 
Using FlashConv, we scale hybrid H3-attention language models up to 2.7B parameters on the Pile and find promising initial results, achieving lower perplexity than Transformers and outperforming Transformers in zero- and few-shot learning on a majority of tasks in the SuperGLUE benchmark.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Tri Dao", "Daniel Y. Fu", "Khaled Kamal Saab", "A. Thomas", "A. Rudra", "Christopher Ré" ], "externalIds": { "DBLP": "journals/corr/abs-2212-14052", "ArXiv": "2212.14052", "DOI": "10.48550/arXiv.2212.14052", "CorpusId": 255340454 }, "url": "https://www.semanticscholar.org/paper/5a77b508302771fc083bf24e0bcda8553c9b5421", "referenceCount": 65, "citationCount": 241, "influentialCitationCount": 19, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On Uncertainty in Deep State Space Models for Model-Based Reinforcement Learning", "abstract": "Improved state space models, such as Recurrent State Space Models (RSSMs), are a key factor behind recent advances in model-based reinforcement learning (RL). Yet, despite their empirical success, many of the underlying design choices are not well understood. We show that RSSMs use a suboptimal inference scheme and that models trained using this inference overestimate the aleatoric uncertainty of the ground truth system. We find this overestimation implicitly regularizes RSSMs and allows them to succeed in model-based RL. We postulate that this implicit regularization fulfills the same functionality as explicitly modeling epistemic uncertainty, which is crucial for many other model-based RL approaches. Yet, overestimating aleatoric uncertainty can also impair performance in cases where accurately estimating it matters, e.g., when we have to deal with occlusions, missing observations, or fusing sensor modalities at different frequencies. Moreover, the implicit regularization is a side-effect of the inference scheme and not the result of a rigorous, principled formulation, which renders analyzing or improving RSSMs difficult. Thus, we propose an alternative approach building on well-understood components for modeling aleatoric and epistemic uncertainty, dubbed Variational Recurrent Kalman Network (VRKN). This approach uses Kalman updates for exact smoothing inference in a latent space and Monte Carlo Dropout to model epistemic uncertainty. Due to the Kalman updates, the VRKN can naturally handle missing observations or sensor fusion problems with varying numbers of observations per time step. Our experiments show that using the VRKN instead of the RSSM improves performance in tasks where appropriately capturing aleatoric uncertainty is crucial while matching it in the deterministic standard benchmarks.", "year": 2022, "venue": "Trans. Mach. Learn. Res.", "authors": [ "P. Becker", "G. Neumann" ], "externalIds": { "ArXiv": "2210.09256", "DBLP": "journals/corr/abs-2210-09256", "DOI": "10.48550/arXiv.2210.09256", "CorpusId": 252918781 }, "url": "https://www.semanticscholar.org/paper/5a617480cd981a6d01b87ae167d36afc2ec9c24a", "referenceCount": 53, "citationCount": 6, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Mega: Moving Average Equipped Gated Attention", "abstract": "The design choices in the Transformer attention mechanism, including weak inductive bias and quadratic computational complexity, have limited its application for modeling long sequences. 
In this paper, we introduce Mega, a simple, theoretically grounded, single-head gated attention mechanism equipped with (exponential) moving average to incorporate inductive bias of position-aware local dependencies into the position-agnostic attention mechanism. We further propose a variant of Mega that offers linear time and space complexity yet yields only minimal quality loss, by efficiently splitting the whole sequence into multiple chunks with fixed length. Extensive experiments on a wide range of sequence modeling benchmarks, including the Long Range Arena, neural machine translation, auto-regressive language modeling, and image and speech classification, show that Mega achieves significant improvements over other sequence models, including variants of Transformers and recent state space models.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Xuezhe Ma", "Chunting Zhou", "Xiang Kong", "Junxian He", "Liangke Gui", "Graham Neubig", "Jonathan May", "Luke Zettlemoyer" ], "externalIds": { "DBLP": "conf/iclr/MaZKHGNMZ23", "ArXiv": "2209.10655", "DOI": "10.48550/arXiv.2209.10655", "CorpusId": 252439127 }, "url": "https://www.semanticscholar.org/paper/70e91e16eb321067d9402710e14a40cf28311f73", "referenceCount": 81, "citationCount": 134, "influentialCitationCount": 27, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Simplified State Space Layers for Sequence Modeling", "abstract": "Models using structured state space sequence (S4) layers have achieved state-of-the-art performance on long-range sequence modeling tasks. An S4 layer combines linear state space models (SSMs), the HiPPO framework, and deep learning to achieve high performance. We build on the design of the S4 layer and introduce a new state space layer, the S5 layer. Whereas an S4 layer uses many independent single-input, single-output SSMs, the S5 layer uses one multi-input, multi-output SSM. We establish a connection between S5 and S4, and use this to develop the initialization and parameterization used by the S5 model. The result is a state space layer that can leverage efficient and widely implemented parallel scans, allowing S5 to match the computational efficiency of S4, while also achieving state-of-the-art performance on several long-range sequence modeling tasks. S5 averages 87.4% on the long range arena benchmark, and 98.5% on the most difficult Path-X task.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Jimmy Smith", "Andrew Warrington", "Scott W. Linderman" ], "externalIds": { "DBLP": "conf/iclr/SmithWL23", "ArXiv": "2208.04933", "DOI": "10.48550/arXiv.2208.04933", "CorpusId": 251442769 }, "url": "https://www.semanticscholar.org/paper/6d7d141c75af752ffc0d8a6184cca3f9323d6c74", "referenceCount": 82, "citationCount": 290, "influentialCitationCount": 31, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Hidden Parameter Recurrent State Space Models For Changing Dynamics Scenarios", "abstract": "Recurrent State-space models (RSSMs) are highly expressive models for learning patterns in time series data and system identification. However, these models assume that the dynamics are fixed and unchanging, which is rarely the case in real-world scenarios. Many control applications often exhibit tasks with similar but not identical dynamics which can be modeled as a latent variable. 
We introduce the Hidden Parameter Recurrent State Space Models (HiP-RSSMs), a framework that parametrizes a family of related dynamical systems with a low-dimensional set of latent factors. We present a simple and effective way of learning and performing inference over this Gaussian graphical model that avoids approximations like variational inference. We show that HiP-RSSMs outperforms RSSMs and competing multi-task models on several challenging robotic benchmarks both on real-world systems and simulations.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Vaisakh Shaj", "Dieter Buchler", "Rohit Sonker", "P. Becker", "G. Neumann" ], "externalIds": { "DBLP": "conf/iclr/ShajBSBN22", "ArXiv": "2206.14697", "DOI": "10.48550/arXiv.2206.14697", "CorpusId": 247696299 }, "url": "https://www.semanticscholar.org/paper/b66f6ef8c4ff9b5fce38396d43c7891ad4e6969f", "referenceCount": 50, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "On the Parameterization and Initialization of Diagonal State Space Models", "abstract": "State space models (SSM) have recently been shown to be very effective as a deep learning layer as a promising alternative to sequence models such as RNNs, CNNs, or Transformers. The first version to show this potential was the S4 model, which is particularly effective on tasks involving long-range dependencies by using a prescribed state matrix called the HiPPO matrix. While this has an interpretable mathematical mechanism for modeling long dependencies, it introduces a custom representation and algorithm that can be difficult to implement. On the other hand, a recent variant of S4 called DSS showed that restricting the state matrix to be fully diagonal can still preserve the performance of the original model when using a specific initialization based on approximating S4's matrix. This work seeks to systematically understand how to parameterize and initialize such diagonal state space models. While it follows from classical results that almost all SSMs have an equivalent diagonal form, we show that the initialization is critical for performance. We explain why DSS works mathematically, by showing that the diagonal restriction of S4's matrix surprisingly recovers the same kernel in the limit of infinite state dimension. We also systematically describe various design choices in parameterizing and computing diagonal SSMs, and perform a controlled empirical study ablating the effects of these choices. 
Our final model S4D is a simple diagonal version of S4 whose kernel computation requires just 2 lines of code and performs comparably to S4 in almost all settings, with state-of-the-art results for image, audio, and medical time-series domains, and averaging 85\\% on the Long Range Arena benchmark.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Albert Gu", "Ankit Gupta", "Karan Goel", "Christopher Ré" ], "externalIds": { "DBLP": "journals/corr/abs-2206-11893", "ArXiv": "2206.11893", "DOI": "10.48550/arXiv.2206.11893", "CorpusId": 249953875 }, "url": "https://www.semanticscholar.org/paper/ca444821352a4bd91884413d8070446e2960715a", "referenceCount": 41, "citationCount": 182, "influentialCitationCount": 36, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "When Is Partially Observable Reinforcement Learning Not Scary?", "abstract": "Applications of Reinforcement Learning (RL), in which agents learn to make a sequence of decisions despite lacking complete information about the latent states of the controlled system, that is, they act under partial observability of the states, are ubiquitous. Partially observable RL can be notoriously difficult -- well-known information-theoretic results show that learning partially observable Markov decision processes (POMDPs) requires an exponential number of samples in the worst case. Yet, this does not rule out the existence of large subclasses of POMDPs over which learning is tractable. In this paper we identify such a subclass, which we call weakly revealing POMDPs. This family rules out the pathological instances of POMDPs where observations are uninformative to a degree that makes learning hard. We prove that for weakly revealing POMDPs, a simple algorithm combining optimism and Maximum Likelihood Estimation (MLE) is sufficient to guarantee polynomial sample complexity. To the best of our knowledge, this is the first provably sample-efficient result for learning from interactions in overcomplete POMDPs, where the number of latent states can be larger than the number of observations.", "year": 2022, "venue": "Annual Conference Computational Learning Theory", "authors": [ "Qinghua Liu", "Alan Chung", "Csaba Szepesvari", "Chi Jin" ], "externalIds": { "DBLP": "conf/colt/LiuCSJ22", "ArXiv": "2204.08967", "DOI": "10.48550/arXiv.2204.08967", "CorpusId": 248239669 }, "url": "https://www.semanticscholar.org/paper/a1282f8334e781ed33d7479fcde32956d7e9c0dc", "referenceCount": 58, "citationCount": 74, "influentialCitationCount": 16, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "It's Raw! Audio Generation with State-Space Models", "abstract": "Developing architectures suitable for modeling raw audio is a challenging problem due to the high sampling rates of audio waveforms. Standard sequence modeling approaches like RNNs and CNNs have previously been tailored to fit the demands of audio, but the resultant architectures make undesirable computational tradeoffs and struggle to model waveforms effectively. We propose SaShiMi, a new multi-scale architecture for waveform modeling built around the recently introduced S4 model for long sequence modeling. We identify that S4 can be unstable during autoregressive generation, and provide a simple improvement to its parameterization by drawing connections to Hurwitz matrices. SaShiMi yields state-of-the-art performance for unconditional waveform generation in the autoregressive setting. 
Additionally, SaShiMi improves non-autoregressive generation performance when used as the backbone architecture for a diffusion model. Compared to prior architectures in the autoregressive generation setting, SaShiMi generates piano and speech waveforms which humans find more musical and coherent respectively, e.g. 2x better mean opinion scores than WaveNet on an unconditional speech generation task. On a music generation task, SaShiMi outperforms WaveNet on density estimation and speed at both training and inference even when using 3x fewer parameters. Code can be found at https://github.com/HazyResearch/state-spaces and samples at https://hazyresearch.stanford.edu/sashimi-examples.", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "Karan Goel", "Albert Gu", "Chris Donahue", "Christopher R'e" ], "externalIds": { "ArXiv": "2202.09729", "DBLP": "journals/corr/abs-2202-09729", "CorpusId": 247011489 }, "url": "https://www.semanticscholar.org/paper/b55ee75940d24934a54d7f1acfde06e9cb45ac44", "referenceCount": 55, "citationCount": 141, "influentialCitationCount": 14, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "TransDreamer: Reinforcement Learning with Transformer World Models", "abstract": "The Dreamer agent provides various benefits of Model-Based Reinforcement Learning (MBRL) such as sample efficiency, reusable knowledge, and safe planning. However, its world model and policy networks inherit the limitations of recurrent neural networks and thus an important question is how an MBRL framework can benefit from the recent advances of transformers and what the challenges are in doing so. In this paper, we propose a transformer-based MBRL agent, called TransDreamer. We first introduce the Transformer State-Space Model, a world model that leverages a transformer for dynamics predictions. We then share this world model with a transformer-based policy network and obtain stability in training a transformer-based RL agent. In experiments, we apply the proposed model to 2D visual RL and 3D first-person visual RL tasks both requiring long-range memory access for memory-based reasoning. We show that the proposed model outperforms Dreamer in these complex tasks.", "year": 2022, "venue": "arXiv.org", "authors": [ "Changgu Chen", "Yi-Fu Wu", "Jaesik Yoon", "Sungjin Ahn" ], "externalIds": { "DBLP": "journals/corr/abs-2202-09481", "ArXiv": "2202.09481", "CorpusId": 247011881 }, "url": "https://www.semanticscholar.org/paper/2fec20377bc947ec1df003b4aedcb4d7f25ac934", "referenceCount": 54, "citationCount": 70, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Efficiently Modeling Long Sequences with Structured State Spaces", "abstract": "A central goal of sequence modeling is designing a single principled model that can address sequence data across a range of modalities and tasks, particularly on long-range dependencies. Although conventional models including RNNs, CNNs, and Transformers have specialized variants for capturing long dependencies, they still struggle to scale to very long sequences of $10000$ or more steps. A promising recent approach proposed modeling sequences by simulating the fundamental state space model (SSM) \\( x'(t) = Ax(t) + Bu(t), y(t) = Cx(t) + Du(t) \\), and showed that for appropriate choices of the state matrix \\( A \\), this system could handle long-range dependencies mathematically and empirically. 
However, this method has prohibitive computation and memory requirements, rendering it infeasible as a general sequence modeling solution. We propose the Structured State Space sequence model (S4) based on a new parameterization for the SSM, and show that it can be computed much more efficiently than prior approaches while preserving their theoretical strengths. Our technique involves conditioning \\( A \\) with a low-rank correction, allowing it to be diagonalized stably and reducing the SSM to the well-studied computation of a Cauchy kernel. S4 achieves strong empirical results across a diverse range of established benchmarks, including (i) 91\\% accuracy on sequential CIFAR-10 with no data augmentation or auxiliary losses, on par with a larger 2-D ResNet, (ii) substantially closing the gap to Transformers on image and language modeling tasks, while performing generation $60\\times$ faster (iii) SoTA on every task from the Long Range Arena benchmark, including solving the challenging Path-X task of length 16k that all prior work fails on, while being as efficient as all competitors.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Albert Gu", "Karan Goel", "Christopher R'e" ], "externalIds": { "DBLP": "conf/iclr/GuGR22", "ArXiv": "2111.00396", "CorpusId": 240354066 }, "url": "https://www.semanticscholar.org/paper/ac2618b2ce5cdcf86f9371bcca98bc5e37e46f51", "referenceCount": 61, "citationCount": 930, "influentialCitationCount": 142, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Recurrent Model-Free RL Can Be a Strong Baseline for Many POMDPs", "abstract": "Many problems in RL, such as meta-RL, robust RL, generalization in RL, and temporal credit assignment, can be cast as POMDPs. In theory, simply augmenting model-free RL with memory-based architectures, such as recurrent neural networks, provides a general approach to solving all types of POMDPs. However, prior work has found that such recurrent model-free RL methods tend to perform worse than more specialized algorithms that are designed for specific types of POMDPs. This paper revisits this claim. We find that careful architecture and hyperparameter decisions can often yield a recurrent model-free implementation that performs on par with (and occasionally substantially better than) more sophisticated recent techniques. We compare to 21 environments from 6 prior specialized methods and find that our implementation achieves greater sample efficiency and asymptotic performance than these methods on 18/21 environments. We also release a simple and efficient implementation of recurrent model-free RL for future work to use as a baseline for POMDPs.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Tianwei Ni", "Benjamin Eysenbach", "R. Salakhutdinov" ], "externalIds": { "ArXiv": "2110.05038", "DBLP": "conf/icml/NiES22", "CorpusId": 246680124 }, "url": "https://www.semanticscholar.org/paper/a7d58bd29778ef0d15b9e9e3eb2f37a8cf1ea70c", "referenceCount": 137, "citationCount": 85, "influentialCitationCount": 11, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Reinforcement Learning at the Edge of the Statistical Precipice", "abstract": "Deep reinforcement learning (RL) algorithms are predominantly evaluated by comparing their relative performance on a large suite of tasks. 
Most published results on deep RL benchmarks compare point estimates of aggregate performance such as mean and median scores across tasks, ignoring the statistical uncertainty implied by the use of a finite number of training runs. Beginning with the Arcade Learning Environment (ALE), the shift towards computationally-demanding benchmarks has led to the practice of evaluating only a small number of runs per task, exacerbating the statistical uncertainty in point estimates. In this paper, we argue that reliable evaluation in the few run deep RL regime cannot ignore the uncertainty in results without running the risk of slowing down progress in the field. We illustrate this point using a case study on the Atari 100k benchmark, where we find substantial discrepancies between conclusions drawn from point estimates alone versus a more thorough statistical analysis. With the aim of increasing the field's confidence in reported results with a handful of runs, we advocate for reporting interval estimates of aggregate performance and propose performance profiles to account for the variability in results, as well as present more robust and efficient aggregate metrics, such as interquartile mean scores, to achieve small uncertainty in results. Using such statistical tools, we scrutinize performance evaluations of existing algorithms on other widely used RL benchmarks including the ALE, Procgen, and the DeepMind Control Suite, again revealing discrepancies in prior comparisons. Our findings call for a change in how we evaluate performance in deep RL, for which we present a more rigorous evaluation methodology, accompanied with an open-source library rliable, to prevent unreliable results from stagnating the field.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Rishabh Agarwal", "Max Schwarzer", "P. S. Castro", "Aaron C. Courville", "Marc G. Bellemare" ], "externalIds": { "DBLP": "conf/nips/AgarwalSCCB21", "ArXiv": "2108.13264", "CorpusId": 237353084 }, "url": "https://www.semanticscholar.org/paper/558ca2e8c7eb56edd77a52b084e6cc24dffe5bcd", "referenceCount": 121, "citationCount": 502, "influentialCitationCount": 79, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Efficient Transformers in Reinforcement Learning using Actor-Learner Distillation", "abstract": "Many real-world applications such as robotics provide hard constraints on power and compute that limit the viable model complexity of Reinforcement Learning (RL) agents. Similarly, in many distributed RL settings, acting is done on un-accelerated hardware such as CPUs, which likewise restricts model size to prevent intractable experiment run times. These \"actor-latency\" constrained settings present a major obstruction to the scaling up of model complexity that has recently been extremely successful in supervised learning. To be able to utilize large model capacity while still operating within the limits imposed by the system during acting, we develop an \"Actor-Learner Distillation\" (ALD) procedure that leverages a continual form of distillation that transfers learning progress from a large capacity learner model to a small capacity actor model. As a case study, we develop this procedure in the context of partially-observable environments, where transformer models have had large improvements over LSTMs recently, at the cost of significantly higher computational complexity.
With transformer models as the learner and LSTMs as the actor, we demonstrate in several challenging memory environments that using Actor-Learner Distillation recovers the clear sample-efficiency gains of the transformer learner model while maintaining the fast inference and reduced total training time of the LSTM actor model.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Emilio Parisotto", "R. Salakhutdinov" ], "externalIds": { "DBLP": "conf/iclr/ParisottoS21", "ArXiv": "2104.01655", "CorpusId": 233025422 }, "url": "https://www.semanticscholar.org/paper/cd37fee4da0d4483322d6fa3cc67af9ed8c07be6", "referenceCount": 36, "citationCount": 37, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Action-Conditional Recurrent Kalman Networks For Forward and Inverse Dynamics Learning", "abstract": "Estimating accurate forward and inverse dynamics models is a crucial component of model-based control for sophisticated robots such as robots driven by hydraulics, artificial muscles, or robots dealing with different contact situations. Analytic models to such processes are often unavailable or inaccurate due to complex hysteresis effects, unmodelled friction and stiction phenomena, and unknown effects during contact situations. A promising approach is to obtain spatio-temporal models in a data-driven way using recurrent neural networks, as they can overcome those issues. However, such models often do not meet accuracy demands sufficiently, degenerate in performance for the required high sampling frequencies and cannot provide uncertainty estimates. We adopt a recent probabilistic recurrent neural network architecture, called Recurrent Kalman Networks (RKNs), to model learning by conditioning its transition dynamics on the control actions. RKNs outperform standard recurrent networks such as LSTMs on many state estimation tasks. Inspired by Kalman filters, the RKN provides an elegant way to achieve action conditioning within its recurrent cell by leveraging additive interactions between the current latent state and the action variables. We present two architectures, one for forward model learning and one for inverse model learning. Both architectures significantly outperform existing model learning frameworks as well as analytical models in terms of prediction performance on a variety of real robot dynamics models.", "year": 2020, "venue": "Conference on Robot Learning", "authors": [ "Vaisakh Shaj", "P. Becker", "Dieter Buchler", "Harit Pandya", "N. V. Duijkeren", "C. J. Taylor", "Marc Hanheide", "G. Neumann" ], "externalIds": { "DBLP": "journals/corr/abs-2010-10201", "MAG": "3094010042", "ArXiv": "2010.10201", "DOI": "10.5445/IR/1000125269", "CorpusId": 224803315 }, "url": "https://www.semanticscholar.org/paper/890951ff12f9872d64f6c41c89353c2b42f4a513", "referenceCount": 29, "citationCount": 11, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "HiPPO: Recurrent Memory with Optimal Polynomial Projections", "abstract": "A central problem in learning from sequential data is representing cumulative history in an incremental fashion as more data is processed. We introduce a general framework (HiPPO) for the online compression of continuous signals and discrete time series by projection onto polynomial bases. 
Given a measure that specifies the importance of each time step in the past, HiPPO produces an optimal solution to a natural online function approximation problem. As special cases, our framework yields a short derivation of the recent Legendre Memory Unit (LMU) from first principles, and generalizes the ubiquitous gating mechanism of recurrent neural networks such as GRUs. This formal framework yields a new memory update mechanism (HiPPO-LegS) that scales through time to remember all history, avoiding priors on the timescale. HiPPO-LegS enjoys the theoretical benefits of timescale robustness, fast updates, and bounded gradients. By incorporating the memory dynamics into recurrent neural networks, HiPPO RNNs can empirically capture complex temporal dependencies. On the benchmark permuted MNIST dataset, HiPPO-LegS sets a new state-of-the-art accuracy of 98.3%. Finally, on a novel trajectory classification task testing robustness to out-of-distribution timescales and missing data, HiPPO-LegS outperforms RNN and neural ODE baselines by 25-40% accuracy.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Albert Gu", "Tri Dao", "Stefano Ermon", "A. Rudra", "C. Ré" ], "externalIds": { "ArXiv": "2008.07669", "MAG": "3099512283", "DBLP": "conf/nips/GuDERR20", "CorpusId": 221150566 }, "url": "https://www.semanticscholar.org/paper/0964490205fdc38c2f0980c9d778069089ca92e3", "referenceCount": 81, "citationCount": 295, "influentialCitationCount": 36, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "dm_control: Software and Tasks for Continuous Control", "abstract": null, "year": 2020, "venue": "Softw. Impacts", "authors": [ "Yuval Tassa", "S. Tunyasuvunakool", "Alistair Muldal", "Yotam Doron", "Siqi Liu", "Steven Bohez", "J. Merel", "Tom Erez", "T. Lillicrap", "N. Heess" ], "externalIds": { "MAG": "3036619998", "DBLP": "journals/simpa/Tunyasuvunakool20", "ArXiv": "2006.12983", "DOI": "10.1016/j.simpa.2020.100022", "CorpusId": 219980295 }, "url": "https://www.semanticscholar.org/paper/8ba600c169f0d2422625822223976bce562eabe1", "referenceCount": 49, "citationCount": 325, "influentialCitationCount": 59, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Objective Mismatch in Model-based Reinforcement Learning", "abstract": "Model-based reinforcement learning (MBRL) has been shown to be a powerful framework for data-efficiently learning control of continuous tasks. Recent work in MBRL has mostly focused on using more advanced function approximators and planning schemes, with little development of the general framework. In this paper, we identify a fundamental issue of the standard MBRL framework -- what we call the objective mismatch issue. Objective mismatch arises when one objective is optimized in the hope that a second, often uncorrelated, metric will also be optimized. In the context of MBRL, we characterize the objective mismatch between training the forward dynamics model w.r.t.~the likelihood of the one-step ahead prediction, and the overall goal of improving performance on a downstream control task. For example, this issue can emerge with the realization that dynamics models effective for a specific task do not necessarily need to be globally accurate, and vice versa globally accurate models might not be sufficiently accurate locally to obtain good control performance on a specific task. 
In our experiments, we study this objective mismatch issue and demonstrate that the likelihood of one-step ahead predictions is not always correlated with control performance. This observation highlights a critical limitation in the MBRL framework which will require further research to be fully understood and addressed. We propose an initial method to mitigate the mismatch issue by re-weighting dynamics model training. Building on it, we conclude with a discussion about other potential directions of research for addressing this issue.", "year": 2020, "venue": "Conference on Learning for Dynamics & Control", "authors": [ "Nathan Lambert", "Brandon Amos", "Omry Yadan", "R. Calandra" ], "externalIds": { "ArXiv": "2002.04523", "DBLP": "journals/corr/abs-2002-04523", "MAG": "3107736070", "CorpusId": 210155681 }, "url": "https://www.semanticscholar.org/paper/ecbdc83a1aa8d196943d8997300be871b1c7c2dc", "referenceCount": 34, "citationCount": 87, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Dream to Control: Learning Behaviors by Latent Imagination", "abstract": "To select effective actions in complex environments, intelligent agents need to generalize from past experience. World models can represent knowledge about the environment to facilitate such generalization. While learning world models from high-dimensional sensory inputs is becoming feasible through deep learning, there are many potential ways for deriving behaviors from them. We present Dreamer, a reinforcement learning agent that solves long-horizon tasks purely by latent imagination. We efficiently learn behaviors by backpropagating analytic gradients of learned state values through trajectories imagined in the compact state space of a learned world model. On 20 challenging visual control tasks, Dreamer exceeds existing approaches in data-efficiency, computation time, and final performance.", "year": 2019, "venue": "International Conference on Learning Representations", "authors": [ "Danijar Hafner", "T. Lillicrap", "Jimmy Ba", "Mohammad Norouzi" ], "externalIds": { "MAG": "2995298643", "ArXiv": "1912.01603", "DBLP": "conf/iclr/HafnerLB020", "CorpusId": 208547755 }, "url": "https://www.semanticscholar.org/paper/0cc956565c7d249d4197eeb1dbab6523c648b2c9", "referenceCount": 71, "citationCount": 1084, "influentialCitationCount": 194, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PyTorch: An Imperative Style, High-Performance Deep Learning Library", "abstract": "Deep learning frameworks have often focused on either usability or speed, but not both. PyTorch is a machine learning library that shows that these two goals are in fact compatible: it was designed from first principles to support an imperative and Pythonic programming style that supports code as a model, makes debugging easy and is consistent with other popular scientific computing libraries, while remaining efficient and supporting hardware accelerators such as GPUs. In this paper, we detail the principles that drove the implementation of PyTorch and how they are reflected in its architecture. We emphasize that every aspect of PyTorch is a regular Python program under the full control of its user. We also explain how the careful and pragmatic implementation of the key components of its runtime enables them to work together to achieve compelling performance. 
We demonstrate the efficiency of individual subsystems, as well as the overall speed of PyTorch on several commonly used benchmarks.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Adam Paszke", "Sam Gross", "Francisco Massa", "Adam Lerer", "James Bradbury", "Gregory Chanan", "Trevor Killeen", "Zeming Lin", "N. Gimelshein", "L. Antiga", "Alban Desmaison", "Andreas Köpf", "E. Yang", "Zach DeVito", "Martin Raison", "Alykhan Tejani", "Sasank Chilamkurthy", "Benoit Steiner", "Lu Fang", "Junjie Bai", "Soumith Chintala" ], "externalIds": { "MAG": "2970971581", "DBLP": "journals/corr/abs-1912-01703", "ArXiv": "1912.01703", "CorpusId": 202786778 }, "url": "https://www.semanticscholar.org/paper/3c8a456509e6c0805354bd40a35e3f2dbf8069b1", "referenceCount": 39, "citationCount": 36158, "influentialCitationCount": 3694, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Grandmaster level in StarCraft II using multi-agent reinforcement learning", "abstract": null, "year": 2019, "venue": "Nature", "authors": [ "O. Vinyals", "Igor Babuschkin", "Wojciech M. Czarnecki", "Michaël Mathieu", "Andrew Dudzik", "Junyoung Chung", "David Choi", "Richard Powell", "T. Ewalds", "Petko Georgiev", "Junhyuk Oh", "Dan Horgan", "M. Kroiss", "Ivo Danihelka", "Aja Huang", "L. Sifre", "Trevor Cai", "J. Agapiou", "Max Jaderberg", "A. Vezhnevets", "Rémi Leblond", "Tobias Pohlen", "Valentin Dalibard", "D. Budden", "Yury Sulsky", "James Molloy", "T. Paine", "Caglar Gulcehre", "Ziyun Wang", "T. Pfaff", "Yuhuai Wu", "Roman Ring", "Dani Yogatama", "Dario Wünsch", "Katrina McKinney", "Oliver Smith", "T. Schaul", "T. Lillicrap", "K. Kavukcuoglu", "D. Hassabis", "C. Apps", "David Silver" ], "externalIds": { "DBLP": "journals/nature/VinyalsBCMDCCPE19", "MAG": "2982316857", "DOI": "10.1038/s41586-019-1724-z", "CorpusId": 204972004, "PubMed": "31666705" }, "url": "https://www.semanticscholar.org/paper/361c00b22e29d0816ca896513d2c165e26399821", "referenceCount": 59, "citationCount": 3229, "influentialCitationCount": 151, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Generalization of Reinforcement Learners with Working and Episodic Memory", "abstract": "Memory is an important aspect of intelligence and plays a role in many deep reinforcement learning models. However, little progress has been made in understanding when specific memory systems help more than others and how well they generalize. The field also has yet to see a prevalent consistent and rigorous approach for evaluating agent performance on holdout data. In this paper, we aim to develop a comprehensive methodology to test different kinds of memory in an agent and assess how well the agent can apply what it learns in training to a holdout set that differs from the training set along dimensions that we suggest are relevant for evaluating memory-specific generalization. To that end, we first construct a diverse set of memory tasks that allow us to evaluate test-time generalization across multiple dimensions. Second, we develop and perform multiple ablations on an agent architecture that combines multiple memory systems, observe its baseline models, and investigate its performance against the task suite.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Meire Fortunato", "Melissa Tan", "Ryan Faulkner", "S. Hansen", "Adrià Puigdomènech Badia", "Gavin Buttimore", "Charlie Deck", "Joel Z. Leibo", "C. 
Blundell" ], "externalIds": { "MAG": "2970567787", "DBLP": "journals/corr/abs-1910-13406", "ArXiv": "1910.13406", "CorpusId": 202769991 }, "url": "https://www.semanticscholar.org/paper/f47374c67ae0d45454feb2ba354d05f0da2889d7", "referenceCount": 31, "citationCount": 65, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Root Mean Square Layer Normalization", "abstract": "Layer normalization (LayerNorm) has been successfully applied to various deep neural networks to help stabilize training and boost model convergence because of its capability in handling re-centering and re-scaling of both inputs and weight matrix. However, the computational overhead introduced by LayerNorm makes these improvements expensive and significantly slows the underlying network, e.g. RNN in particular. In this paper, we hypothesize that re-centering invariance in LayerNorm is dispensable and propose root mean square layer normalization, or RMSNorm. RMSNorm regularizes the summed inputs to a neuron in one layer according to root mean square (RMS), giving the model re-scaling invariance property and implicit learning rate adaptation ability. RMSNorm is computationally simpler and thus more efficient than LayerNorm. We also present partial RMSNorm, or pRMSNorm where the RMS is estimated from p% of the summed inputs without breaking the above properties. Extensive experiments on several tasks using diverse network architectures show that RMSNorm achieves comparable performance against LayerNorm but reduces the running time by 7%~64% on different models. Source code is available at https://github.com/bzhangGo/rmsnorm.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Biao Zhang", "Rico Sennrich" ], "externalIds": { "ArXiv": "1910.07467", "DBLP": "conf/nips/ZhangS19a", "MAG": "2981040094", "DOI": "10.5167/UZH-177483", "CorpusId": 113405151 }, "url": "https://www.semanticscholar.org/paper/10eda4521c032adabaa8e70d6569e17370b29dcd", "referenceCount": 38, "citationCount": 373, "influentialCitationCount": 25, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Soft Actor-Critic for Discrete Action Settings", "abstract": "Soft Actor-Critic is a state-of-the-art reinforcement learning algorithm for continuous action settings that is not applicable to discrete action settings. Many important settings involve discrete actions, however, and so here we derive an alternative version of the Soft Actor-Critic algorithm that is applicable to discrete action settings. We then show that, even without any hyperparameter tuning, it is competitive with the tuned model-free state-of-the-art on a selection of games from the Atari suite.", "year": 2019, "venue": "arXiv.org", "authors": [ "Petros Christodoulou" ], "externalIds": { "MAG": "2981037657", "ArXiv": "1910.07207", "DBLP": "journals/corr/abs-1910-07207", "CorpusId": 204734462 }, "url": "https://www.semanticscholar.org/paper/0a0866ec7180bbf87b1c87ed48bd4fa00574b814", "referenceCount": 11, "citationCount": 243, "influentialCitationCount": 32, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Superhuman AI for multiplayer poker", "abstract": "AI now masters six-player poker Computer programs have shown superiority over humans in two-player games such as chess, Go, and heads-up, no-limit Texas hold'em poker. 
However, poker games usually include six players—a much trickier challenge for artificial intelligence than the two-player variant. Brown and Sandholm developed a program, dubbed Pluribus, that learned how to play six-player no-limit Texas hold'em by playing against five copies of itself (see the Perspective by Blair and Saffidine). When pitted against five elite professional poker players, or with five copies of Pluribus playing against one professional, the computer performed significantly better than humans over the course of 10,000 hands of poker. Science, this issue p. 885; see also p. 864 An AI dubbed Pluribus performs significantly better than human professionals in six-player no-limit Texas hold’em poker. In recent years there have been great strides in artificial intelligence (AI), with games often serving as challenge problems, benchmarks, and milestones for progress. Poker has served for decades as such a challenge problem. Past successes in such benchmarks, including poker, have been limited to two-player games. However, poker in particular is traditionally played with more than two players. Multiplayer games present fundamental additional issues beyond those in two-player games, and multiplayer poker is a recognized AI milestone. In this paper we present Pluribus, an AI that we show is stronger than top human professionals in six-player no-limit Texas hold’em poker, the most popular form of poker played by humans.", "year": 2019, "venue": "Science", "authors": [ "Noam Brown", "T. Sandholm" ], "externalIds": { "MAG": "2960876848", "DOI": "10.1126/science.aay2400", "CorpusId": 195892791, "PubMed": "31296650" }, "url": "https://www.semanticscholar.org/paper/2ee463bba9d4db6aec0eab17e54431a6dc80bf17", "referenceCount": 30, "citationCount": 645, "influentialCitationCount": 36, "isOpenAccess": true, "fieldsOfStudy": [ "Psychology", "Medicine" ] }, { "title": "Temporal Parallelization of Bayesian Smoothers", "abstract": "This article presents algorithms for temporal parallelization of Bayesian smoothers. We define the elements and the operators to pose these problems as the solutions to all-prefix-sums operations for which efficient parallel scan-algorithms are available. We present the temporal parallelization of the general Bayesian filtering and smoothing equations, and specialize them to linear/Gaussian models. The advantage of the proposed algorithms is that they reduce the linear complexity of standard smoothing algorithms with respect to time to logarithmic.", "year": 2019, "venue": "IEEE Transactions on Automatic Control", "authors": [ "Simo Särkkä", "Á. F. García-Fernández" ], "externalIds": { "ArXiv": "1905.13002", "DBLP": "journals/tac/SarkkaG21", "MAG": "3008520999", "DOI": "10.1109/TAC.2020.2976316", "CorpusId": 213695560 }, "url": "https://www.semanticscholar.org/paper/df4d76b8882cc4bd27e6735507a9b00aea4b4d99", "referenceCount": 36, "citationCount": 30, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Recurrent Kalman Networks: Factorized Inference in High-Dimensional Deep Feature Spaces", "abstract": "In order to integrate uncertainty estimates into deep time-series modelling, Kalman Filters (KFs) (Kalman et al., 1960) have been integrated with deep learning models, however, such approaches typically rely on approximate inference tech- niques such as variational inference which makes learning more complex and often less scalable due to approximation errors. 
We propose a new deep approach to Kalman filtering which can be learned directly in an end-to-end manner using backpropagation without additional approximations. Our approach uses a high-dimensional factorized latent state representation for which the Kalman updates simplify to scalar operations and thus avoids hard to backpropagate, computationally heavy and potentially unstable matrix inversions. Moreover, we use locally linear dynamic models to efficiently propagate the latent state to the next time step. The resulting network architecture, which we call Recurrent Kalman Network (RKN), can be used for any time-series data, similar to a LSTM (Hochreiter & Schmidhuber, 1997) but uses an explicit representation of uncertainty. As shown by our experiments, the RKN obtains much more accurate uncertainty estimates than an LSTM or Gated Recurrent Units (GRUs) (Cho et al., 2014) while also showing a slightly improved prediction performance and outperforms various recent generative models on an image imputation task.", "year": 2019, "venue": "International Conference on Machine Learning", "authors": [ "P. Becker", "Harit Pandya", "Gregor H. W. Gebhardt", "Cheng Zhao", "James Taylor", "G. Neumann" ], "externalIds": { "MAG": "2950396620", "DBLP": "journals/corr/abs-1905-07357", "ArXiv": "1905.07357", "CorpusId": 86625937 }, "url": "https://www.semanticscholar.org/paper/a753a9a34fc19e43e03786f4a01011146f7c6e5c", "referenceCount": 25, "citationCount": 82, "influentialCitationCount": 16, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Soft Actor-Critic Algorithms and Applications", "abstract": "Model-free deep reinforcement learning (RL) algorithms have been successfully applied to a range of challenging sequential decision making and control tasks. However, these methods typically suffer from two major challenges: high sample complexity and brittleness to hyperparameters. Both of these challenges limit the applicability of such methods to real-world domains. In this paper, we describe Soft Actor-Critic (SAC), our recently introduced off-policy actor-critic algorithm based on the maximum entropy RL framework. In this framework, the actor aims to simultaneously maximize expected return and entropy. That is, to succeed at the task while acting as randomly as possible. We extend SAC to incorporate a number of modifications that accelerate training and improve stability with respect to the hyperparameters, including a constrained formulation that automatically tunes the temperature hyperparameter. We systematically evaluate SAC on a range of benchmark tasks, as well as real-world challenging tasks such as locomotion for a quadrupedal robot and robotic manipulation with a dexterous hand. With these improvements, SAC achieves state-of-the-art performance, outperforming prior on-policy and off-policy methods in sample-efficiency and asymptotic performance. Furthermore, we demonstrate that, in contrast to other off-policy algorithms, our approach is very stable, achieving similar performance across different random seeds. These results suggest that SAC is a promising candidate for learning in real-world robotics tasks.", "year": 2018, "venue": "arXiv.org", "authors": [ "Tuomas Haarnoja", "Aurick Zhou", "Kristian Hartikainen", "G. Tucker", "Sehoon Ha", "Jie Tan", "Vikash Kumar", "Henry Zhu", "Abhishek Gupta", "P. Abbeel", "S. 
Levine" ], "externalIds": { "DBLP": "journals/corr/abs-1812-05905", "MAG": "2904246096", "ArXiv": "1812.05905", "CorpusId": 55703664 }, "url": "https://www.semanticscholar.org/paper/12c0751b4f51ed833172a713b7e32390032ead93", "referenceCount": 49, "citationCount": 2036, "influentialCitationCount": 475, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Learning Latent Dynamics for Planning from Pixels", "abstract": "Planning has been very successful for control tasks with known environment dynamics. To leverage planning in unknown environments, the agent needs to learn the dynamics from interactions with the world. However, learning dynamics models that are accurate enough for planning has been a long-standing challenge, especially in image-based domains. We propose the Deep Planning Network (PlaNet), a purely model-based agent that learns the environment dynamics from images and chooses actions through fast online planning in latent space. To achieve high performance, the dynamics model must accurately predict the rewards ahead for multiple time steps. We approach this using a latent dynamics model with both deterministic and stochastic transition components. Moreover, we propose a multi-step variational inference objective that we name latent overshooting. Using only pixel observations, our agent solves continuous control tasks with contact dynamics, partial observability, and sparse rewards, which exceed the difficulty of tasks that were previously solved by planning with learned models. PlaNet uses substantially fewer episodes and reaches final performance close to and sometimes higher than strong model-free algorithms.", "year": 2018, "venue": "International Conference on Machine Learning", "authors": [ "Danijar Hafner", "T. Lillicrap", "Ian S. Fischer", "Ruben Villegas", "David R Ha", "Honglak Lee", "James Davidson" ], "externalIds": { "DBLP": "conf/icml/HafnerLFVHLD19", "MAG": "2950004691", "ArXiv": "1811.04551", "CorpusId": 53280207 }, "url": "https://www.semanticscholar.org/paper/fea3e63c97c7292dc6fbcb3ffe7131eb54053986", "referenceCount": 67, "citationCount": 1246, "influentialCitationCount": 192, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Recurrent World Models Facilitate Policy Evolution", "abstract": "A generative recurrent neural network is quickly trained in an unsupervised manner to model popular reinforcement learning environments through compressed spatio-temporal representations. The world model's extracted features are fed into compact and simple policies trained by evolution, achieving state of the art results in various environments. We also train our agent entirely inside of an environment generated by its own internal world model, and transfer this policy back into the actual environment. Interactive version of this paper is available at https://worldmodels.github.io", "year": 2018, "venue": "Neural Information Processing Systems", "authors": [ "David R Ha", "J. 
Schmidhuber" ], "externalIds": { "MAG": "2953072278", "DBLP": "journals/corr/abs-1809-01999", "ArXiv": "1809.01999", "CorpusId": 52171619 }, "url": "https://www.semanticscholar.org/paper/41cca0b0a27ba363ca56e7033569aeb1922b0ac9", "referenceCount": 101, "citationCount": 795, "influentialCitationCount": 77, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Soft Actor-Critic: Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor", "abstract": "Model-free deep reinforcement learning (RL) algorithms have been demonstrated on a range of challenging decision making and control tasks. However, these methods typically suffer from two major challenges: very high sample complexity and brittle convergence properties, which necessitate meticulous hyperparameter tuning. Both of these challenges severely limit the applicability of such methods to complex, real-world domains. In this paper, we propose soft actor-critic, an off-policy actor-critic deep RL algorithm based on the maximum entropy reinforcement learning framework. In this framework, the actor aims to maximize expected reward while also maximizing entropy. That is, to succeed at the task while acting as randomly as possible. Prior deep RL methods based on this framework have been formulated as Q-learning methods. By combining off-policy updates with a stable stochastic actor-critic formulation, our method achieves state-of-the-art performance on a range of continuous control benchmark tasks, outperforming prior on-policy and off-policy methods. Furthermore, we demonstrate that, in contrast to other off-policy algorithms, our approach is very stable, achieving very similar performance across different random seeds.", "year": 2018, "venue": "International Conference on Machine Learning", "authors": [ "Tuomas Haarnoja", "Aurick Zhou", "P. Abbeel", "S. Levine" ], "externalIds": { "MAG": "2962902376", "ArXiv": "1801.01290", "DBLP": "journals/corr/abs-1801-01290", "CorpusId": 28202810 }, "url": "https://www.semanticscholar.org/paper/811df72e210e20de99719539505da54762a11c6d", "referenceCount": 42, "citationCount": 6859, "influentialCitationCount": 1509, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Uncertainty-driven Imagination for Continuous Deep Reinforcement Learning", "abstract": "Continuous control of high-dimensional systems can be achieved by current state-of-the-art reinforcement learning methods such as the Deep Deterministic Policy Gradient algorithm, but needs a significant amount of data samples. For real-world systems, this can be an obstacle since excessive data collection can be expensive, tedious or lead to physical damage. The main incentive of this work is to keep the advantages of model-free Q-learning while minimizing real-world interaction by the employment of a dynamics model learned in parallel. To counteract adverse effects of imaginary rollouts with an inaccurate model, a notion of uncertainty is introduced, to make use of artificial data only in cases of high uncertainty. We evaluate our approach on three simulated robot tasks and achieve faster learning by at least 40 per cent in comparison to vanilla DDPG with multiple updates.", "year": 2017, "venue": "Conference on Robot Learning", "authors": [ "Gabriel Kalweit", "J. 
Boedecker" ], "externalIds": { "MAG": "2774354230", "DBLP": "conf/corl/KalweitB17", "CorpusId": 4083410 }, "url": "https://www.semanticscholar.org/paper/6e745266a5c85980e75f9d637d4d23cfc030cfaf", "referenceCount": 29, "citationCount": 122, "influentialCitationCount": 8, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Proximal Policy Optimization Algorithms", "abstract": "We propose a new family of policy gradient methods for reinforcement learning, which alternate between sampling data through interaction with the environment, and optimizing a \"surrogate\" objective function using stochastic gradient ascent. Whereas standard policy gradient methods perform one gradient update per data sample, we propose a novel objective function that enables multiple epochs of minibatch updates. The new methods, which we call proximal policy optimization (PPO), have some of the benefits of trust region policy optimization (TRPO), but they are much simpler to implement, more general, and have better sample complexity (empirically). Our experiments test PPO on a collection of benchmark tasks, including simulated robotic locomotion and Atari game playing, and we show that PPO outperforms other online policy gradient methods, and overall strikes a favorable balance between sample complexity, simplicity, and wall-time.", "year": 2017, "venue": "arXiv.org", "authors": [ "John Schulman", "Filip Wolski", "Prafulla Dhariwal", "Alec Radford", "Oleg Klimov" ], "externalIds": { "MAG": "2736601468", "ArXiv": "1707.06347", "DBLP": "journals/corr/SchulmanWDRK17", "CorpusId": 28695052 }, "url": "https://www.semanticscholar.org/paper/dce6f9d4017b1785979e7520fd0834ef8cf02f4b", "referenceCount": 14, "citationCount": 14872, "influentialCitationCount": 3164, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Attention is All you Need", "abstract": "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Ashish Vaswani", "Noam M. Shazeer", "Niki Parmar", "Jakob Uszkoreit", "Llion Jones", "Aidan N. 
Gomez", "Lukasz Kaiser", "Illia Polosukhin" ], "externalIds": { "MAG": "2963403868", "DBLP": "conf/nips/VaswaniSPUJGKP17", "ArXiv": "1706.03762", "CorpusId": 13756489 }, "url": "https://www.semanticscholar.org/paper/204e3073870fae3d05bcbc2f6a8e263d9b72e776", "referenceCount": 41, "citationCount": 104988, "influentialCitationCount": 15363, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Target-driven visual navigation in indoor scenes using deep reinforcement learning", "abstract": "Two less addressed issues of deep reinforcement learning are (1) lack of generalization capability to new goals, and (2) data inefficiency, i.e., the model requires several (and often costly) episodes of trial and error to converge, which makes it impractical to be applied to real-world scenarios. In this paper, we address these two issues and apply our model to target-driven visual navigation. To address the first issue, we propose an actor-critic model whose policy is a function of the goal as well as the current state, which allows better generalization. To address the second issue, we propose the AI2-THOR framework, which provides an environment with high-quality 3D scenes and a physics engine. Our framework enables agents to take actions and interact with objects. Hence, we can collect a huge number of training samples efficiently. We show that our proposed method (1) converges faster than the state-of-the-art deep reinforcement learning methods, (2) generalizes across targets and scenes, (3) generalizes to a real robot scenario with a small amount of fine-tuning (although the model is trained in simulation), (4) is end-to-end trainable and does not need feature engineering, feature matching between frames or 3D reconstruction of the environment.", "year": 2016, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Yuke Zhu", "Roozbeh Mottaghi", "Eric Kolve", "Joseph J. Lim", "A. Gupta", "Li Fei-Fei", "Ali Farhadi" ], "externalIds": { "DBLP": "journals/corr/ZhuMKLGFF16", "MAG": "2522340145", "ArXiv": "1609.05143", "DOI": "10.1109/ICRA.2017.7989381", "CorpusId": 2305273 }, "url": "https://www.semanticscholar.org/paper/7af7f2f539cd3479faae4c66bbef49b0f66202fa", "referenceCount": 55, "citationCount": 1417, "influentialCitationCount": 120, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Control of Memory, Active Perception, and Action in Minecraft", "abstract": "In this paper, we introduce a new set of reinforcement learning (RL) tasks in Minecraft (a flexible 3D world). We then use these tasks to systematically compare and contrast existing deep reinforcement learning (DRL) architectures with our new memory-based DRL architectures. These tasks are designed to emphasize, in a controllable manner, issues that pose challenges for RL methods including partial observability (due to first-person visual observations), delayed rewards, high-dimensional visual observations, and the need to use active perception in a correct manner so as to perform well in the tasks. While these tasks are conceptually simple to describe, by virtue of having all of these challenges simultaneously they are difficult for current DRL architectures. Additionally, we evaluate the generalization performance of the architectures on environments not used during training. 
The experimental results show that our new architectures generalize to unseen environments better than existing DRL architectures.", "year": 2016, "venue": "International Conference on Machine Learning", "authors": [ "Junhyuk Oh", "Valliappa Chockalingam", "Satinder Singh", "Honglak Lee" ], "externalIds": { "DBLP": "conf/icml/OhCSL16", "ArXiv": "1605.09128", "MAG": "2418628973", "CorpusId": 10899248 }, "url": "https://www.semanticscholar.org/paper/5129a9cbb6de3c6579f6a7d974394d392ac29829", "referenceCount": 42, "citationCount": 290, "influentialCitationCount": 20, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Memory-based control with recurrent neural networks", "abstract": "Partially observed control problems are a challenging aspect of reinforcement learning. We extend two related, model-free algorithms for continuous control -- deterministic policy gradient and stochastic value gradient -- to solve partially observed domains using recurrent neural networks trained with backpropagation through time. \nWe demonstrate that this approach, coupled with long-short term memory is able to solve a variety of physical control problems exhibiting an assortment of memory requirements. These include the short-term integration of information from noisy sensors and the identification of system parameters, as well as long-term memory problems that require preserving information over many time steps. We also demonstrate success on a combined exploration and memory problem in the form of a simplified version of the well-known Morris water maze task. Finally, we show that our approach can deal with high-dimensional observations by learning directly from pixels. \nWe find that recurrent deterministic and stochastic policies are able to learn similarly good solutions to these tasks, including the water maze where the agent must learn effective search strategies.", "year": 2015, "venue": "arXiv.org", "authors": [ "N. Heess", "Jonathan J. Hunt", "T. Lillicrap", "David Silver" ], "externalIds": { "DBLP": "journals/corr/HeessHLS15", "ArXiv": "1512.04455", "MAG": "2291973609", "CorpusId": 7399885 }, "url": "https://www.semanticscholar.org/paper/bcfe915d5983dffbfe95801e9e6757205b3a4723", "referenceCount": 33, "citationCount": 294, "influentialCitationCount": 41, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Recurrent Q-Learning for Partially Observable MDPs", "abstract": "Deep Reinforcement Learning has yielded proficient controllers for complex tasks. However, these controllers have limited memory and rely on being able to perceive the complete game screen at each decision point. To address these shortcomings, this article investigates the effects of adding recurrency to a Deep Q-Network (DQN) by replacing the first post-convolutional fully-connected layer with a recurrent LSTM. The resulting \\textit{Deep Recurrent Q-Network} (DRQN), although capable of seeing only a single frame at each timestep, successfully integrates information through time and replicates DQN's performance on standard Atari games and partially observed equivalents featuring flickering game screens. Additionally, when trained with partial observations and evaluated with incrementally more complete observations, DRQN's performance scales as a function of observability. Conversely, when trained with full observations and evaluated with partial observations, DRQN's performance degrades less than DQN's. 
Thus, given the same length of history, recurrency is a viable alternative to stacking a history of frames in the DQN's input layer and while recurrency confers no systematic advantage when learning to play the game, the recurrent net can better adapt at evaluation time if the quality of observations changes.", "year": 2015, "venue": "AAAI Fall Symposia", "authors": [ "Matthew J. Hausknecht", "P. Stone" ], "externalIds": { "DBLP": "journals/corr/HausknechtS15", "ArXiv": "1507.06527", "MAG": "2952684340", "CorpusId": 8696662 }, "url": "https://www.semanticscholar.org/paper/f5f323e62acb75f785e00b4c90ace16f1690076f", "referenceCount": 17, "citationCount": 1555, "influentialCitationCount": 172, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "On the Properties of Neural Machine Translation: Encoder–Decoder Approaches", "abstract": "Neural machine translation is a relatively new approach to statistical machine translation based purely on neural networks. The neural machine translation models often consist of an encoder and a decoder. The encoder extracts a fixed-length representation from a variable-length input sentence, and the decoder generates a correct translation from this representation. In this paper, we focus on analyzing the properties of the neural machine translation using two models; RNN Encoder--Decoder and a newly proposed gated recursive convolutional neural network. We show that the neural machine translation performs relatively well on short sentences without unknown words, but its performance degrades rapidly as the length of the sentence and the number of unknown words increase. Furthermore, we find that the proposed gated recursive convolutional network learns a grammatical structure of a sentence automatically.", "year": 2014, "venue": "SSST@EMNLP", "authors": [ "Kyunghyun Cho", "B. V. Merrienboer", "Dzmitry Bahdanau", "Yoshua Bengio" ], "externalIds": { "DBLP": "journals/corr/ChoMBB14", "MAG": "2964199361", "ArXiv": "1409.1259", "ACL": "W14-4012", "DOI": "10.3115/v1/W14-4012", "CorpusId": 11336213 }, "url": "https://www.semanticscholar.org/paper/1eb09fecd75eb27825dce4f964b97f4f5cc399d7", "referenceCount": 15, "citationCount": 6371, "influentialCitationCount": 868, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Playing Atari with Deep Reinforcement Learning", "abstract": "We present the first deep learning model to successfully learn control policies directly from high-dimensional sensory input using reinforcement learning. The model is a convolutional neural network, trained with a variant of Q-learning, whose input is raw pixels and whose output is a value function estimating future rewards. We apply our method to seven Atari 2600 games from the Arcade Learning Environment, with no adjustment of the architecture or learning algorithm. We find that it outperforms all previous approaches on six of the games and surpasses a human expert on three of them.", "year": 2013, "venue": "arXiv.org", "authors": [ "Volodymyr Mnih", "K. Kavukcuoglu", "David Silver", "Alex Graves", "Ioannis Antonoglou", "Daan Wierstra", "Martin A. 
Riedmiller" ], "externalIds": { "DBLP": "journals/corr/MnihKSGAWR13", "MAG": "1757796397", "ArXiv": "1312.5602", "CorpusId": 15238391 }, "url": "https://www.semanticscholar.org/paper/2319a491378867c7049b3da055c5df60e1671158", "referenceCount": 30, "citationCount": 11117, "influentialCitationCount": 1357, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Arcade Learning Environment: An Evaluation Platform for General Agents", "abstract": "In this article we introduce the Arcade Learning Environment (ALE): both a challenge problem and a platform and methodology for evaluating the development of general, domain-independent AI technology. ALE provides an interface to hundreds of Atari 2600 game environments, each one different, interesting, and designed to be a challenge for human players. ALE presents significant research challenges for reinforcement learning, model learning, model-based planning, imitation learning, transfer learning, and intrinsic motivation. Most importantly, it provides a rigorous testbed for evaluating and comparing approaches to these problems. We illustrate the promise of ALE by developing and benchmarking domain-independent agents designed using well-established AI techniques for both reinforcement learning and planning. In doing so, we also propose an evaluation methodology made possible by ALE, reporting empirical results on over 55 different games. All of the software, including the benchmark agents, is publicly available.", "year": 2012, "venue": "Journal of Artificial Intelligence Research", "authors": [ "Marc G. Bellemare", "Yavar Naddaf", "J. Veness", "Michael Bowling" ], "externalIds": { "ArXiv": "1207.4708", "DBLP": "journals/jair/BellemareNVB13", "MAG": "2150468603", "DOI": "10.1613/jair.3912", "CorpusId": 1552061 }, "url": "https://www.semanticscholar.org/paper/f82e4ff4f003581330338aaae71f60316e58dd26", "referenceCount": 36, "citationCount": 2802, "influentialCitationCount": 423, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A contextual-bandit approach to personalized news article recommendation", "abstract": "Personalized web services strive to adapt their services (advertisements, news articles, etc.) to individual users by making use of both content and user information. Despite a few recent advances, this problem remains challenging for at least two reasons. First, web service is featured with dynamically changing pools of content, rendering traditional collaborative filtering methods inapplicable. Second, the scale of most web services of practical interest calls for solutions that are both fast in learning and computation.\n In this work, we model personalized recommendation of news articles as a contextual bandit problem, a principled approach in which a learning algorithm sequentially selects articles to serve users based on contextual information about the users and articles, while simultaneously adapting its article-selection strategy based on user-click feedback to maximize total user clicks.\n The contributions of this work are three-fold. First, we propose a new, general contextual bandit algorithm that is computationally efficient and well motivated from learning theory. Second, we argue that any bandit algorithm can be reliably evaluated offline using previously recorded random traffic. Finally, using this offline evaluation method, we successfully applied our new algorithm to a Yahoo! Front Page Today Module dataset containing over 33 million events. 
Results showed a 12.5% click lift compared to a standard context-free bandit algorithm, and the advantage becomes even greater when data gets more scarce.", "year": 2010, "venue": "The Web Conference", "authors": [ "Lihong Li", "Wei Chu", "J. Langford", "R. Schapire" ], "externalIds": { "ArXiv": "1003.0146", "DBLP": "journals/corr/abs-1003-0146", "MAG": "2112420033", "DOI": "10.1145/1772690.1772758", "CorpusId": 207178795 }, "url": "https://www.semanticscholar.org/paper/ec0072bc37f83f1a81459df43289613e04cc61e1", "referenceCount": 92, "citationCount": 2755, "influentialCitationCount": 390, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Solving Deep Memory POMDPs with Recurrent Policy Gradients", "abstract": null, "year": 2007, "venue": "International Conference on Artificial Neural Networks", "authors": [ "Daan Wierstra", "A. Förster", "Jan Peters", "J. Schmidhuber" ], "externalIds": { "MAG": "1662842982", "DBLP": "conf/icann/WierstraFPS07", "DOI": "10.1007/978-3-540-74690-4_71", "CorpusId": 14039355 }, "url": "https://www.semanticscholar.org/paper/92d009217b100882376ae5c90217da2e92471ad7", "referenceCount": 23, "citationCount": 182, "influentialCitationCount": 12, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Reinforcement Learning with Long Short-Term Memory", "abstract": "This paper presents reinforcement learning with a Long Short-Term Memory recurrent neural network: RL-LSTM. Model-free RL-LSTM using Advantage (λ) learning and directed exploration can solve non-Markovian tasks with long-term dependencies between relevant events. This is demonstrated in a T-maze task, as well as in a difficult variation of the pole balancing task.", "year": 2001, "venue": "Neural Information Processing Systems", "authors": [ "B. Bakker" ], "externalIds": { "DBLP": "conf/nips/Bakker01", "MAG": "2096533821", "CorpusId": 6627108 }, "url": "https://www.semanticscholar.org/paper/3b90b73fa0f904a2dc84bca4b3f80cbb51d7025f", "referenceCount": 14, "citationCount": 296, "influentialCitationCount": 35, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Planning and Acting in Partially Observable Stochastic Domains", "abstract": null, "year": 1998, "venue": "Artificial Intelligence", "authors": [ "L. Kaelbling", "M. Littman", "A. Cassandra" ], "externalIds": { "MAG": "2168359464", "DBLP": "journals/ai/KaelblingLC98", "DOI": "10.1016/S0004-3702(98)00023-X", "CorpusId": 5613003 }, "url": "https://www.semanticscholar.org/paper/116d7798c1123cf7fad4176e98f58fd49de4f8f1", "referenceCount": 78, "citationCount": 4582, "influentialCitationCount": 518, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Long Short-Term Memory", "abstract": "Learning to store information over extended time intervals by recurrent backpropagation takes a very long time, mostly because of insufficient, decaying error backflow. We briefly review Hochreiter's (1991) analysis of this problem, then address it by introducing a novel, efficient, gradient based method called long short-term memory (LSTM). Truncating the gradient where this does not do harm, LSTM can learn to bridge minimal time lags in excess of 1000 discrete-time steps by enforcing constant error flow through constant error carousels within special units. Multiplicative gate units learn to open and close access to the constant error flow. LSTM is local in space and time; its computational complexity per time step and weight is O. 1. 
Our experiments with artificial data involve local, distributed, real-valued, and noisy pattern representations. In comparisons with real-time recurrent learning, back propagation through time, recurrent cascade correlation, Elman nets, and neural sequence chunking, LSTM leads to many more successful runs, and learns much faster. LSTM also solves complex, artificial long-time-lag tasks that have never been solved by previous recurrent network algorithms.", "year": 1997, "venue": "Neural Computation", "authors": [ "Sepp Hochreiter", "J. Schmidhuber" ], "externalIds": { "MAG": "2064675550", "DBLP": "journals/neco/HochreiterS97", "DOI": "10.1162/neco.1997.9.8.1735", "CorpusId": 1915014, "PubMed": "9377276" }, "url": "https://www.semanticscholar.org/paper/2e9d221c206e9503ceb452302d68d10e293f2a10", "referenceCount": 48, "citationCount": 80986, "influentialCitationCount": 9250, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Reinforcement learning with hidden states", "abstract": null, "year": 1993, "venue": "", "authors": [ "Longxin Lin", "Tom Michael Mitchell" ], "externalIds": { "MAG": "1499371387", "DOI": "10.7551/mitpress/3116.003.0038", "CorpusId": 60745539 }, "url": "https://www.semanticscholar.org/paper/171b9d5f0f29388c1e638de55aed3d19c3524df5", "referenceCount": 0, "citationCount": 137, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Curious model-building control systems", "abstract": "A novel curious model-building control system is described which actively tries to provoke situations for which it learned to expect to learn something about the environment. Such a system has been implemented as a four-network system based on Watkins' Q-learning algorithm which can be used to maximize the expectation of the temporal derivative of the adaptive assumed reliability of future predictions. An experiment with an artificial nondeterministic environment demonstrates that the system can be superior to previous model-building control systems, which do not address the problem of modeling the reliability of the world model's predictions in uncertain environments and use ad-hoc methods (like random search) to train the world model.<>", "year": 1991, "venue": "[Proceedings] 1991 IEEE International Joint Conference on Neural Networks", "authors": [ "J. Schmidhuber" ], "externalIds": { "MAG": "1863227302", "DOI": "10.1109/IJCNN.1991.170605", "CorpusId": 17874844 }, "url": "https://www.semanticscholar.org/paper/94db34f4b68189bfcba22beab33ee3b54f10b876", "referenceCount": 18, "citationCount": 672, "influentialCitationCount": 48, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Complexity of Markov Decision Processes", "abstract": "We investigate the complexity of the classical problem of optimal policy computation in Markov decision processes. All three variants of the problem finite horizon, infinite horizon discounted, and infinite horizon average cost were known to be solvable in polynomial time by dynamic programming finite horizon problems, linear programming, or successive approximation techniques infinite horizon. We show that they are complete for P, and therefore most likely cannot be solved by highly parallel algorithms. We also show that, in contrast, the deterministic cases of all three problems can be solved very fast in parallel. 
The version with partially observed states is shown to be PSPACE-complete, and thus even less likely to be solved in polynomial time than the NP-complete problems; in fact, we show that, most likely, it is not possible to have an efficient on-line implementation involving polynomial time on-line computations and memory of an optimal policy, even if an arbitrary amount of precomputation is allowed. Finally, the variant of the problem in which there are no observations is shown to be NP-complete.", "year": 1987, "venue": "Mathematics of Operations Research", "authors": [ "C. Papadimitriou", "J. Tsitsiklis" ], "externalIds": { "DBLP": "journals/mor/PapadimitriouT87", "MAG": "2032100464", "DOI": "10.1287/moor.12.3.441", "CorpusId": 29322444 }, "url": "https://www.semanticscholar.org/paper/d51eb16dfed68bf6e16b8b4516d607370b91189a", "referenceCount": 13, "citationCount": 1553, "influentialCitationCount": 114, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "S4ND: Modeling Images and Videos as Multidimensional Signals with State Spaces", "abstract": null, "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Eric Nguyen", "Karan Goel", "Albert Gu", "G. Downs", "Preey Shah", "Tri Dao", "S. Baccus", "C. Ré" ], "externalIds": { "DBLP": "conf/nips/NguyenGGDSDBR22", "CorpusId": 260443992 }, "url": "https://www.semanticscholar.org/paper/cf0f8f585c8822e3c6bcd9527d546eefc8486aea", "referenceCount": 0, "citationCount": 107, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "VariBAD: Variational Bayes-Adaptive Deep RL via Meta-Learning", "abstract": "Trading off exploration and exploitation in an unknown environment is key to maximising expected online return during learning. A Bayes-optimal policy, which does so optimally, conditions its actions not only on the environment state but also on the agent’s uncertainty about the environment. Computing a Bayes-optimal policy is however intractable for all but the smallest tasks. In this paper, we introduce variational Bayes-Adaptive Deep RL (variBAD), a way to meta-learn approximately Bayes-optimal policies for complex tasks. VariBAD simultaneously meta-learns a variational auto-encoder to perform approximate inference", "year": 2021, "venue": "Journal of machine learning research", "authors": [ "L. Zintgraf", "Sebastian Schulze", "Cong Lu", "Leo Feng", "Maximilian Igl", "K. Shiarlis", "Y. 
Gal", "Katja Hofmann", "Shimon Whiteson" ], "externalIds": { "DBLP": "journals/jmlr/ZintgrafSLFISGH21", "CorpusId": 244835342 }, "url": "https://www.semanticscholar.org/paper/c7629a4d7e1c87fd1ea73850bcb800538fd0aa4b", "referenceCount": 69, "citationCount": 33, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PyTorch Implementations of Reinforcement Learning Algorithms", "abstract": null, "year": 2018, "venue": ": /", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Backprop", "abstract": null, "year": 2017, "venue": "Encyclopedia of Machine Learning and Data Mining", "authors": [], "externalIds": { "DBLP": "reference/ml/X17am", "DOI": "10.1007/978-1-4899-7687-1_100030", "CorpusId": 263884549 }, "url": "https://www.semanticscholar.org/paper/b96d308a9df2f824dc0acd8ea206522877b07580", "referenceCount": 0, "citationCount": 41, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Machine learning - a probabilistic perspective", "abstract": "All rights reserved. No part of this book may be reproduced in any form by any electronic or mechanical means (including photocopying, recording, or information storage and retrieval) without permission in writing from the publisher. Machine learning : a probabilistic perspective / Kevin P. Murphy. p. cm. — (Adaptive computation and machine learning series) Includes bibliographical references and index. Contents Preface xxvii 1 Introduction 1 1.1 Machine learning: what and why? 1 1.1.1 Types of machine learning 2 1.2 Supervised learning 3 1.2.1 Classification 3 1.2.2 Regression 8 1.3 Unsupervised learning 9 1.3.1 Discovering clusters 10 1.3.2 Discovering latent factors 11 1.3.3 Discovering graph structure 13 1.3.4 Matrix completion 14 1.4 Some basic concepts in machine learning 16 1.4.1 Parametric vs non-parametric models 16 1.4.2 A simple non-parametric classifier: K-nearest neighbors 16 1.4.3 The curse of dimensionality 18 1.4.4 Parametric models for classification and regression 19 1.4.5", "year": 2012, "venue": "Adaptive computation and machine learning series", "authors": [ "Kevin P. Murphy" ], "externalIds": { "DBLP": "books/lib/Murphy12", "CorpusId": 17793133 }, "url": "https://www.semanticscholar.org/paper/360ca02e6f5a5e1af3dce4866a257aafc2d6d6f5", "referenceCount": 0, "citationCount": 9092, "influentialCitationCount": 1130, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Pattern Recognition and Machine Learning , volume 29", "abstract": null, "year": 2006, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "A New Approach to Linear Filtering and Prediction Problems", "abstract": "AN IMPORTANT class of theoretical and practical problems in communication and control is of a statistical nature. Such problems are: (i) Prediction of random signals; (ii) separation of random signals from random noise; (iii) detection of signals of known form (pulses, sinusoids) in the presence of random noise. 
In his pioneering work, Wiener [1]3 showed that problems (i) and (ii) lead to the so-called Wiener-Hopf integral equation; he also gave a method (spectral factorization) for the solution of this integral equation in the practically important special case of stationary statistics and rational spectra. Many extensions and generalizations followed Wiener’s basic work. Zadeh and Ragazzini solved the finite-memory case [2]. Concurrently and independently of Bode and Shannon [3], they also gave a simplified method [2) of solution. Booton discussed the nonstationary Wiener-Hopf equation [4]. These results are now in standard texts [5-6]. A somewhat different approach along these main lines has been given recently by Darlington [7]. For extensions to sampled signals, see, e.g., Franklin [8], Lees [9]. Another approach based on the eigenfunctions of the WienerHopf equation (which applies also to nonstationary problems whereas the preceding methods in general don’t), has been pioneered by Davis [10] and applied by many others, e.g., Shinbrot [11], Blum [12], Pugachev [13], Solodovnikov [14]. In all these works, the objective is to obtain the specification of a linear dynamic system (Wiener filter) which accomplishes the prediction, separation, or detection of a random signal.4 ——— 1 This research was supported in part by the U. S. Air Force Office of Scientific Research under Contract AF 49 (638)-382. 2 7212 Bellona Ave. 3 Numbers in brackets designate References at end of paper. 4 Of course, in general these tasks may be done better by nonlinear filters. At present, however, little or nothing is known about how to obtain (both theoretically and practically) these nonlinear filters. Contributed by the Instruments and Regulators Division and presented at the Instruments and Regulators Conference, March 29– Apri1 2, 1959, of THE AMERICAN SOCIETY OF MECHANICAL ENGINEERS. NOTE: Statements and opinions advanced in papers are to be understood as individual expressions of their authors and not those of the Society. Manuscript received at ASME Headquarters, February 24, 1959. Paper No. 59-IRD—11. A New Approach to Linear Filtering and Prediction Problems", "year": 2002, "venue": "", "authors": [], "externalIds": { "CorpusId": 1242324 }, "url": "https://www.semanticscholar.org/paper/255a77422b1da74da05d1714b7875356187385bd", "referenceCount": 25, "citationCount": 26660, "influentialCitationCount": 1749, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Networks adjusting networks", "abstract": "This paper describes extensions of previousàdaptive critics' which have been one-dimensional, acyclic, and suited only for feed-forward controllers. The extensions address the following issues: 1. Feed-forward adaptive critics for fully recurrent probabilistic control nets. 2. Recurrent adaptive critics. 3. Vector-valued adaptive critics based on a system identiication component. Furthermore an idea is described for approximating recurrent back propagation with a 3-network method which is local in time. In one experiment a linear adaptive critic adjusts a recurrent network such that it solves a non-linear task (a `delayed XOR'-problem). In another experiment a four-dimensional adaptive critic quickly learns to solve a complicated pole balancing task.", "year": 1990, "venue": "Forschungsberichte, TU Munich", "authors": [ "J. 
Schmidhuber" ], "externalIds": { "DBLP": "tr/tum/FKI-125-90", "MAG": "2784022806", "CorpusId": 14701780 }, "url": "https://www.semanticscholar.org/paper/92483870100a2252b2e08dffe0c03019087eef6e", "referenceCount": 20, "citationCount": 27, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "N = 64 , L = 2 N = 64 , L = 3 N = 128 , L = 2 N = 128 , L = 3 N = 256 , L = 2", "abstract": null, "year": null, "venue": "Best POPGym Figure 18: POPGym training curves for vSSM", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Research on Predicting Public Opinion Event Heat Levels Based on Large Language Models": { "paper_title": "Research on Predicting Public Opinion Event Heat Levels Based on Large Language Models", "arxiv_id": "2409.18548v1", "keyword": "large language model", "authors": [ "Yi Ren", "Tianyi Zhang", "Weibin Li", "DuoMu Zhou", "Chenhao Qin", "FangCheng Dong" ], "references": [ { "title": "A Novel Adaptive Fine-Tuning Algorithm for Multimodal Models: Self-Optimizing Classification and Selection of High-Quality Datasets in Remote Sensing", "abstract": "We propose an adaptive fine-tuning algorithm for multimodal large models. The core steps of this algorithm involve two stages of truncation. First, the vast amount of data is projected into a semantic vector space, and the MiniBatchKMeans algorithm is used for automated clustering. This classification ensures that the data within each cluster exhibit high semantic similarity. Next, we process the data in each cluster, calculating the translational difference between the original and perturbed data in the multimodal large model's vector space. This difference serves as a generalization metric for the data. Based on this metric, we select the data with high generalization potential for training. We applied this algorithm to train the InternLM-XComposer2-VL-7B model on two 3090 GPUs using one-third of the GeoChat multimodal remote sensing dataset. The results demonstrate that our algorithm outperforms the state-of-the-art baselines. various baselines. The model trained on our optimally chosen one-third dataset, based on experimental validation, exhibited only 1% reduction in performance across various remote sensing metrics compared to the model trained on the full dataset. This approach significantly preserved general-purpose capabilities while reducing training time by 68.2%. Furthermore, the model achieved scores of 89.86 and 77.19 on the UCMerced and AID evaluation datasets, respectively, surpassing the GeoChat dataset by 5.43 and 5.16 points. It only showed a 0.91-point average decrease on the LRBEN evaluation dataset.", "year": 2024, "venue": "", "authors": [ "Yi Ren", "Tianyi Zhang", "Zhixiong Han", "Weibin Li", "Zhiyang Wang", "Wenbo Ji", "Chenhao Qin", "Chenbin Liang", "Licheng Jiao" ], "externalIds": { "ArXiv": "2409.13345", "CorpusId": 272770204 }, "url": "https://www.semanticscholar.org/paper/9afcd8c64f8383ec87ee1d8f774d286084e4bc67", "referenceCount": 1, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Super-Resolution Water Body Extraction Based on MF-SegFormer", "abstract": "Surface water body (WB) as one of the world’s most critical natural resources, plays a significant role in forming and sustaining life. 
Therefore, accurate extraction of WB is particularly important. However, how to make full use of existing low-resolution images and labels to achieve accurate WB extraction on super-resolution images is a challenge. This study proposed a new method for super-resolution WB extraction on Landsat 8 OLI images based on MF-SegFormer. WB extraction and analysis were performed on test datasets at 30 m and 15 m resolutions, respectively. Precision, recall, F1-score, and mIoU are used as evaluation metrics. The results showed that the Multiscale Fusion SegFormer (MF-SegFormer) network has the best extraction results, and the constructed WB extraction model performed well on the 15 m super-resolution test dataset. In areas with rich details, our constructed WB extraction model demonstrates better performance in extracting from super-resolution images. This study provides technical support for further super-resolution WB research.", "year": 2024, "venue": "IEEE International Geoscience and Remote Sensing Symposium", "authors": [ "Tianyi Zhang", "Weibin Li", "Xihui Feng", "Yi Ren", "Chenhao Qin", "Wenbo Ji", "Xin Yang" ], "externalIds": { "DBLP": "conf/igarss/ZhangLFRQJY24", "DOI": "10.1109/IGARSS53475.2024.10640498", "CorpusId": 272432511 }, "url": "https://www.semanticscholar.org/paper/94c724e4f9da561859d37d9cef55628d3ee78067", "referenceCount": 11, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Improved DeepLabv3+ Based Flood Water Body Extraction Model for SAR Imagery", "abstract": "In recent years, with the intensification of global climate change and the increasing frequency of natural disasters, flooding has become a widespread concern worldwide. Deep learning-based techniques for extracting floods from remote sensing images are crucial in disaster analysis, emergency response, and early warning systems. In this study, we innovatively developed a high-precision model for extracting floodwater bodies based on DeepLabv3+. To enhance the network’s comprehension of inter-feature dependencies, we introduced the feature-dense connectivity operation. Furthermore, to prioritize essential channel and spatial information, particularly at water body edges, we seamlessly integrated the Convolutional Block Attention Module (CBAM) mechanism into our model. This strategic combination of DeepLabv3+, feature-dense connectivity, and CBAM not only showcases our novel approach but also significantly enhances the model’s overall effectiveness in floodwater body extraction. The experiments prove that our method outperforms models such as DeepLabv3+, SegFormer, SETR and FastFCN, performing the best in all metrics on both pre-disaster and post-disaster data from the DaLi flood dataset. Additionally, the model’s mean Intersection over Union (mIoU) results improved by 0.3% and 4.1% compared to the unimproved DeepLabV3+ on pre-disaster and post-disaster data, respectively. 
This study contributes to the timely extraction of flood hazards and facilitates emergency response.", "year": 2024, "venue": "IEEE International Geoscience and Remote Sensing Symposium", "authors": [ "Chenhao Qin", "Weibin Li", "Tianyi Zhang", "Wenbo Ji", "Xihui Feng", "Yi Ren" ], "externalIds": { "DBLP": "conf/igarss/QinLZJFR24", "DOI": "10.1109/IGARSS53475.2024.10640606", "CorpusId": 272430983 }, "url": "https://www.semanticscholar.org/paper/a869fdad33fbabeb412c1d1ef54d10e8151f7e31", "referenceCount": 7, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LawGPT: A Chinese Legal Knowledge-Enhanced Large Language Model", "abstract": "Large language models (LLMs), including both proprietary and open-source models, have showcased remarkable capabilities in addressing a wide range of downstream tasks. Nonetheless, when it comes to practical Chinese legal tasks, these models fail to meet the actual requirements. Proprietary models do not ensure data privacy for sensitive legal cases, while open-source models demonstrate unsatisfactory performance due to their lack of legal knowledge. To address this problem, we introduce LawGPT, the first open-source model specifically designed for Chinese legal applications. LawGPT comprises two key components: legal-oriented pre-training and legal supervised fine-tuning. Specifically, we employ large-scale Chinese legal documents for legal-oriented pre-training to incorporate legal domain knowledge. To further improve the model's performance on downstream legal tasks, we create a knowledge-driven instruction dataset for legal supervised fine-tuning. Our experimental results demonstrate that LawGPT outperforms the open-source LLaMA 7B model. Our code and resources are publicly available at https://github.com/pengxiao-song/LaWGPT and have received 5.7K stars on GitHub.", "year": 2024, "venue": "arXiv.org", "authors": [ "Zhi Zhou", "Jiang-Xin Shi", "Peng-Xiao Song", "Xiaowen Yang", "Yi-Xuan Jin", "Lan-Zhe Guo", "Yu-Feng Li" ], "externalIds": { "ArXiv": "2406.04614", "DBLP": "journals/corr/abs-2406-04614", "DOI": "10.48550/arXiv.2406.04614", "CorpusId": 270357534 }, "url": "https://www.semanticscholar.org/paper/84ca0de20f31a5a19896aa1bfde71a96dc592b7f", "referenceCount": 40, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DeepSeek-V2: A Strong, Economical, and Efficient Mixture-of-Experts Language Model", "abstract": "We present DeepSeek-V2, a strong Mixture-of-Experts (MoE) language model characterized by economical training and efficient inference. It comprises 236B total parameters, of which 21B are activated for each token, and supports a context length of 128K tokens. DeepSeek-V2 adopts innovative architectures including Multi-head Latent Attention (MLA) and DeepSeekMoE. MLA guarantees efficient inference through significantly compressing the Key-Value (KV) cache into a latent vector, while DeepSeekMoE enables training strong models at an economical cost through sparse computation. Compared with DeepSeek 67B, DeepSeek-V2 achieves significantly stronger performance, and meanwhile saves 42.5% of training costs, reduces the KV cache by 93.3%, and boosts the maximum generation throughput to 5.76 times. We pretrain DeepSeek-V2 on a high-quality and multi-source corpus consisting of 8.1T tokens, and further perform Supervised Fine-Tuning (SFT) and Reinforcement Learning (RL) to fully unlock its potential. 
Evaluation results show that, even with only 21B activated parameters, DeepSeek-V2 and its chat versions still achieve top-tier performance among open-source models.", "year": 2024, "venue": "arXiv.org", "authors": [ "Zhihong Shao", "Damai Dai", "Daya Guo", "Bo Liu (Benjamin Liu)", "Zihan Wang" ], "externalIds": { "ArXiv": "2405.04434", "DBLP": "journals/corr/abs-2405-04434", "DOI": "10.48550/arXiv.2405.04434", "CorpusId": 269613809 }, "url": "https://www.semanticscholar.org/paper/53a803388e83ae89261624099d7be4287ace67cb", "referenceCount": 58, "citationCount": 78, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Retrieve Anything To Augment Large Language Models", "abstract": "Large language models (LLMs) face significant challenges stemming from their inherent limitations in knowledge, memory, alignment, and action. These challenges cannot be addressed by LLMs alone, but should rely on assistance from the external world, such as knowledge base, memory store, demonstration examples, and tools. Retrieval augmentation stands as a vital mechanism for bridging the gap between LLMs and the external assistance. However, conventional methods encounter two pressing issues. On the one hand, the general-purpose retrievers are not properly optimized for the retrieval augmentation of LLMs. On the other hand, the task-specific retrievers lack the required versatility, hindering their performance across the diverse retrieval augmentation scenarios. In this work, we present a novel approach, the LLM-Embedder, which comprehensively supports the diverse retrieval augmentation needs of LLMs with one unified embedding model. Training such a unified model is non-trivial, as various retrieval tasks aim to capture distinct semantic relationships, often subject to mutual interference. To address this challenge, we systematically optimize our training methodology. This includes reward formulation based on LLMs' feedback, the stabilization of knowledge distillation, multi-task fine-tuning with explicit instructions, and homogeneous in-batch negative sampling. These optimization strategies contribute to the outstanding empirical performance of the LLM-Embedder. Notably, it yields remarkable enhancements in retrieval augmentation for LLMs, surpassing both general-purpose and task-specific retrievers in various evaluation scenarios. Our checkpoint and source code are publicly available at https://github.com/FlagOpen/FlagEmbedding.", "year": 2023, "venue": "arXiv.org", "authors": [ "Peitian Zhang", "Shitao Xiao", "Zheng Liu", "Zhicheng Dou", "Jian-Yun Nie" ], "externalIds": { "ArXiv": "2310.07554", "DBLP": "journals/corr/abs-2310-07554", "DOI": "10.48550/arXiv.2310.07554", "CorpusId": 263835099 }, "url": "https://www.semanticscholar.org/paper/232e07b0ef0148c5325fda96eb9057c7a6db2ec2", "referenceCount": 102, "citationCount": 29, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Leveraging Large Language Models for Topic Classification in the Domain of Public Affairs", "abstract": null, "year": 2023, "venue": "ICDAR Workshops", "authors": [ "Alejandro Peña", "A. Morales", "Julian Fierrez", "Ignacio Serna", "J. 
Ortega-Garcia", "Iñigo Puente", "Jorge Cordova", "Gonzalo Cordova" ], "externalIds": { "ArXiv": "2306.02864", "DBLP": "conf/icdar/PenaMFSOPCC23", "DOI": "10.1007/978-3-031-41498-5_2", "CorpusId": 259076261 }, "url": "https://www.semanticscholar.org/paper/661e64593fca437e41d4b90bcbc440ba76d988d2", "referenceCount": 23, "citationCount": 13, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ChatGPT: Applications, Opportunities, and Threats", "abstract": "Developed by OpenAI, ChatGPT (Conditional Generative Pre-trained Transformer) is an artificial intelligence technology that is fine-tuned using supervised machine learning and reinforcement learning techniques, allowing a computer to generate natural language conversation fully autonomously. ChatGPT is built on the transformer architecture and trained on millions of conversations from various sources. The system combines the power of pre-trained deep learning models with a programmability layer to provide a strong base for generating natural language conversations. In this study, after reviewing the existing literature, we examine the applications, opportunities, and threats of ChatGPT in 10 main domains, providing detailed examples for the business and industry as well as education. We also conducted an experimental study, checking the effectiveness and comparing the performances of GPT-3.5 and GPT-4, and found that the latter performs significantly better. Despite its exceptional ability to generate natural-sounding responses, the authors believe that ChatGPT does not possess the same level of understanding, empathy, and creativity as a human and cannot fully replace them in most situations.", "year": 2023, "venue": "Systems and Information Engineering Design Symposium", "authors": [ "Aram Bahrini", "Mohammadsadra Khamoshifar", "H. Abbasimehr", "R. Riggs", "Maryam Esmaeili", "Rastin Mastali Majdabadkohne", "Morteza Pasehvar" ], "externalIds": { "DBLP": "journals/corr/abs-2304-09103", "ArXiv": "2304.09103", "DOI": "10.1109/SIEDS58326.2023.10137850", "CorpusId": 258187497 }, "url": "https://www.semanticscholar.org/paper/f942258b941095a5f7aa59c71422e0327e2496b1", "referenceCount": 27, "citationCount": 85, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "HuaTuo: Tuning LLaMA Model with Chinese Medical Knowledge", "abstract": "Large Language Models (LLMs), such as the LLaMA model, have demonstrated their effectiveness in various general-domain natural language processing (NLP) tasks. Nevertheless, LLMs have not yet performed optimally in biomedical domain tasks due to the need for medical expertise in the responses. In response to this challenge, we propose HuaTuo, a LLaMA-based model that has been supervised-fine-tuned with generated QA (Question-Answer) instances. The experimental results demonstrate that HuaTuo generates responses that possess more reliable medical knowledge. 
Our proposed HuaTuo model is accessible at https://github.com/SCIR-HI/Huatuo-Llama-Med-Chinese.", "year": 2023, "venue": "arXiv.org", "authors": [ "Hao Wang", "Chi-Liang Liu", "Nuwa Xi", "Zewen Qiang", "Sendong Zhao", "Bing Qin", "Ting Liu" ], "externalIds": { "ArXiv": "2304.06975", "DBLP": "journals/corr/abs-2304-06975", "DOI": "10.48550/arXiv.2304.06975", "CorpusId": 258170497 }, "url": "https://www.semanticscholar.org/paper/302ee27524a717ddc21f332ca634b9211c6ec6aa", "referenceCount": 13, "citationCount": 133, "influentialCitationCount": 20, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GPT-4 Technical Report", "abstract": "We report the development of GPT-4, a large-scale, multimodal model which can accept image and text inputs and produce text outputs. While less capable than humans in many real-world scenarios, GPT-4 exhibits human-level performance on various professional and academic benchmarks, including passing a simulated bar exam with a score around the top 10% of test takers. GPT-4 is a Transformer-based model pre-trained to predict the next token in a document. The post-training alignment process results in improved performance on measures of factuality and adherence to desired behavior. A core component of this project was developing infrastructure and optimization methods that behave predictably across a wide range of scales. This allowed us to accurately predict some aspects of GPT-4's performance based on models trained with no more than 1/1,000th the compute of GPT-4.", "year": 2023, "venue": "", "authors": [ "OpenAI Josh Achiam", "Steven Adler", "Sandhini Agarwal", "Lama Ahmad", "Ilge Akkaya", "Florencia Leoni Aleman", "Diogo Almeida", "Janko Altenschmidt", "Sam Altman", "Shyamal Anadkat", "Red Avila", "Igor Babuschkin", "S. Balaji", "Valerie Balcom", "Paul Baltescu", "Haim-ing Bao", "Mo Bavarian", "Jeff Belgum", "Irwan Bello", "Jake Berdine", "Gabriel Bernadett-Shapiro", "Christopher Berner", "Lenny Bogdonoff", "Oleg Boiko", "Madelaine Boyd", "Anna-Luisa Brakman", "Greg Brockman", "Tim Brooks", "Miles Brundage", "Kevin Button", "Trevor Cai", "Rosie Campbell", "Andrew Cann", "Brittany Carey", "Chelsea Carlson", "Rory Carmichael", "Brooke Chan", "Che Chang", "Fotis Chantzis", "Derek Chen", "Sully Chen", "Ruby Chen", "Jason Chen", "Mark Chen", "B. Chess", "Chester Cho", "Casey Chu", "Hyung Won Chung", "Dave Cummings", "Jeremiah Currier", "Yunxing Dai", "Cory Decareaux", "Thomas Degry", "Noah Deutsch", "Damien Deville", "Arka Dhar", "David Dohan", "Steve Dowling", "Sheila Dunning", "Adrien Ecoffet", "Atty Eleti", "Tyna Eloundou", "David Farhi", "Liam Fedus", "Niko Felix", "Sim'on Posada Fishman", "Juston Forte", "Is-abella Fulford", "Leo Gao", "Elie Georges", "C. Gibson", "Vik Goel", "Tarun Gogineni", "Gabriel Goh", "Raphael Gontijo-Lopes", "Jonathan Gordon", "Morgan Grafstein", "Scott Gray", "Ryan Greene", "Joshua Gross", "S. Gu", "Yufei Guo", "Chris Hallacy", "Jesse Han", "Jeff Harris", "Yuchen He", "Mike Heaton", "Johannes Heidecke", "Chris Hesse", "Alan Hickey", "Wade Hickey", "Peter Hoeschele", "Brandon Houghton", "Kenny Hsu", "Shengli Hu", "Xin Hu", "Joost Huizinga", "Shantanu Jain", "Shawn Jain", "Joanne Jang", "Angela Jiang", "Roger Jiang", "Haozhun Jin", "Denny Jin", "Shino Jomoto", "B. Jonn", "Heewoo Jun", "Tomer Kaftan", "Lukasz Kaiser", "Ali Kamali", "I. Kanitscheider", "N. Keskar", "Tabarak Khan", "Logan Kilpatrick", "Jong Wook Kim", "Christina Kim", "Yongjik Kim", "Hendrik Kirchner", "J. 
Kiros", "Matthew Knight", "Daniel Kokotajlo", "Lukasz Kondraciuk", "A. Kondrich", "Aris Konstantinidis", "Kyle Kosic", "Gretchen Krueger", "Vishal Kuo", "Michael Lampe", "Ikai Lan", "Teddy Lee", "J. Leike", "Jade Leung", "Daniel Levy", "Chak Ming Li", "Rachel Lim", "Molly Lin", "Stephanie Lin", "Ma-teusz Litwin", "Theresa Lopez", "Ryan Lowe", "Patricia Lue", "A. Makanju", "Kim Malfacini", "Sam Manning", "Todor Markov", "Yaniv Markovski", "Bianca Martin", "Katie Mayer", "Andrew Mayne", "Bob McGrew", "S. McKinney", "C. McLeavey", "Paul McMillan", "Jake McNeil", "David Medina", "Aalok Mehta", "Jacob Menick", "Luke Metz", "Andrey Mishchenko", "Pamela Mishkin", "Vinnie Monaco", "Evan Morikawa", "Daniel P. Mossing", "Tong Mu", "Mira Murati", "O. Murk", "David M'ely", "Ashvin Nair", "Reiichiro Nakano", "Rajeev Nayak", "Arvind Neelakantan", "Richard Ngo", "Hyeonwoo Noh", "Ouyang Long", "Cullen O'Keefe", "J. Pachocki", "Alex Paino", "Joe Palermo", "Ashley Pantuliano", "Giambattista Parascandolo", "Joel Parish", "Emy Parparita", "Alexandre Passos", "Mikhail Pavlov", "Andrew Peng", "Adam Perelman", "Filipe de Avila Belbute Peres", "Michael Petrov", "Henrique Pondé de Oliveira Pinto", "Michael Pokorny", "Michelle Pokrass", "Vitchyr H. Pong", "Tolly Powell", "Alethea Power", "Boris Power", "Elizabeth Proehl", "Raul Puri", "Alec Radford", "Jack W. Rae", "Aditya Ramesh", "Cameron Raymond", "Francis Real", "Kendra Rimbach", "Carl Ross", "Bob Rotsted", "Henri Roussez", "Nick Ryder", "M. Saltarelli", "Ted Sanders", "Shibani Santurkar", "Girish Sastry", "Heather Schmidt", "David Schnurr", "John Schulman", "Daniel Selsam", "Kyla Sheppard", "Toki Sherbakov", "Jessica Shieh", "Sarah Shoker", "Pranav Shyam", "Szymon Sidor", "Eric Sigler", "Maddie Simens", "Jordan Sitkin", "Katarina Slama", "Ian Sohl", "Benjamin D. Sokolowsky", "Yang Song", "Natalie Staudacher", "F. Such", "Natalie Summers", "I. Sutskever", "Jie Tang", "N. Tezak", "Madeleine Thompson", "Phil Tillet", "Amin Tootoonchian", "Elizabeth Tseng", "Preston Tuggle", "Nick Turley", "Jerry Tworek", "Juan Felipe Cer'on Uribe", "Andrea Vallone", "Arun Vijayvergiya", "Chelsea Voss", "Carroll L. Wainwright", "Justin Jay Wang", "Alvin Wang", "Ben Wang", "Jonathan Ward", "Jason Wei", "CJ Weinmann", "Akila Welihinda", "P. Welinder", "Jiayi Weng", "Lilian Weng", "Matt Wiethoff", "Dave Willner", "Clemens Winter", "Samuel Wolrich", "Hannah Wong", "Lauren Workman", "Sherwin Wu", "Jeff Wu", "Michael Wu", "Kai Xiao", "Tao Xu", "Sarah Yoo", "Kevin Yu", "Qim-ing Yuan", "Wojciech Zaremba", "Rowan Zellers", "Chong Zhang", "Marvin Zhang", "Shengjia Zhao", "Tianhao Zheng", "Juntang Zhuang", "William Zhuk", "Barret Zoph" ], "externalIds": { "ArXiv": "2303.08774", "CorpusId": 257532815 }, "url": "https://www.semanticscholar.org/paper/163b4d6a79a5b19af88b8585456363340d9efd04", "referenceCount": 0, "citationCount": 7054, "influentialCitationCount": 1038, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LLaMA: Open and Efficient Foundation Language Models", "abstract": "We introduce LLaMA, a collection of foundation language models ranging from 7B to 65B parameters. We train our models on trillions of tokens, and show that it is possible to train state-of-the-art models using publicly available datasets exclusively, without resorting to proprietary and inaccessible datasets. In particular, LLaMA-13B outperforms GPT-3 (175B) on most benchmarks, and LLaMA-65B is competitive with the best models, Chinchilla-70B and PaLM-540B. 
We release all our models to the research community.", "year": 2023, "venue": "arXiv.org", "authors": [ "Hugo Touvron", "Thibaut Lavril", "Gautier Izacard", "Xavier Martinet", "Marie-Anne Lachaux", "Timothée Lacroix", "Baptiste Rozière", "Naman Goyal", "Eric Hambro", "Faisal Azhar", "Aurelien Rodriguez", "Armand Joulin", "Edouard Grave", "Guillaume Lample" ], "externalIds": { "DBLP": "journals/corr/abs-2302-13971", "ArXiv": "2302.13971", "CorpusId": 257219404 }, "url": "https://www.semanticscholar.org/paper/57e849d0de13ed5f91d086936296721d4ff75a75", "referenceCount": 80, "citationCount": 8031, "influentialCitationCount": 1073, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GLM: General Language Model Pretraining with Autoregressive Blank Infilling", "abstract": "There have been various types of pretraining architectures including autoencoding models (e.g., BERT), autoregressive models (e.g., GPT), and encoder-decoder models (e.g., T5). However, none of the pretraining frameworks performs the best for all tasks of three main categories including natural language understanding (NLU), unconditional generation, and conditional generation. We propose a General Language Model (GLM) based on autoregressive blank infilling to address this challenge. GLM improves blank filling pretraining by adding 2D positional encodings and allowing an arbitrary order to predict spans, which results in performance gains over BERT and T5 on NLU tasks. Meanwhile, GLM can be pretrained for different types of tasks by varying the number and lengths of blanks. On a wide range of tasks across NLU, conditional and unconditional generation, GLM outperforms BERT, T5, and GPT given the same model sizes and data, and achieves the best performance from a single pretrained model with 1.25× parameters of BERT Large , demonstrating its generalizability to different downstream tasks.", "year": 2021, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Zhengxiao Du", "Yujie Qian", "Xiao Liu", "Ming Ding", "J. Qiu", "Zhilin Yang", "Jie Tang" ], "externalIds": { "DBLP": "conf/acl/DuQLDQY022", "ArXiv": "2103.10360", "ACL": "2022.acl-long.26", "DOI": "10.18653/v1/2022.acl-long.26", "CorpusId": 247519241 }, "url": "https://www.semanticscholar.org/paper/50796b0f3edf9cb5ff1e447c298b33755378aa4f", "referenceCount": 64, "citationCount": 1078, "influentialCitationCount": 134, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Language Models are Few-Shot Learners", "abstract": "Recent work has demonstrated substantial gains on many NLP tasks and benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples. By contrast, humans can generally perform a new language task from only a few examples or from simple instructions - something which current NLP systems still largely struggle to do. Here we show that scaling up language models greatly improves task-agnostic, few-shot performance, sometimes even reaching competitiveness with prior state-of-the-art fine-tuning approaches. Specifically, we train GPT-3, an autoregressive language model with 175 billion parameters, 10x more than any previous non-sparse language model, and test its performance in the few-shot setting. 
For all tasks, GPT-3 is applied without any gradient updates or fine-tuning, with tasks and few-shot demonstrations specified purely via text interaction with the model. GPT-3 achieves strong performance on many NLP datasets, including translation, question-answering, and cloze tasks, as well as several tasks that require on-the-fly reasoning or domain adaptation, such as unscrambling words, using a novel word in a sentence, or performing 3-digit arithmetic. At the same time, we also identify some datasets where GPT-3's few-shot learning still struggles, as well as some datasets where GPT-3 faces methodological issues related to training on large web corpora. Finally, we find that GPT-3 can generate samples of news articles which human evaluators have difficulty distinguishing from articles written by humans. We discuss broader societal impacts of this finding and of GPT-3 in general.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Tom B. Brown", "Benjamin Mann", "Nick Ryder", "Melanie Subbiah", "J. Kaplan", "Prafulla Dhariwal", "Arvind Neelakantan", "Pranav Shyam", "Girish Sastry", "Amanda Askell", "Sandhini Agarwal", "Ariel Herbert-Voss", "Gretchen Krueger", "T. Henighan", "R. Child", "A. Ramesh", "Daniel M. Ziegler", "Jeff Wu", "Clemens Winter", "Christopher Hesse", "Mark Chen", "Eric Sigler", "Ma-teusz Litwin", "Scott Gray", "B. Chess", "Jack Clark", "Christopher Berner", "Sam McCandlish", "Alec Radford", "I. Sutskever", "Dario Amodei" ], "externalIds": { "ArXiv": "2005.14165", "DBLP": "conf/nips/BrownMRSKDNSSAA20", "MAG": "3030163527", "CorpusId": 218971783 }, "url": "https://www.semanticscholar.org/paper/90abbc2cf38462b954ae1b772fac9532e2ccd8b0", "referenceCount": 146, "citationCount": 30854, "influentialCitationCount": 3529, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "“WaterGPT: Training a large language model to become a hydrology expert,”", "abstract": null, "year": null, "venue": "SSRN", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“FUO_ED: A dataset for evaluating the performance of large language models in diagnosing complex cases of fever of unknown origin,”", "abstract": null, "year": null, "venue": "SSRN", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Trustworthy AI: Securing Sensitive Data in Large Language Models": { "paper_title": "Trustworthy AI: Securing Sensitive Data in Large Language Models", "arxiv_id": "2409.18222v1", "keyword": "large language model", "authors": [ "Georgios Feretzakis", "Vassilios S. Verykios" ], "references": [ { "title": "Llama 2: Open Foundation and Fine-Tuned Chat Models", "abstract": "In this work, we develop and release Llama 2, a collection of pretrained and fine-tuned large language models (LLMs) ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama 2-Chat, are optimized for dialogue use cases. Our models outperform open-source chat models on most benchmarks we tested, and based on our human evaluations for helpfulness and safety, may be a suitable substitute for closed-source models. 
We provide a detailed description of our approach to fine-tuning and safety improvements of Llama 2-Chat in order to enable the community to build on our work and contribute to the responsible development of LLMs.", "year": 2023, "venue": "arXiv.org", "authors": [ "Hugo Touvron", "Louis Martin", "Kevin R. Stone", "Peter Albert", "Amjad Almahairi", "Yasmine Babaei", "Nikolay Bashlykov", "Soumya Batra", "Prajjwal Bhargava", "Shruti Bhosale", "D. Bikel", "Lukas Blecher", "Cristian Cantón Ferrer", "Moya Chen", "Guillem Cucurull", "David Esiobu", "Jude Fernandes", "Jeremy Fu", "Wenyin Fu", "Brian Fuller", "Cynthia Gao", "Vedanuj Goswami", "Naman Goyal", "A. Hartshorn", "Saghar Hosseini", "Rui Hou", "Hakan Inan", "Marcin Kardas", "Viktor Kerkez", "Madian Khabsa", "Isabel M. Kloumann", "A. Korenev", "Punit Singh Koura", "Marie-Anne Lachaux", "Thibaut Lavril", "Jenya Lee", "Diana Liskovich", "Yinghai Lu", "Yuning Mao", "Xavier Martinet", "Todor Mihaylov", "Pushkar Mishra", "Igor Molybog", "Yixin Nie", "Andrew Poulton", "Jeremy Reizenstein", "Rashi Rungta", "Kalyan Saladi", "Alan Schelten", "Ruan Silva", "Eric Michael Smith", "R. Subramanian", "Xia Tan", "Binh Tang", "Ross Taylor", "Adina Williams", "Jian Xiang Kuan", "Puxin Xu", "Zhengxu Yan", "Iliyan Zarov", "Yuchen Zhang", "Angela Fan", "Melanie Kambadur", "Sharan Narang", "Aurelien Rodriguez", "Robert Stojnic", "Sergey Edunov", "Thomas Scialom" ], "externalIds": { "ArXiv": "2307.09288", "DBLP": "journals/corr/abs-2307-09288", "CorpusId": 259950998 }, "url": "https://www.semanticscholar.org/paper/104b0bb1da562d53cbda87aec79ef6a2827d191a", "referenceCount": 131, "citationCount": 7142, "influentialCitationCount": 1096, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GPT-4 Technical Report", "abstract": "We report the development of GPT-4, a large-scale, multimodal model which can accept image and text inputs and produce text outputs. While less capable than humans in many real-world scenarios, GPT-4 exhibits human-level performance on various professional and academic benchmarks, including passing a simulated bar exam with a score around the top 10% of test takers. GPT-4 is a Transformer-based model pre-trained to predict the next token in a document. The post-training alignment process results in improved performance on measures of factuality and adherence to desired behavior. A core component of this project was developing infrastructure and optimization methods that behave predictably across a wide range of scales. This allowed us to accurately predict some aspects of GPT-4's performance based on models trained with no more than 1/1,000th the compute of GPT-4.", "year": 2023, "venue": "", "authors": [ "OpenAI Josh Achiam", "Steven Adler", "Sandhini Agarwal", "Lama Ahmad", "Ilge Akkaya", "Florencia Leoni Aleman", "Diogo Almeida", "Janko Altenschmidt", "Sam Altman", "Shyamal Anadkat", "Red Avila", "Igor Babuschkin", "S. Balaji", "Valerie Balcom", "Paul Baltescu", "Haim-ing Bao", "Mo Bavarian", "Jeff Belgum", "Irwan Bello", "Jake Berdine", "Gabriel Bernadett-Shapiro", "Christopher Berner", "Lenny Bogdonoff", "Oleg Boiko", "Madelaine Boyd", "Anna-Luisa Brakman", "Greg Brockman", "Tim Brooks", "Miles Brundage", "Kevin Button", "Trevor Cai", "Rosie Campbell", "Andrew Cann", "Brittany Carey", "Chelsea Carlson", "Rory Carmichael", "Brooke Chan", "Che Chang", "Fotis Chantzis", "Derek Chen", "Sully Chen", "Ruby Chen", "Jason Chen", "Mark Chen", "B. 
Chess", "Chester Cho", "Casey Chu", "Hyung Won Chung", "Dave Cummings", "Jeremiah Currier", "Yunxing Dai", "Cory Decareaux", "Thomas Degry", "Noah Deutsch", "Damien Deville", "Arka Dhar", "David Dohan", "Steve Dowling", "Sheila Dunning", "Adrien Ecoffet", "Atty Eleti", "Tyna Eloundou", "David Farhi", "Liam Fedus", "Niko Felix", "Sim'on Posada Fishman", "Juston Forte", "Is-abella Fulford", "Leo Gao", "Elie Georges", "C. Gibson", "Vik Goel", "Tarun Gogineni", "Gabriel Goh", "Raphael Gontijo-Lopes", "Jonathan Gordon", "Morgan Grafstein", "Scott Gray", "Ryan Greene", "Joshua Gross", "S. Gu", "Yufei Guo", "Chris Hallacy", "Jesse Han", "Jeff Harris", "Yuchen He", "Mike Heaton", "Johannes Heidecke", "Chris Hesse", "Alan Hickey", "Wade Hickey", "Peter Hoeschele", "Brandon Houghton", "Kenny Hsu", "Shengli Hu", "Xin Hu", "Joost Huizinga", "Shantanu Jain", "Shawn Jain", "Joanne Jang", "Angela Jiang", "Roger Jiang", "Haozhun Jin", "Denny Jin", "Shino Jomoto", "B. Jonn", "Heewoo Jun", "Tomer Kaftan", "Lukasz Kaiser", "Ali Kamali", "I. Kanitscheider", "N. Keskar", "Tabarak Khan", "Logan Kilpatrick", "Jong Wook Kim", "Christina Kim", "Yongjik Kim", "Hendrik Kirchner", "J. Kiros", "Matthew Knight", "Daniel Kokotajlo", "Lukasz Kondraciuk", "A. Kondrich", "Aris Konstantinidis", "Kyle Kosic", "Gretchen Krueger", "Vishal Kuo", "Michael Lampe", "Ikai Lan", "Teddy Lee", "J. Leike", "Jade Leung", "Daniel Levy", "Chak Ming Li", "Rachel Lim", "Molly Lin", "Stephanie Lin", "Ma-teusz Litwin", "Theresa Lopez", "Ryan Lowe", "Patricia Lue", "A. Makanju", "Kim Malfacini", "Sam Manning", "Todor Markov", "Yaniv Markovski", "Bianca Martin", "Katie Mayer", "Andrew Mayne", "Bob McGrew", "S. McKinney", "C. McLeavey", "Paul McMillan", "Jake McNeil", "David Medina", "Aalok Mehta", "Jacob Menick", "Luke Metz", "Andrey Mishchenko", "Pamela Mishkin", "Vinnie Monaco", "Evan Morikawa", "Daniel P. Mossing", "Tong Mu", "Mira Murati", "O. Murk", "David M'ely", "Ashvin Nair", "Reiichiro Nakano", "Rajeev Nayak", "Arvind Neelakantan", "Richard Ngo", "Hyeonwoo Noh", "Ouyang Long", "Cullen O'Keefe", "J. Pachocki", "Alex Paino", "Joe Palermo", "Ashley Pantuliano", "Giambattista Parascandolo", "Joel Parish", "Emy Parparita", "Alexandre Passos", "Mikhail Pavlov", "Andrew Peng", "Adam Perelman", "Filipe de Avila Belbute Peres", "Michael Petrov", "Henrique Pondé de Oliveira Pinto", "Michael Pokorny", "Michelle Pokrass", "Vitchyr H. Pong", "Tolly Powell", "Alethea Power", "Boris Power", "Elizabeth Proehl", "Raul Puri", "Alec Radford", "Jack W. Rae", "Aditya Ramesh", "Cameron Raymond", "Francis Real", "Kendra Rimbach", "Carl Ross", "Bob Rotsted", "Henri Roussez", "Nick Ryder", "M. Saltarelli", "Ted Sanders", "Shibani Santurkar", "Girish Sastry", "Heather Schmidt", "David Schnurr", "John Schulman", "Daniel Selsam", "Kyla Sheppard", "Toki Sherbakov", "Jessica Shieh", "Sarah Shoker", "Pranav Shyam", "Szymon Sidor", "Eric Sigler", "Maddie Simens", "Jordan Sitkin", "Katarina Slama", "Ian Sohl", "Benjamin D. Sokolowsky", "Yang Song", "Natalie Staudacher", "F. Such", "Natalie Summers", "I. Sutskever", "Jie Tang", "N. Tezak", "Madeleine Thompson", "Phil Tillet", "Amin Tootoonchian", "Elizabeth Tseng", "Preston Tuggle", "Nick Turley", "Jerry Tworek", "Juan Felipe Cer'on Uribe", "Andrea Vallone", "Arun Vijayvergiya", "Chelsea Voss", "Carroll L. Wainwright", "Justin Jay Wang", "Alvin Wang", "Ben Wang", "Jonathan Ward", "Jason Wei", "CJ Weinmann", "Akila Welihinda", "P. 
Welinder", "Jiayi Weng", "Lilian Weng", "Matt Wiethoff", "Dave Willner", "Clemens Winter", "Samuel Wolrich", "Hannah Wong", "Lauren Workman", "Sherwin Wu", "Jeff Wu", "Michael Wu", "Kai Xiao", "Tao Xu", "Sarah Yoo", "Kevin Yu", "Qim-ing Yuan", "Wojciech Zaremba", "Rowan Zellers", "Chong Zhang", "Marvin Zhang", "Shengjia Zhao", "Tianhao Zheng", "Juntang Zhuang", "William Zhuk", "Barret Zoph" ], "externalIds": { "ArXiv": "2303.08774", "CorpusId": 257532815 }, "url": "https://www.semanticscholar.org/paper/163b4d6a79a5b19af88b8585456363340d9efd04", "referenceCount": 0, "citationCount": 7054, "influentialCitationCount": 1038, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LLaMA: Open and Efficient Foundation Language Models", "abstract": "We introduce LLaMA, a collection of foundation language models ranging from 7B to 65B parameters. We train our models on trillions of tokens, and show that it is possible to train state-of-the-art models using publicly available datasets exclusively, without resorting to proprietary and inaccessible datasets. In particular, LLaMA-13B outperforms GPT-3 (175B) on most benchmarks, and LLaMA-65B is competitive with the best models, Chinchilla-70B and PaLM-540B. We release all our models to the research community.", "year": 2023, "venue": "arXiv.org", "authors": [ "Hugo Touvron", "Thibaut Lavril", "Gautier Izacard", "Xavier Martinet", "Marie-Anne Lachaux", "Timothée Lacroix", "Baptiste Rozière", "Naman Goyal", "Eric Hambro", "Faisal Azhar", "Aurelien Rodriguez", "Armand Joulin", "Edouard Grave", "Guillaume Lample" ], "externalIds": { "DBLP": "journals/corr/abs-2302-13971", "ArXiv": "2302.13971", "CorpusId": 257219404 }, "url": "https://www.semanticscholar.org/paper/57e849d0de13ed5f91d086936296721d4ff75a75", "referenceCount": 80, "citationCount": 8031, "influentialCitationCount": 1073, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On the Opportunities and Risks of Foundation Models", "abstract": "AI is undergoing a paradigm shift with the rise of models (e.g., BERT, DALL-E, GPT-3) that are trained on broad data at scale and are adaptable to a wide range of downstream tasks. We call these models foundation models to underscore their critically central yet incomplete character. This report provides a thorough account of the opportunities and risks of foundation models, ranging from their capabilities (e.g., language, vision, robotics, reasoning, human interaction) and technical principles(e.g., model architectures, training procedures, data, systems, security, evaluation, theory) to their applications (e.g., law, healthcare, education) and societal impact (e.g., inequity, misuse, economic and environmental impact, legal and ethical considerations). Though foundation models are based on standard deep learning and transfer learning, their scale results in new emergent capabilities,and their effectiveness across so many tasks incentivizes homogenization. Homogenization provides powerful leverage but demands caution, as the defects of the foundation model are inherited by all the adapted models downstream. Despite the impending widespread deployment of foundation models, we currently lack a clear understanding of how they work, when they fail, and what they are even capable of due to their emergent properties. 
To tackle these questions, we believe much of the critical research on foundation models will require deep interdisciplinary collaboration commensurate with their fundamentally sociotechnical nature.", "year": 2021, "venue": "arXiv.org", "authors": [ "Rishi Bommasani", "Drew A. Hudson", "E. Adeli", "R. Altman", "Simran Arora", "Sydney von Arx", "Michael S. Bernstein", "J. Bohg", "Antoine Bosselut", "E. Brunskill", "Erik Brynjolfsson", "S. Buch", "Dallas Card", "Rodrigo Castellon", "Niladri S. Chatterji", "Annie S. Chen", "Kathleen A. Creel", "Jared Davis", "Dora Demszky", "Chris Donahue", "M. Doumbouya", "Esin Durmus", "Stefano Ermon", "J. Etchemendy", "Kawin Ethayarajh", "L. Fei-Fei", "Chelsea Finn", "Trevor Gale", "Lauren Gillespie", "Karan Goel", "Noah D. Goodman", "S. Grossman", "Neel Guha", "Tatsunori Hashimoto", "Peter Henderson", "John Hewitt", "Daniel E. Ho", "Jenny Hong", "Kyle Hsu", "Jing Huang", "Thomas F. Icard", "Saahil Jain", "Dan Jurafsky", "Pratyusha Kalluri", "Siddharth Karamcheti", "G. Keeling", "Fereshte Khani", "O. Khattab", "Pang Wei Koh", "M. Krass", "Ranjay Krishna", "Rohith Kuditipudi", "Ananya Kumar", "Faisal Ladhak", "Mina Lee", "Tony Lee", "J. Leskovec", "Isabelle Levent", "Xiang Lisa Li", "Xuechen Li", "Tengyu Ma", "Ali Malik", "Christopher D. Manning", "Suvir Mirchandani", "E. Mitchell", "Zanele Munyikwa", "Suraj Nair", "A. Narayan", "D. Narayanan", "Benjamin Newman", "Allen Nie", "Juan Carlos Niebles", "H. Nilforoshan", "J. Nyarko", "Giray Ogut", "Laurel J. Orr", "Isabel Papadimitriou", "J. Park", "C. Piech", "Eva Portelance", "Christopher Potts", "Aditi Raghunathan", "Robert Reich", "Hongyu Ren", "Frieda Rong", "Yusuf Roohani", "Camilo Ruiz", "Jack Ryan", "Christopher Ré", "Dorsa Sadigh", "Shiori Sagawa", "Keshav Santhanam", "Andy Shih", "K. Srinivasan", "Alex Tamkin", "Rohan Taori", "A. Thomas", "Florian Tramèr", "Rose E. Wang", "William Wang", "Bohan Wu", "Jiajun Wu", "Yuhuai Wu", "Sang Michael Xie", "Michihiro Yasunaga", "Jiaxuan You", "M. Zaharia", "Michael Zhang", "Tianyi Zhang", "Xikun Zhang", "Yuhui Zhang", "Lucia Zheng", "Kaitlyn Zhou", "Percy Liang" ], "externalIds": { "ArXiv": "2108.07258", "DBLP": "journals/corr/abs-2108-07258", "CorpusId": 237091588 }, "url": "https://www.semanticscholar.org/paper/76e9e2ec3de437ffb30d8b7b629f7fe3e61de5c2", "referenceCount": 0, "citationCount": 3226, "influentialCitationCount": 144, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Demystifying the Draft EU Artificial Intelligence Act — Analysing the good, the bad, and the unclear elements of the proposed approach", "abstract": "In April 2021, the European Commission proposed a Regulation on Artificial Intelligence, known as the AI Act. We present an overview of the Act and analyse its implications, drawing on scholarship ranging from the study of contemporary AI practices to the structure of EU product safety regimes over the last four decades. Aspects of the AI Act, such as different rules for different risk-levels of AI, make sense. But we also find that some provisions of the draft AI Act have surprising legal implications, whilst others may be largely ineffective at achieving their stated goals. Several overarching aspects, including the enforcement regime and the effect of maximum harmonisation on the space for AI policy more generally, engender significant concern. 
These issues should be addressed as a priority in the legislative process.", "year": 2021, "venue": "Computer Law Review International", "authors": [ "Michael Veale", "Frederik J. Zuiderveen Borgesius" ], "externalIds": { "DBLP": "journals/corr/abs-2107-03721", "ArXiv": "2107.03721", "DOI": "10.9785/cri-2021-220402", "CorpusId": 235765823 }, "url": "https://www.semanticscholar.org/paper/8b165eba2d0b9308682fdc4d775c00d1d3907a59", "referenceCount": 75, "citationCount": 267, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Flores-101 Evaluation Benchmark for Low-Resource and Multilingual Machine Translation", "abstract": "One of the biggest challenges hindering progress in low-resource and multilingual machine translation is the lack of good evaluation benchmarks. Current evaluation benchmarks either lack good coverage of low-resource languages, consider only restricted domains, or are low quality because they are constructed using semi-automatic procedures. In this work, we introduce the Flores-101 evaluation benchmark, consisting of 3001 sentences extracted from English Wikipedia and covering a variety of different topics and domains. These sentences have been translated in 101 languages by professional translators through a carefully controlled process. The resulting dataset enables better assessment of model quality on the long tail of low-resource languages, including the evaluation of many-to-many multilingual translation systems, as all translations are fully aligned. By publicly releasing such a high-quality and high-coverage dataset, we hope to foster progress in the machine translation community and beyond.", "year": 2021, "venue": "Transactions of the Association for Computational Linguistics", "authors": [ "Naman Goyal", "Cynthia Gao", "Vishrav Chaudhary", "Peng-Jen Chen", "Guillaume Wenzek", "Da Ju", "Sanjan Krishnan", "Marc'Aurelio Ranzato", "Francisco Guzmán", "Angela Fan" ], "externalIds": { "ArXiv": "2106.03193", "ACL": "2022.tacl-1.30", "DBLP": "journals/tacl/GoyalGCCWJKRGF22", "DOI": "10.1162/tacl_a_00474", "CorpusId": 235358129 }, "url": "https://www.semanticscholar.org/paper/789b8487da7188442085983caba3ffaae05531e9", "referenceCount": 67, "citationCount": 432, "influentialCitationCount": 73, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Does BERT Pretrained on Clinical Notes Reveal Sensitive Data?", "abstract": "Large Transformers pretrained over clinical notes from Electronic Health Records (EHR) have afforded substantial gains in performance on predictive clinical tasks. The cost of training such models (and the necessity of data access to do so) coupled with their utility motivates parameter sharing, i.e., the release of pretrained models such as ClinicalBERT. While most efforts have used deidentified EHR, many researchers have access to large sets of sensitive, non-deidentified EHR with which they might train a BERT model (or similar). Would it be safe to release the weights of such a model if they did? In this work, we design a battery of approaches intended to recover Personal Health Information (PHI) from a trained BERT. Specifically, we attempt to recover patient names and conditions with which they are associated. We find that simple probing methods are not able to meaningfully extract sensitive information from BERT trained over the MIMIC-III corpus of EHR. 
However, more sophisticated “attacks” may succeed in doing so: To facilitate such research, we make our experimental setup and baseline probing models available at https://github.com/elehman16/exposing_patient_data_release.", "year": 2021, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Eric P. Lehman", "Sarthak Jain", "Karl Pichotta", "Yoav Goldberg", "Byron C. Wallace" ], "externalIds": { "DBLP": "journals/corr/abs-2104-07762", "MAG": "3165327186", "ArXiv": "2104.07762", "ACL": "2021.naacl-main.73", "DOI": "10.18653/V1/2021.NAACL-MAIN.73", "CorpusId": 233289659 }, "url": "https://www.semanticscholar.org/paper/1d5c07e7415a7e9be078717197ddf9f3c70a2875", "referenceCount": 38, "citationCount": 107, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On the Dangers of Stochastic Parrots: Can Language Models Be Too Big? 🦜", "abstract": "The past 3 years of work in NLP have been characterized by the development and deployment of ever larger language models, especially for English. BERT, its variants, GPT-2/3, and others, most recently Switch-C, have pushed the boundaries of the possible both through architectural innovations and through sheer size. Using these pretrained models and the methodology of fine-tuning them for specific tasks, researchers have extended the state of the art on a wide array of tasks as measured by leaderboards on specific benchmarks for English. In this paper, we take a step back and ask: How big is too big? What are the possible risks associated with this technology and what paths are available for mitigating those risks? We provide recommendations including weighing the environmental and financial costs first, investing resources into curating and carefully documenting datasets rather than ingesting everything on the web, carrying out pre-development exercises evaluating how the planned approach fits into research and development goals and supports stakeholder values, and encouraging research directions beyond ever larger language models.", "year": 2021, "venue": "Conference on Fairness, Accountability and Transparency", "authors": [ "Emily M. Bender", "Timnit Gebru", "Angelina McMillan-Major", "Shmargaret Shmitchell" ], "externalIds": { "DBLP": "conf/fat/BenderGMS21", "DOI": "10.1145/3442188.3445922", "CorpusId": 262580630 }, "url": "https://www.semanticscholar.org/paper/ca2f1088d3e581b2c6c75cf0ebc96506d620f64d", "referenceCount": 164, "citationCount": 3180, "influentialCitationCount": 123, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Extracting Training Data from Large Language Models", "abstract": "It has become common to publish large (billion parameter) language models that have been trained on private datasets. This paper demonstrates that in such settings, an adversary can perform a training data extraction attack to recover individual training examples by querying the language model. \nWe demonstrate our attack on GPT-2, a language model trained on scrapes of the public Internet, and are able to extract hundreds of verbatim text sequences from the model's training data. These extracted examples include (public) personally identifiable information (names, phone numbers, and email addresses), IRC conversations, code, and 128-bit UUIDs. Our attack is possible even though each of the above sequences are included in just one document in the training data. 
\nWe comprehensively evaluate our extraction attack to understand the factors that contribute to its success. For example, we find that larger models are more vulnerable than smaller models. We conclude by drawing lessons and discussing possible safeguards for training large language models.", "year": 2020, "venue": "USENIX Security Symposium", "authors": [ "Nicholas Carlini", "Florian Tramèr", "Eric Wallace", "Matthew Jagielski", "Ariel Herbert-Voss", "Katherine Lee", "Adam Roberts", "Tom B. Brown", "D. Song", "Ú. Erlingsson", "Alina Oprea", "Colin Raffel" ], "externalIds": { "DBLP": "journals/corr/abs-2012-07805", "MAG": "3112689365", "ArXiv": "2012.07805", "CorpusId": 229156229 }, "url": "https://www.semanticscholar.org/paper/df7d26339adf4eb0c07160947b9d2973c24911ba", "referenceCount": 75, "citationCount": 1391, "influentialCitationCount": 133, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Reformulating Unsupervised Style Transfer as Paraphrase Generation", "abstract": "Modern NLP defines the task of style transfer as modifying the style of a given sentence without appreciably changing its semantics, which implies that the outputs of style transfer systems should be paraphrases of their inputs. However, many existing systems purportedly designed for style transfer inherently warp the input's meaning through attribute transfer, which changes semantic properties such as sentiment. In this paper, we reformulate unsupervised style transfer as a paraphrase generation problem, and present a simple methodology based on fine-tuning pretrained language models on automatically generated paraphrase data. Despite its simplicity, our method significantly outperforms state-of-the-art style transfer systems on both human and automatic evaluations. We also survey 23 style transfer papers and discover that existing automatic metrics can be easily gamed and propose fixed variants. Finally, we pivot to a more real-world style transfer setting by collecting a large dataset of 15M sentences in 11 diverse styles, which we use for an in-depth analysis of our system.", "year": 2020, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Kalpesh Krishna", "J. Wieting", "Mohit Iyyer" ], "externalIds": { "MAG": "3100727892", "ArXiv": "2010.05700", "DBLP": "conf/emnlp/KrishnaWI20", "ACL": "2020.emnlp-main.55", "DOI": "10.18653/v1/2020.emnlp-main.55", "CorpusId": 222291619 }, "url": "https://www.semanticscholar.org/paper/ccad27088b9098de4eaca8dc449b18766db4b3ab", "referenceCount": 112, "citationCount": 215, "influentialCitationCount": 41, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Language Models are Few-Shot Learners", "abstract": "Recent work has demonstrated substantial gains on many NLP tasks and benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples. By contrast, humans can generally perform a new language task from only a few examples or from simple instructions - something which current NLP systems still largely struggle to do. Here we show that scaling up language models greatly improves task-agnostic, few-shot performance, sometimes even reaching competitiveness with prior state-of-the-art fine-tuning approaches. 
Specifically, we train GPT-3, an autoregressive language model with 175 billion parameters, 10x more than any previous non-sparse language model, and test its performance in the few-shot setting. For all tasks, GPT-3 is applied without any gradient updates or fine-tuning, with tasks and few-shot demonstrations specified purely via text interaction with the model. GPT-3 achieves strong performance on many NLP datasets, including translation, question-answering, and cloze tasks, as well as several tasks that require on-the-fly reasoning or domain adaptation, such as unscrambling words, using a novel word in a sentence, or performing 3-digit arithmetic. At the same time, we also identify some datasets where GPT-3's few-shot learning still struggles, as well as some datasets where GPT-3 faces methodological issues related to training on large web corpora. Finally, we find that GPT-3 can generate samples of news articles which human evaluators have difficulty distinguishing from articles written by humans. We discuss broader societal impacts of this finding and of GPT-3 in general.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Tom B. Brown", "Benjamin Mann", "Nick Ryder", "Melanie Subbiah", "J. Kaplan", "Prafulla Dhariwal", "Arvind Neelakantan", "Pranav Shyam", "Girish Sastry", "Amanda Askell", "Sandhini Agarwal", "Ariel Herbert-Voss", "Gretchen Krueger", "T. Henighan", "R. Child", "A. Ramesh", "Daniel M. Ziegler", "Jeff Wu", "Clemens Winter", "Christopher Hesse", "Mark Chen", "Eric Sigler", "Mateusz Litwin", "Scott Gray", "B. Chess", "Jack Clark", "Christopher Berner", "Sam McCandlish", "Alec Radford", "I. Sutskever", "Dario Amodei" ], "externalIds": { "ArXiv": "2005.14165", "DBLP": "conf/nips/BrownMRSKDNSSAA20", "MAG": "3030163527", "CorpusId": 218971783 }, "url": "https://www.semanticscholar.org/paper/90abbc2cf38462b954ae1b772fac9532e2ccd8b0", "referenceCount": 146, "citationCount": 30854, "influentialCitationCount": 3529, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The future of digital health with federated learning", "abstract": null, "year": 2020, "venue": "npj Digital Medicine", "authors": [ "Nicola Rieke", "Jonny Hancox", "Wenqi Li", "F. Milletarì", "H. Roth", "Shadi Albarqouni", "S. Bakas", "M. Galtier", "B. Landman", "K. Maier-Hein", "S. Ourselin", "Micah J. Sheller", "Ronald M. Summers", "Andrew Trask", "Daguang Xu", "Maximilian Baust", "M. Cardoso" ], "externalIds": { "PubMedCentral": "7490367", "MAG": "3086590218", "DBLP": "journals/corr/abs-2003-08119", "ArXiv": "2003.08119", "DOI": "10.1038/s41746-020-00323-1", "CorpusId": 212747909, "PubMed": "33015372" }, "url": "https://www.semanticscholar.org/paper/b245c5d36702eaae0ff374fad92848024bc99534", "referenceCount": 82, "citationCount": 1367, "influentialCitationCount": 39, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Threats to Federated Learning: A Survey", "abstract": "With the emergence of data silos and popular privacy awareness, the traditional centralized approach of training artificial intelligence (AI) models is facing strong challenges. Federated learning (FL) has recently emerged as a promising solution under this new reality. Existing FL protocol design has been shown to exhibit vulnerabilities which can be exploited by adversaries both within and without the system to compromise data privacy. 
It is thus of paramount importance to make FL system designers to be aware of the implications of future FL algorithm design on privacy-preservation. Currently, there is no survey on this topic. In this paper, we bridge this important gap in FL literature. By providing a concise introduction to the concept of FL, and a unique taxonomy covering threat models and two major attacks on FL: 1) poisoning attacks and 2) inference attacks, this paper provides an accessible review of this important topic. We highlight the intuitions, key techniques as well as fundamental assumptions adopted by various attacks, and discuss promising future research directions towards more robust privacy preservation in FL.", "year": 2020, "venue": "arXiv.org", "authors": [ "Lingjuan Lyu", "Han Yu", "Qiang Yang" ], "externalIds": { "ArXiv": "2003.02133", "MAG": "3010262580", "DBLP": "journals/corr/abs-2003-02133", "CorpusId": 211990905 }, "url": "https://www.semanticscholar.org/paper/f3b684f3d2ddd29134c842f6d31664157703a089", "referenceCount": 46, "citationCount": 393, "influentialCitationCount": 21, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "The EU General Data Protection Regulation (GDPR)", "abstract": "This new book provides an article-by-article commentary on the new EU General Data Protection Regulation. Adopted in April 2016 and applicable from May 2018, the GDPR is the centrepiece of the recent reform of the EU regulatory framework for protection of personal data. It replaces the 1995 EU Data Protection Directive and has become the most significant piece of data protection legislation anywhere in the world. This book is edited by three leading authorities and written by a team of expert specialists in the field from around the EU and representing different sectors (including academia, the EU institutions, data protection authorities, and the private sector), thus providing a pan-European analysis of the GDPR. It examines each article of the GDPR in sequential order and explains how its provisions work, thus allowing the reader to easily and quickly elucidate the meaning of individual articles. An introductory chapter provides an overview of the background to the GDPR and its place in the greater structure of EU law and human rights law. Account is also taken of closely linked legal instruments, such as the Directive on Data Protection and Law Enforcement that was adopted concurrently with the GDPR, and of the ongoing work on the proposed new E-Privacy Regulation.", "year": 2020, "venue": "", "authors": [], "externalIds": { "DOI": "10.1093/oso/9780198826491.001.0001", "CorpusId": 169098226 }, "url": "https://www.semanticscholar.org/paper/37f7b0261170f60d3b5c33da660d7a6889037cfc", "referenceCount": 0, "citationCount": 1332, "influentialCitationCount": 72, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Human-Centered Artificial Intelligence: Reliable, Safe & Trustworthy", "abstract": "ABSTRACT Well-designed technologies that offer high levels of human control and high levels of computer automation can increase human performance, leading to wider adoption. The Human-Centered Artificial Intelligence (HCAI) framework clarifies how to (1) design for high levels of human control and high levels of computer automation so as to increase human performance, (2) understand the situations in which full human control or full computer control are necessary, and (3) avoid the dangers of excessive human control or excessive computer control. 
The methods of HCAI are more likely to produce designs that are Reliable, Safe & Trustworthy (RST). Achieving these goals will dramatically increase human performance, while supporting human self-efficacy, mastery, creativity, and responsibility.", "year": 2020, "venue": "International journal of human computer interactions", "authors": [ "B. Shneiderman" ], "externalIds": { "MAG": "3008620093", "DBLP": "journals/ijhci/Shneiderman20", "ArXiv": "2002.04087", "DOI": "10.1080/10447318.2020.1741118", "CorpusId": 211259461 }, "url": "https://www.semanticscholar.org/paper/e49f67fa5c946ad24afcf59699a9cacf1ca53924", "referenceCount": 74, "citationCount": 511, "influentialCitationCount": 32, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Recommendation of the Council on Artificial Intelligence (OECD)", "abstract": "On May 22, 2019, the Organisation for Economic Co-operation and Development (OECD) Ministerial Council Meeting adopted the Recommendation on Artificial Intelligence, signed by all 36 OECD member countries and non-member countries Argentina, Brazil, Columbia, Costa Rica, Peru, and Romania. Its aim is to foster innovation and trust in artificial intelligence (AI) by promoting the “responsible stewardship of trustworthy AI.”", "year": 2020, "venue": "International Legal Materials", "authors": [ "K. Yeung" ], "externalIds": { "MAG": "3010919171", "DOI": "10.1017/ilm.2020.5", "CorpusId": 216239761 }, "url": "https://www.semanticscholar.org/paper/04c902a91806288af4c7646e95cc2c94d9f15d97", "referenceCount": 9, "citationCount": 339, "influentialCitationCount": 18, "isOpenAccess": false, "fieldsOfStudy": [ "Engineering" ] }, { "title": "Scaling Laws for Neural Language Models", "abstract": "We study empirical scaling laws for language model performance on the cross-entropy loss. The loss scales as a power-law with model size, dataset size, and the amount of compute used for training, with some trends spanning more than seven orders of magnitude. Other architectural details such as network width or depth have minimal effects within a wide range. Simple equations govern the dependence of overfitting on model/dataset size and the dependence of training speed on model size. These relationships allow us to determine the optimal allocation of a fixed compute budget. Larger models are significantly more sample-efficient, such that optimally compute-efficient training involves training very large models on a relatively modest amount of data and stopping significantly before convergence.", "year": 2020, "venue": "arXiv.org", "authors": [ "J. Kaplan", "Sam McCandlish", "T. Henighan", "Tom B. Brown", "B. Chess", "R. Child", "Scott Gray", "Alec Radford", "Jeff Wu", "Dario Amodei" ], "externalIds": { "MAG": "3001279689", "ArXiv": "2001.08361", "DBLP": "journals/corr/abs-2001-08361", "CorpusId": 210861095 }, "url": "https://www.semanticscholar.org/paper/e6c561d02500b2596a230b341a8eb8b921ca5bf2", "referenceCount": 59, "citationCount": 3074, "influentialCitationCount": 266, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Questioning the AI: Informing Design Practices for Explainable AI User Experiences", "abstract": "A surge of interest in explainable AI (XAI) has led to a vast collection of algorithmic work on the topic. While many recognize the necessity to incorporate explainability features in AI systems, how to address real-world user needs for understanding AI remains an open question. 
By interviewing 20 UX and design practitioners working on various AI products, we seek to identify gaps between the current XAI algorithmic work and practices to create explainable AI products. To do so, we develop an algorithm-informed XAI question bank in which user needs for explainability are represented as prototypical questions users might ask about the AI, and use it as a study probe. Our work contributes insights into the design space of XAI, informs efforts to support design practices in this space, and identifies opportunities for future XAI work. We also provide an extended XAI question bank and discuss how it can be used for creating user-centered XAI.", "year": 2020, "venue": "International Conference on Human Factors in Computing Systems", "authors": [ "Q. Liao", "D. Gruen", "Sarah Miller" ], "externalIds": { "DBLP": "journals/corr/abs-2001-02478", "ArXiv": "2001.02478", "MAG": "2999765337", "DOI": "10.1145/3313831.3376590", "CorpusId": 210064344 }, "url": "https://www.semanticscholar.org/paper/7d089d4cc4aff5c10c1704f02119e2487fc898c9", "referenceCount": 107, "citationCount": 584, "influentialCitationCount": 51, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension", "abstract": "We present BART, a denoising autoencoder for pretraining sequence-to-sequence models. BART is trained by (1) corrupting text with an arbitrary noising function, and (2) learning a model to reconstruct the original text. It uses a standard Tranformer-based neural machine translation architecture which, despite its simplicity, can be seen as generalizing BERT (due to the bidirectional encoder), GPT (with the left-to-right decoder), and other recent pretraining schemes. We evaluate a number of noising approaches, finding the best performance by both randomly shuffling the order of sentences and using a novel in-filling scheme, where spans of text are replaced with a single mask token. BART is particularly effective when fine tuned for text generation but also works well for comprehension tasks. It matches the performance of RoBERTa on GLUE and SQuAD, and achieves new state-of-the-art results on a range of abstractive dialogue, question answering, and summarization tasks, with gains of up to 3.5 ROUGE. BART also provides a 1.1 BLEU increase over a back-translation system for machine translation, with only target language pretraining. We also replicate other pretraining schemes within the BART framework, to understand their effect on end-task performance.", "year": 2019, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "M. 
Lewis", "Yinhan Liu", "Naman Goyal", "Marjan Ghazvininejad", "Abdel-rahman Mohamed", "Omer Levy", "Veselin Stoyanov", "Luke Zettlemoyer" ], "externalIds": { "MAG": "2982399380", "DBLP": "conf/acl/LewisLGGMLSZ20", "ACL": "2020.acl-main.703", "ArXiv": "1910.13461", "DOI": "10.18653/v1/2020.acl-main.703", "CorpusId": 204960716 }, "url": "https://www.semanticscholar.org/paper/395de0bd3837fdf4b4b5e5f04835bcc69c279481", "referenceCount": 36, "citationCount": 9214, "influentialCitationCount": 1963, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer", "abstract": "Transfer learning, where a model is first pre-trained on a data-rich task before being fine-tuned on a downstream task, has emerged as a powerful technique in natural language processing (NLP). The effectiveness of transfer learning has given rise to a diversity of approaches, methodology, and practice. In this paper, we explore the landscape of transfer learning techniques for NLP by introducing a unified framework that converts every language problem into a text-to-text format. Our systematic study compares pre-training objectives, architectures, unlabeled datasets, transfer approaches, and other factors on dozens of language understanding tasks. By combining the insights from our exploration with scale and our new \"Colossal Clean Crawled Corpus\", we achieve state-of-the-art results on many benchmarks covering summarization, question answering, text classification, and more. To facilitate future work on transfer learning for NLP, we release our dataset, pre-trained models, and code.", "year": 2019, "venue": "Journal of machine learning research", "authors": [ "Colin Raffel", "Noam M. Shazeer", "Adam Roberts", "Katherine Lee", "Sharan Narang", "Michael Matena", "Yanqi Zhou", "Wei Li", "Peter J. Liu" ], "externalIds": { "MAG": "2981852735", "DBLP": "journals/corr/abs-1910-10683", "ArXiv": "1910.10683", "CorpusId": 204838007 }, "url": "https://www.semanticscholar.org/paper/6c4b76232bb72897685d19b3d264c6ee3005bc2b", "referenceCount": 134, "citationCount": 15984, "influentialCitationCount": 2029, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "A Survey on Bias and Fairness in Machine Learning", "abstract": "With the widespread use of artificial intelligence (AI) systems and applications in our everyday lives, accounting for fairness has gained significant importance in designing and engineering of such systems. AI systems can be used in many sensitive environments to make important and life-changing decisions; thus, it is crucial to ensure that these decisions do not reflect discriminatory behavior toward certain groups or populations. More recently some work has been developed in traditional machine learning and deep learning that address such challenges in different subdomains. With the commercialization of these systems, researchers are becoming more aware of the biases that these applications can contain and are attempting to address them. In this survey, we investigated different real-world applications that have shown biases in various ways, and we listed different sources of biases that can affect AI applications. We then created a taxonomy for fairness definitions that machine learning researchers have defined to avoid the existing bias in AI systems. 
In addition to that, we examined different domains and subdomains in AI showing what researchers have observed with regard to unfair outcomes in the state-of-the-art methods and ways they have tried to address them. There are still many future directions and solutions that can be taken to mitigate the problem of bias in AI systems. We are hoping that this survey will motivate researchers to tackle these issues in the near future by observing existing work in their respective fields.", "year": 2019, "venue": "ACM Computing Surveys", "authors": [ "Ninareh Mehrabi", "Fred Morstatter", "N. Saxena", "Kristina Lerman", "A. Galstyan" ], "externalIds": { "ArXiv": "1908.09635", "MAG": "2969896603", "DBLP": "journals/csur/MehrabiMSLG21", "DOI": "10.1145/3457607", "CorpusId": 201666566 }, "url": "https://www.semanticscholar.org/paper/0090023afc66cd2741568599057f4e82b566137c", "referenceCount": 188, "citationCount": 3354, "influentialCitationCount": 221, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Universal Adversarial Triggers for Attacking and Analyzing NLP", "abstract": "Adversarial examples highlight model vulnerabilities and are useful for evaluation and interpretation. We define universal adversarial triggers: input-agnostic sequences of tokens that trigger a model to produce a specific prediction when concatenated to any input from a dataset. We propose a gradient-guided search over tokens which finds short trigger sequences (e.g., one word for classification and four words for language modeling) that successfully trigger the target prediction. For example, triggers cause SNLI entailment accuracy to drop from 89.94% to 0.55%, 72% of “why” questions in SQuAD to be answered “to kill american people”, and the GPT-2 language model to spew racist output even when conditioned on non-racial contexts. Furthermore, although the triggers are optimized using white-box access to a specific model, they transfer to other models for all tasks we consider. Finally, since triggers are input-agnostic, they provide an analysis of global model behavior. For instance, they confirm that SNLI models exploit dataset biases and help to diagnose heuristics learned by reading comprehension models.", "year": 2019, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Eric Wallace", "Shi Feng", "Nikhil Kandpal", "Matt Gardner", "Sameer Singh" ], "externalIds": { "MAG": "2970290563", "ArXiv": "1908.07125", "DBLP": "conf/emnlp/WallaceFKGS19", "ACL": "D19-1221", "DOI": "10.18653/v1/D19-1221", "CorpusId": 201698258 }, "url": "https://www.semanticscholar.org/paper/3caf34532597683c980134579b156cd0d7db2f40", "referenceCount": 38, "citationCount": 724, "influentialCitationCount": 69, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RoBERTa: A Robustly Optimized BERT Pretraining Approach", "abstract": "Language model pretraining has led to significant performance gains but careful comparison between different approaches is challenging. Training is computationally expensive, often done on private datasets of different sizes, and, as we will show, hyperparameter choices have significant impact on the final results. We present a replication study of BERT pretraining (Devlin et al., 2019) that carefully measures the impact of many key hyperparameters and training data size. We find that BERT was significantly undertrained, and can match or exceed the performance of every model published after it. 
Our best model achieves state-of-the-art results on GLUE, RACE and SQuAD. These results highlight the importance of previously overlooked design choices, and raise questions about the source of recently reported improvements. We release our models and code.", "year": 2019, "venue": "arXiv.org", "authors": [ "Yinhan Liu", "Myle Ott", "Naman Goyal", "Jingfei Du", "Mandar Joshi", "Danqi Chen", "Omer Levy", "M. Lewis", "Luke Zettlemoyer", "Veselin Stoyanov" ], "externalIds": { "DBLP": "journals/corr/abs-1907-11692", "MAG": "2965373594", "ArXiv": "1907.11692", "CorpusId": 198953378 }, "url": "https://www.semanticscholar.org/paper/077f8329a7b6fa3b7c877a57b81eb6c18b5f87de", "referenceCount": 68, "citationCount": 20963, "influentialCitationCount": 4860, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Estimating the success of re-identifications in incomplete datasets using generative models", "abstract": null, "year": 2019, "venue": "Nature Communications", "authors": [ "Luc Rocher", "J. Hendrickx", "Y. de Montjoye" ], "externalIds": { "PubMedCentral": "6650473", "MAG": "2963693643", "DOI": "10.1038/s41467-019-10933-3", "CorpusId": 198190707, "PubMed": "31337762" }, "url": "https://www.semanticscholar.org/paper/f34b6423878090c7422c078a3dd6a3db80fb7532", "referenceCount": 74, "citationCount": 530, "influentialCitationCount": 22, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "DARPA's Explainable Artificial Intelligence (XAI) Program", "abstract": "Dramatic success in machine learning has led to a new wave of AI applications (for example, transportation, security, medicine, finance, defense) that offer tremendous benefits but cannot explain their decisions and actions to human users. DARPA’s explainable artificial intelligence (XAI) program endeavors to create AI systems whose learned models and decisions can be understood and appropriately trusted by end users. Realizing this goal requires methods for learning more explainable models, designing effective explanation interfaces, and understanding the psychologic requirements for effective explanations. The XAI developer teams are addressing the first two challenges by creating ML techniques and developing principles, strategies, and human-computer interaction techniques for generating effective explanations. Another XAI team is addressing the third challenge by summarizing, extending, and applying psychologic theories of explanation to help the XAI evaluator define a suitable evaluation framework, which the developer teams will use to test their systems. The XAI teams completed the first of this 4-year program in May 2018. In a series of ongoing evaluations, the developer teams are assessing how well their XAM systems’ explanations improve user understanding, user trust, and user task performance.", "year": 2019, "venue": "The AI Magazine", "authors": [ "David Gunning", "D. 
Aha" ], "externalIds": { "MAG": "2954503794", "DBLP": "journals/aim/GunningA19", "DOI": "10.1609/AIMAG.V40I2.2850", "CorpusId": 67773377 }, "url": "https://www.semanticscholar.org/paper/06645d735b59b14479ae1d0392136bbf44227d0f", "referenceCount": 32, "citationCount": 983, "influentialCitationCount": 61, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "XLNet: Generalized Autoregressive Pretraining for Language Understanding", "abstract": "With the capability of modeling bidirectional contexts, denoising autoencoding based pretraining like BERT achieves better performance than pretraining approaches based on autoregressive language modeling. However, relying on corrupting the input with masks, BERT neglects dependency between the masked positions and suffers from a pretrain-finetune discrepancy. In light of these pros and cons, we propose XLNet, a generalized autoregressive pretraining method that (1) enables learning bidirectional contexts by maximizing the expected likelihood over all permutations of the factorization order and (2) overcomes the limitations of BERT thanks to its autoregressive formulation. Furthermore, XLNet integrates ideas from Transformer-XL, the state-of-the-art autoregressive model, into pretraining. Empirically, under comparable experiment settings, XLNet outperforms BERT on 20 tasks, often by a large margin, including question answering, natural language inference, sentiment analysis, and document ranking.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Zhilin Yang", "Zihang Dai", "Yiming Yang", "J. Carbonell", "R. Salakhutdinov", "Quoc V. Le" ], "externalIds": { "DBLP": "conf/nips/YangDYCSL19", "MAG": "2950813464", "ArXiv": "1906.08237", "CorpusId": 195069387 }, "url": "https://www.semanticscholar.org/paper/e0c6abdbdecf04ffac65c440da77fb9d66bb474c", "referenceCount": 47, "citationCount": 7672, "influentialCitationCount": 903, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GLTR: Statistical Detection and Visualization of Generated Text", "abstract": "The rapid improvement of language models has raised the specter of abuse of text generation systems. This progress motivates the development of simple methods for detecting generated text that can be used by non-experts. In this work, we introduce GLTR, a tool to support humans in detecting whether a text was generated by a model. GLTR applies a suite of baseline statistical methods that can detect generation artifacts across multiple sampling schemes. In a human-subjects study, we show that the annotation scheme provided by GLTR improves the human detection-rate of fake text from 54% to 72% without any prior training. GLTR is open-source and publicly deployed, and has already been widely used to detect generated outputs.", "year": 2019, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Sebastian Gehrmann", "Hendrik Strobelt", "Alexander M. 
Rush" ], "externalIds": { "MAG": "2951080837", "DBLP": "conf/acl/GehrmannSR19", "ACL": "P19-3019", "ArXiv": "1906.04043", "DOI": "10.18653/v1/P19-3019", "CorpusId": 182952848 }, "url": "https://www.semanticscholar.org/paper/867db5097ad6aaef098c60b0845785b440eca49a", "referenceCount": 22, "citationCount": 404, "influentialCitationCount": 61, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "How to Fine-Tune BERT for Text Classification?", "abstract": null, "year": 2019, "venue": "China National Conference on Chinese Computational Linguistics", "authors": [ "Chi Sun", "Xipeng Qiu", "Yige Xu", "Xuanjing Huang" ], "externalIds": { "DBLP": "journals/corr/abs-1905-05583", "MAG": "2980708516", "ArXiv": "1905.05583", "DOI": "10.1007/978-3-030-32381-3_16", "CorpusId": 153312532 }, "url": "https://www.semanticscholar.org/paper/a022bda79947d1f656a1164003c1b3ae9a843df9", "referenceCount": 39, "citationCount": 1346, "influentialCitationCount": 105, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SECNLP: A Survey of Embeddings in Clinical Natural Language Processing", "abstract": null, "year": 2019, "venue": "Journal of Biomedical Informatics", "authors": [ "Katikapalli Subramanyam Kalyan", "S. Sangeetha" ], "externalIds": { "DBLP": "journals/jbi/KalyanS20", "MAG": "2985932981", "ArXiv": "1903.01039", "DOI": "10.1016/J.JBI.2019.103323", "CorpusId": 67855323, "PubMed": "31711972" }, "url": "https://www.semanticscholar.org/paper/db9ff3080be1acac2f403d0c79c9ec776a3d3b5f", "referenceCount": 152, "citationCount": 77, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Actionable Auditing: Investigating the Impact of Publicly Naming Biased Performance Results of Commercial AI Products", "abstract": "Although algorithmic auditing has emerged as a key strategy to expose systematic biases embedded in software platforms, we struggle to understand the real-world impact of these audits, as scholarship on the impact of algorithmic audits on increasing algorithmic fairness and transparency in commercial systems is nascent. To analyze the impact of publicly naming and disclosing performance results of biased AI systems, we investigate the commercial impact of Gender Shades, the first algorithmic audit of gender and skin type performance disparities in commercial facial analysis models. This paper 1) outlines the audit design and structured disclosure procedure used in the Gender Shades study, 2) presents new performance metrics from targeted companies IBM, Microsoft and Megvii (Face++) on the Pilot Parliaments Benchmark (PPB) as of August 2018, 3) provides performance results on PPB by non-target companies Amazon and Kairos and, 4) explores differences in company responses as shared through corporate communications that contextualize differences in performance on PPB. Within 7 months of the original audit, we find that all three targets released new API versions. All targets reduced accuracy disparities between males and females and darker and lighter-skinned subgroups, with the most significant update occurring for the darker-skinned female subgroup, that underwent a 17.7% - 30.4% reduction in error between audit periods. Minimizing these disparities led to a 5.72% to 8.3% reduction in overall error on the Pilot Parliaments Benchmark (PPB) for target corporation APIs. 
The overall performance of non-targets Amazon and Kairos lags significantly behind that of the targets, with error rates of 8.66% and 6.60% overall, and error rates of 31.37% and 22.50% for the darker female subgroup, respectively.", "year": 2019, "venue": "AAAI/ACM Conference on AI, Ethics, and Society", "authors": [ "Inioluwa Deborah Raji", "Joy Buolamwini" ], "externalIds": { "MAG": "2962059918", "DBLP": "conf/aies/RajiB19", "DOI": "10.1145/3306618.3314244", "CorpusId": 91168921 }, "url": "https://www.semanticscholar.org/paper/5c5105eb4923932f6489b69a7651bff5cbcb77fe", "referenceCount": 38, "citationCount": 430, "influentialCitationCount": 17, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On Using Linear Diophantine Equations for in-Parallel Hiding of Decision Tree Rules", "abstract": "Data sharing among organizations has become an increasingly common procedure in several areas such as advertising, marketing, electronic commerce, banking, and insurance sectors. However, any organization will most likely try to keep some patterns as hidden as possible once it shares its datasets with others. This paper focuses on preserving the privacy of sensitive patterns when inducing decision trees. We adopt a record augmentation approach to hide critical classification rules in binary datasets. Such a hiding methodology is preferred over other heuristic solutions like output perturbation or cryptographic techniques, which limit the usability of the data, since the raw data itself is readily available for public use. We propose a look ahead technique using linear Diophantine equations to add the appropriate number of instances while maintaining the initial entropy of the nodes. This method can be used to hide one or more decision tree rules optimally.", "year": 2019, "venue": "Entropy", "authors": [ "G. Feretzakis", "Dimitris Kalles", "V. Verykios" ], "externalIds": { "PubMedCentral": "7514174", "MAG": "2909893962", "DBLP": "journals/entropy/FeretzakisKV19", "DOI": "10.3390/e21010066", "CorpusId": 67772769, "PubMed": "33266782" }, "url": "https://www.semanticscholar.org/paper/7adb67b4023b2fddb55813ec546b0fcfeeef29ff", "referenceCount": 23, "citationCount": 10, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Healthcare Data Breaches: Implications for Digital Forensic Readiness", "abstract": null, "year": 2018, "venue": "Journal of medical systems", "authors": [ "M. Chernyshev", "S. Zeadally", "Zubair A. Baig" ], "externalIds": { "MAG": "2902689954", "DBLP": "journals/jms/ChernyshevZB19", "DOI": "10.1007/s10916-018-1123-2", "CorpusId": 53822817, "PubMed": "30488291" }, "url": "https://www.semanticscholar.org/paper/382e03b9f7fd05b229144566a22f31016d451d84", "referenceCount": 32, "citationCount": 75, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "On Using Linear Diophantine Equations for Efficient Hiding of Decision Tree Rules", "abstract": "Data sharing among organizations has become an increasingly common procedure in several areas like advertising, marketing, e-commerce and banking, but any organization will probably attempt to keep some patterns as hidden as possible when it shares its datasets with others. This paper focuses on preserving the privacy of sensitive patterns when inducing decision trees. We adopt a record augmentation approach for hiding sensitive classification rules in binary datasets. 
Such a hiding methodology is preferred over other heuristic solutions like output perturbation or cryptographic techniques - which restrict the usability of the data - since the raw data itself is readily available for public use. We propose a look-ahead approach using linear Diophantine equations in order to add the appropriate number of instances while maintaining the initial entropy of the nodes. This technique can be used to hide one or more decision tree rules in an optimal way.", "year": 2018, "venue": "Hellenic Conference on Artificial Intelligence", "authors": [ "G. Feretzakis", "Dimitris Kalles", "V. Verykios" ], "externalIds": { "DBLP": "conf/setn/FeretzakisKV18", "MAG": "2830176335", "DOI": "10.1145/3200947.3201030", "CorpusId": 49669645 }, "url": "https://www.semanticscholar.org/paper/655fe7bb228bef1b6556e54e9379f9459c232f1a", "referenceCount": 22, "citationCount": 7, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Delete, Retrieve, Generate: a Simple Approach to Sentiment and Style Transfer", "abstract": "We consider the task of text attribute transfer: transforming a sentence to alter a specific attribute (e.g., sentiment) while preserving its attribute-independent content (e.g., “screen is just the right size” to “screen is too small”). Our training data includes only sentences labeled with their attribute (e.g., positive and negative), but not pairs of sentences that only differ in the attributes, so we must learn to disentangle attributes from attribute-independent content in an unsupervised way. Previous work using adversarial methods has struggled to produce high-quality outputs. In this paper, we propose simpler methods motivated by the observation that text attributes are often marked by distinctive phrases (e.g., “too small”). Our strongest method extracts content words by deleting phrases associated with the sentence’s original attribute value, retrieves new phrases associated with the target attribute, and uses a neural model to fluently combine these into a final output. Based on human evaluation, our best method generates grammatical and appropriate responses on 22% more inputs than the best previous system, averaged over three attribute transfer datasets: altering sentiment of reviews on Yelp, altering sentiment of reviews on Amazon, and altering image captions to be more romantic or humorous.", "year": 2018, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Juncen Li", "Robin Jia", "He He", "Percy Liang" ], "externalIds": { "DBLP": "journals/corr/abs-1804-06437", "ACL": "N18-1169", "ArXiv": "1804.06437", "MAG": "2963667126", "DOI": "10.18653/v1/N18-1169", "CorpusId": 4937880 }, "url": "https://www.semanticscholar.org/paper/1975ae6d8693eedfb07d5348798351fe51ab242b", "referenceCount": 23, "citationCount": 521, "influentialCitationCount": 155, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Secret Sharer: Evaluating and Testing Unintended Memorization in Neural Networks", "abstract": "This paper describes a testing methodology for quantitatively assessing the risk that rare or unique training-data sequences are unintentionally memorized by generative sequence models---a common type of machine-learning model. 
Because such models are sometimes trained on sensitive data (e.g., the text of users' private messages), this methodology can benefit privacy by allowing deep-learning practitioners to select means of training that minimize such memorization. \nIn experiments, we show that unintended memorization is a persistent, hard-to-avoid issue that can have serious consequences. Specifically, for models trained without consideration of memorization, we describe new, efficient procedures that can extract unique, secret sequences, such as credit card numbers. We show that our testing strategy is a practical and easy-to-use first line of defense, e.g., by describing its application to quantitatively limit data exposure in Google's Smart Compose, a commercial text-completion neural network trained on millions of users' email messages.", "year": 2018, "venue": "USENIX Security Symposium", "authors": [ "Nicholas Carlini", "Chang Liu", "Ú. Erlingsson", "Jernej Kos", "D. Song" ], "externalIds": { "MAG": "2965267010", "ArXiv": "1802.08232", "DBLP": "conf/uss/Carlini0EKS19", "CorpusId": 170076423 }, "url": "https://www.semanticscholar.org/paper/520ec00dc35475e0554dbb72f27bd2eeb6f4191d", "referenceCount": 72, "citationCount": 950, "influentialCitationCount": 84, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Ethics in artificial intelligence: introduction to the special issue", "abstract": null, "year": 2018, "venue": "Ethics and Information Technology", "authors": [ "Virginia Dignum" ], "externalIds": { "DBLP": "journals/ethicsit/Dignum18", "MAG": "2790902688", "DOI": "10.1007/s10676-018-9450-z", "CorpusId": 3695868 }, "url": "https://www.semanticscholar.org/paper/b700356155318f6c577905c0fa1adb53aa25a270", "referenceCount": 3, "citationCount": 242, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Sociology", "Computer Science" ] }, { "title": "Ethical Challenges in Data-Driven Dialogue Systems", "abstract": "The use of dialogue systems as a medium for human-machine interaction is an increasingly prevalent paradigm. A growing number of dialogue systems use conversation strategies that are learned from large datasets. There are well documented instances where interactions with these system have resulted in biased or even offensive conversations due to the data-driven training process. Here, we highlight potential ethical issues that arise in dialogue systems research, including: implicit biases in data-driven systems, the rise of adversarial examples, potential sources of privacy violations, safety concerns, special considerations for reinforcement learning systems, and reproducibility concerns. We also suggest areas stemming from these issues that deserve further investigation. Through this initial survey, we hope to spur research leading to robust, safe, and ethically sound dialogue systems.", "year": 2017, "venue": "AAAI/ACM Conference on AI, Ethics, and Society", "authors": [ "Peter Henderson", "Koustuv Sinha", "Nicolas Angelard-Gontier", "Nan Rosemary Ke", "G. 
Fried", "Ryan Lowe", "Joelle Pineau" ], "externalIds": { "MAG": "2951222957", "DBLP": "journals/corr/abs-1711-09050", "ArXiv": "1711.09050", "DOI": "10.1145/3278721.3278777", "CorpusId": 33499714 }, "url": "https://www.semanticscholar.org/paper/a24d72bd0d08d515cb3e26f94131d33ad6c861db", "referenceCount": 47, "citationCount": 154, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fairer machine learning in the real world: Mitigating discrimination without collecting sensitive data", "abstract": "Decisions based on algorithmic, machine learning models can be unfair, reproducing biases in historical data used to train them. While computational techniques are emerging to address aspects of these concerns through communities such as discrimination-aware data mining (DADM) and fairness, accountability and transparency machine learning (FATML), their practical implementation faces real-world challenges. For legal, institutional or commercial reasons, organisations might not hold the data on sensitive attributes such as gender, ethnicity, sexuality or disability needed to diagnose and mitigate emergent indirect discrimination-by-proxy, such as redlining. Such organisations might also lack the knowledge and capacity to identify and manage fairness issues that are emergent properties of complex sociotechnical systems. This paper presents and discusses three potential approaches to deal with such knowledge and information deficits in the context of fairer machine learning. Trusted third parties could selectively store data necessary for performing discrimination discovery and incorporating fairness constraints into model-building in a privacy-preserving manner. Collaborative online platforms would allow diverse organisations to record, share and access contextual and experiential knowledge to promote fairness in machine learning systems. Finally, unsupervised learning and pedagogically interpretable algorithms might allow fairness hypotheses to be built for further selective testing and exploration. Real-world fairness challenges in machine learning are not abstract, constrained optimisation problems, but are institutionally and contextually grounded. Computational fairness tools are useful, but must be researched and developed in and with the messy contexts that will shape their deployment, rather than just for imagined situations. Not doing so risks real, near-term algorithmic harm.", "year": 2017, "venue": "Big Data & Society", "authors": [ "Michael Veale", "Reuben Binns" ], "externalIds": { "MAG": "2765146466", "DBLP": "journals/bigdatasociety/VealeB17", "DOI": "10.1177/2053951717743530", "CorpusId": 67356162 }, "url": "https://www.semanticscholar.org/paper/61d37ec780aba86806af77b36d513624ec1b66b2", "referenceCount": 114, "citationCount": 272, "influentialCitationCount": 21, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Machine Learning Models that Remember Too Much", "abstract": "Machine learning (ML) is becoming a commodity. Numerous ML frameworks and services are available to data holders who are not ML experts but want to train predictive models on their data. It is important that ML models trained on sensitive inputs (e.g., personal images or documents) not leak too much information about the training data. We consider a malicious ML provider who supplies model-training code to the data holder, does \\emph{not} observe the training, but then obtains white- or black-box access to the resulting model. 
In this setting, we design and implement practical algorithms, some of them very similar to standard ML techniques such as regularization and data augmentation, that \"memorize\" information about the training dataset in the model\\textemdash yet the model is as accurate and predictive as a conventionally trained model. We then explain how the adversary can extract memorized information from the model. We evaluate our techniques on standard ML tasks for image classification (CIFAR10), face recognition (LFW and FaceScrub), and text analysis (20 Newsgroups and IMDB). In all cases, we show how our algorithms create models that have high predictive power yet allow accurate extraction of subsets of their training data.", "year": 2017, "venue": "Conference on Computer and Communications Security", "authors": [ "Congzheng Song", "Thomas Ristenpart", "Vitaly Shmatikov" ], "externalIds": { "MAG": "2757528734", "DBLP": "conf/ccs/SongRS17", "ArXiv": "1709.07886", "DOI": "10.1145/3133956.3134077", "CorpusId": 2904063 }, "url": "https://www.semanticscholar.org/paper/18cfd4b9e35fb12fbebedb0fdc3f7811090372bf", "referenceCount": 66, "citationCount": 458, "influentialCitationCount": 24, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Explanation in Artificial Intelligence: Insights from the Social Sciences", "abstract": null, "year": 2017, "venue": "Artificial Intelligence", "authors": [ "Tim Miller" ], "externalIds": { "ArXiv": "1706.07269", "MAG": "2953283116", "DBLP": "journals/ai/Miller19", "DOI": "10.1016/J.ARTINT.2018.07.007", "CorpusId": 36024272 }, "url": "https://www.semanticscholar.org/paper/e89dfa306723e8ef031765e9c44e5f6f94fd8fda", "referenceCount": 200, "citationCount": 3692, "influentialCitationCount": 408, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Reinforcement Learning from Human Preferences", "abstract": "For sophisticated reinforcement learning (RL) systems to interact usefully with real-world environments, we need to communicate complex goals to these systems. In this work, we explore goals defined in terms of (non-expert) human preferences between pairs of trajectory segments. We show that this approach can effectively solve complex RL tasks without access to the reward function, including Atari games and simulated robot locomotion, while providing feedback on less than one percent of our agent's interactions with the environment. This reduces the cost of human oversight far enough that it can be practically applied to state-of-the-art RL systems. To demonstrate the flexibility of our approach, we show that we can successfully train complex novel behaviors with about an hour of human time. These behaviors and environments are considerably more complex than any that have been previously learned from human feedback.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "P. Christiano", "J. Leike", "Tom B. Brown", "Miljan Martic", "S. 
Legg", "Dario Amodei" ], "externalIds": { "MAG": "2626804490", "DBLP": "journals/corr/abs-1706-03741", "ArXiv": "1706.03741", "CorpusId": 4787508 }, "url": "https://www.semanticscholar.org/paper/5bbb6f9a8204eb13070b6f033e61c84ef8ee68dd", "referenceCount": 45, "citationCount": 2301, "influentialCitationCount": 306, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Attention is All you Need", "abstract": "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Ashish Vaswani", "Noam M. Shazeer", "Niki Parmar", "Jakob Uszkoreit", "Llion Jones", "Aidan N. Gomez", "Lukasz Kaiser", "Illia Polosukhin" ], "externalIds": { "MAG": "2963403868", "DBLP": "conf/nips/VaswaniSPUJGKP17", "ArXiv": "1706.03762", "CorpusId": 13756489 }, "url": "https://www.semanticscholar.org/paper/204e3073870fae3d05bcbc2f6a8e263d9b72e776", "referenceCount": 41, "citationCount": 104988, "influentialCitationCount": 15363, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Mutual Learning", "abstract": "Model distillation is an effective and widely used technique to transfer knowledge from a teacher to a student network. The typical application is to transfer from a powerful large network or ensemble to a small network, in order to meet the low-memory or fast execution requirements. In this paper, we present a deep mutual learning (DML) strategy. Different from the one-way transfer between a static pre-defined teacher and a student in model distillation, with DML, an ensemble of students learn collaboratively and teach each other throughout the training process. Our experiments show that a variety of network architectures benefit from mutual learning and achieve compelling results on both category and instance recognition tasks. Surprisingly, it is revealed that no prior powerful teacher network is necessary - mutual learning of a collection of simple student networks works, and moreover outperforms distillation from a more powerful yet static teacher.", "year": 2017, "venue": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition", "authors": [ "Ying Zhang", "T. Xiang", "Timothy M. 
Hospedales", "Huchuan Lu" ], "externalIds": { "DBLP": "journals/corr/ZhangXHL17", "MAG": "2951168573", "ArXiv": "1706.00384", "DOI": "10.1109/CVPR.2018.00454", "CorpusId": 26071966 }, "url": "https://www.semanticscholar.org/paper/f06a12928307e17b1aff2b9f4a6c11791f19b6a7", "referenceCount": 42, "citationCount": 1490, "influentialCitationCount": 257, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Bag of Tricks for Efficient Text Classification", "abstract": "This paper explores a simple and efficient baseline for text classification. Our experiments show that our fast text classifier fastText is often on par with deep learning classifiers in terms of accuracy, and many orders of magnitude faster for training and evaluation. We can train fastText on more than one billion words in less than ten minutes using a standard multicore CPU, and classify half a million sentences among 312K classes in less than a minute.", "year": 2016, "venue": "Conference of the European Chapter of the Association for Computational Linguistics", "authors": [ "Armand Joulin", "Edouard Grave", "Piotr Bojanowski", "Tomas Mikolov" ], "externalIds": { "MAG": "2468328197", "ArXiv": "1607.01759", "DBLP": "journals/corr/JoulinGBM16", "ACL": "E17-2068", "DOI": "10.18653/V1/E17-2068", "CorpusId": 1210515 }, "url": "https://www.semanticscholar.org/paper/892e53fe5cd39f037cb2a961499f42f3002595dd", "referenceCount": 30, "citationCount": 4315, "influentialCitationCount": 426, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Learning with Differential Privacy", "abstract": "Machine learning techniques based on neural networks are achieving remarkable results in a wide variety of domains. Often, the training of models requires large, representative datasets, which may be crowdsourced and contain sensitive information. The models should not expose private information in these datasets. Addressing this goal, we develop new algorithmic techniques for learning and a refined analysis of privacy costs within the framework of differential privacy. Our implementation and experiments demonstrate that we can train deep neural networks with non-convex objectives, under a modest privacy budget, and at a manageable cost in software complexity, training efficiency, and model quality.", "year": 2016, "venue": "Conference on Computer and Communications Security", "authors": [ "Martín Abadi", "Andy Chu", "I. Goodfellow", "H. B. 
McMahan", "Ilya Mironov", "Kunal Talwar", "Li Zhang" ], "externalIds": { "MAG": "3098586851", "ArXiv": "1607.00133", "DBLP": "conf/ccs/AbadiCGMMT016", "DOI": "10.1145/2976749.2978318", "CorpusId": 207241585 }, "url": "https://www.semanticscholar.org/paper/e9a986c8ff6c2f381d026fe014f6aaa865f34da7", "referenceCount": 63, "citationCount": 5238, "influentialCitationCount": 1065, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Neural Architectures for Named Entity Recognition", "abstract": "Comunicacio presentada a la 2016 Conference of the North American Chapter of the Association for Computational Linguistics, celebrada a San Diego (CA, EUA) els dies 12 a 17 de juny 2016.", "year": 2016, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Guillaume Lample", "Miguel Ballesteros", "Sandeep Subramanian", "Kazuya Kawakami", "Chris Dyer" ], "externalIds": { "MAG": "2296283641", "DBLP": "conf/naacl/LampleBSKD16", "ACL": "N16-1030", "ArXiv": "1603.01360", "DOI": "10.18653/v1/N16-1030", "CorpusId": 6042994 }, "url": "https://www.semanticscholar.org/paper/f5a7da72496e2ca8edcd9f9123773012c010cfc6", "referenceCount": 49, "citationCount": 3858, "influentialCitationCount": 595, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Model Inversion Attacks that Exploit Confidence Information and Basic Countermeasures", "abstract": "Machine-learning (ML) algorithms are increasingly utilized in privacy-sensitive applications such as predicting lifestyle choices, making medical diagnoses, and facial recognition. In a model inversion attack, recently introduced in a case study of linear classifiers in personalized medicine by Fredrikson et al., adversarial access to an ML model is abused to learn sensitive genomic information about individuals. Whether model inversion attacks apply to settings outside theirs, however, is unknown. We develop a new class of model inversion attack that exploits confidence values revealed along with predictions. Our new attacks are applicable in a variety of settings, and we explore two in depth: decision trees for lifestyle surveys as used on machine-learning-as-a-service systems and neural networks for facial recognition. In both cases confidence values are revealed to those with the ability to make prediction queries to models. We experimentally show attacks that are able to estimate whether a respondent in a lifestyle survey admitted to cheating on their significant other and, in the other context, show how to recover recognizable images of people's faces given only their name and access to the ML model. We also initiate experimental exploration of natural countermeasures, investigating a privacy-aware decision tree training algorithm that is a simple variant of CART learning, as well as revealing only rounded confidence values. The lesson that emerges is that one can avoid these kinds of MI attacks with negligible degradation to utility.", "year": 2015, "venue": "Conference on Computer and Communications Security", "authors": [ "Matt Fredrikson", "S. 
Jha", "Thomas Ristenpart" ], "externalIds": { "DBLP": "conf/ccs/FredriksonJR15", "MAG": "2051267297", "DOI": "10.1145/2810103.2813677", "CorpusId": 207229839 }, "url": "https://www.semanticscholar.org/paper/d1b9a3b11e6c9571a1553556f82b605b2b4baec3", "referenceCount": 37, "citationCount": 2437, "influentialCitationCount": 182, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Privacy-preserving deep learning", "abstract": "Deep learning based on artificial neural networks is a very popular approach to modeling, classifying, and recognizing complex data such as images, speech, and text. The unprecedented accuracy of deep learning methods has turned them into the foundation of new AI-based services on the Internet. Commercial companies that collect user data on a large scale have been the main beneficiaries of this trend since the success of deep learning techniques is directly proportional to the amount of data available for training. Massive data collection required for deep learning presents obvious privacy issues. Users' personal, highly sensitive data such as photos and voice recordings is kept indefinitely by the companies that collect it. Users can neither delete it, nor restrict the purposes for which it is used. Furthermore, centrally kept data is subject to legal subpoenas and extrajudicial surveillance. Many data owners-for example, medical institutions that may want to apply deep learning methods to clinical records-are prevented by privacy and confidentiality concerns from sharing the data and thus benefitting from large-scale deep learning. In this paper, we present a practical system that enables multiple parties to jointly learn an accurate neural-network model for a given objective without sharing their input datasets. We exploit the fact that the optimization algorithms used in modern deep learning, namely, those based on stochastic gradient descent, can be parallelized and executed asynchronously. Our system lets participants train independently on their own datasets and selectively share small subsets of their models' key parameters during training. This offers an attractive point in the utility/privacy tradeoff space: participants preserve the privacy of their respective data while still benefitting from other participants' models and thus boosting their learning accuracy beyond what is achievable solely on their own inputs. We demonstrate the accuracy of our privacy-preserving deep learning on benchmark datasets.", "year": 2015, "venue": "Allerton Conference on Communication, Control, and Computing", "authors": [ "R. Shokri", "Vitaly Shmatikov" ], "externalIds": { "MAG": "2053637704", "DBLP": "conf/ccs/ShokriS15", "DOI": "10.1145/2810103.2813687", "CorpusId": 20714 }, "url": "https://www.semanticscholar.org/paper/f2f8f7a2ec1b2ede48cbcd189b376ab9fa0735ef", "referenceCount": 61, "citationCount": 2110, "influentialCitationCount": 169, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Algorithmic Foundations of Differential Privacy", "abstract": "The problem of privacy-preserving data analysis has a long history spanning multiple disciplines. As electronic data about individuals becomes increasingly detailed, and as technology enables ever more powerful collection and curation of these data, the need increases for a robust, meaningful, and mathematically rigorous definition of privacy, together with a computationally rich class of algorithms that satisfy this definition. 
Differential Privacy is such a definition. After motivating and discussing the meaning of differential privacy, the preponderance of this monograph is devoted to fundamental techniques for achieving differential privacy, and application of these techniques in creative combinations, using the query-release problem as an ongoing example. A key point is that, by rethinking the computational goal, one can often obtain far better results than would be achieved by methodically replacing each step of a non-private computation with a differentially private implementation. Despite some astonishingly powerful computational results, there are still fundamental limitations — not just on what can be achieved with differential privacy but on what can be achieved with any method that protects against a complete breakdown in privacy. Virtually all the algorithms discussed herein maintain differential privacy against adversaries of arbitrary computational power. Certain algorithms are computationally intensive, others are efficient. Computational complexity for the adversary and the algorithm are both discussed. We then turn from fundamentals to applications other than query-release, discussing differentially private methods for mechanism design and machine learning. The vast majority of the literature on differentially private algorithms considers a single, static, database that is subject to many analyses. Differential privacy in other models, including distributed databases and computations on data streams is discussed. Finally, we note that this work is meant as a thorough introduction to the problems and techniques of differential privacy, but is not intended to be an exhaustive survey — there is by now a vast amount of work in differential privacy, and we can cover only a small portion of it.", "year": 2014, "venue": "Foundations and Trends® in Theoretical Computer Science", "authors": [ "C. Dwork", "Aaron Roth" ], "externalIds": { "DBLP": "journals/fttcs/DworkR14", "MAG": "2027595342", "DOI": "10.1561/0400000042", "CorpusId": 207178262 }, "url": "https://www.semanticscholar.org/paper/0023582fde36430c7e3ae81611a14e558c8f4bae", "referenceCount": 96, "citationCount": 6778, "influentialCitationCount": 1196, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Guide to Attribute Based Access Control (ABAC) Definition and Considerations", "abstract": "This document provides Federal agencies with a definition of attribute based access control (ABAC). ABAC is a logical access control methodology where authorization to perform a set of operations is determined by evaluating attributes associated with the subject, object, requested operations, and, in some cases, environment conditions against policy, rules, or relationships that describe the allowable operations for a given set of attributes. This document also provides considerations for using ABAC to improve information sharing within organizations and between organizations while maintaining control of that information.", "year": 2014, "venue": "", "authors": [ "Vincent C. Hu", "David F. Ferraiolo", "R. Kuhn", "Adam Schnitzer", "Kenneth Sandlin", "Robert Miller", "K. 
Scarfone" ], "externalIds": { "MAG": "2749040653", "DOI": "10.6028/NIST.SP.800-162", "CorpusId": 168659974 }, "url": "https://www.semanticscholar.org/paper/048b94e963a22c6f573dbf6ccf060ebfafd49456", "referenceCount": 22, "citationCount": 903, "influentialCitationCount": 114, "isOpenAccess": false, "fieldsOfStudy": [ "Business" ] }, { "title": "Hacking smart machines with smarter ones: How to extract meaningful data from machine learning classifiers", "abstract": "Machine Learning (ML) algorithms are used to train computers to perform a variety of complex tasks and improve with experience. Computers learn how to recognize patterns, make unintended decisions, or react to a dynamic environment. Certain trained machines may be more effective than others because they are based on more suitable ML algorithms or because they were trained through superior training sets. Although ML algorithms are known and publicly released, training sets may not be reasonably ascertainable and, indeed, may be guarded as trade secrets. While much research has been performed about the privacy of the elements of training sets, in this paper we focus our attention on ML classifiers and on the statistical information that can be unconsciously or maliciously revealed from them. We show that it is possible to infer unexpected but useful information from ML classifiers. In particular, we build a novel meta-classifier and train it to hack other classifiers, obtaining meaningful information about their training sets. This kind of information leakage can be exploited, for example, by a vendor to build more effective classifiers or to simply acquire trade secrets from a competitor's apparatus, potentially violating its intellectual property rights.", "year": 2013, "venue": "Int. J. Secur. Networks", "authors": [ "G. Ateniese", "L. Mancini", "A. Spognardi", "Antonio Villani", "Domenico Vitali", "G. Felici" ], "externalIds": { "MAG": "2962835266", "ArXiv": "1306.4447", "DBLP": "journals/corr/AtenieseFMSVV13", "DOI": "10.1504/IJSN.2015.071829", "CorpusId": 14757739 }, "url": "https://www.semanticscholar.org/paper/f63487b3fda2d96d8b3e97391448c76e00f2353c", "referenceCount": 78, "citationCount": 430, "influentialCitationCount": 37, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Unified Attribute-Based Access Control Model Covering DAC, MAC and RBAC", "abstract": null, "year": 2012, "venue": "Database Security", "authors": [ "Xin Jin", "R. Krishnan", "R. Sandhu" ], "externalIds": { "MAG": "2097171586", "DBLP": "conf/dbsec/JinKS12", "DOI": "10.1007/978-3-642-31540-4_4", "CorpusId": 12968879 }, "url": "https://www.semanticscholar.org/paper/da0eb5a0b11132adeadc15431ffb2d5b0dbac591", "referenceCount": 36, "citationCount": 430, "influentialCitationCount": 49, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Privacy in Social Networks", "abstract": null, "year": 2012, "venue": "Synthesis Lectures on Data Mining and Knowledge Discovery", "authors": [ "E. Zheleva", "Evimaria Terzi", "L. 
Getoor" ], "externalIds": { "MAG": "2106200908", "DBLP": "series/synthesis/2012Zheleva", "DOI": "10.1007/978-3-031-01901-2", "CorpusId": 31988495 }, "url": "https://www.semanticscholar.org/paper/67b77851af2a95984b5b0ac4542307e229954bd4", "referenceCount": 86, "citationCount": 47, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Survey of Data Leakage Detection and Prevention Solutions", "abstract": null, "year": 2012, "venue": "SpringerBriefs in Computer Science", "authors": [ "A. Shabtai", "Y. Elovici", "L. Rokach" ], "externalIds": { "MAG": "132347231", "DBLP": "series/sbcs/ShabtaiER12", "DOI": "10.1007/978-1-4614-2053-8", "CorpusId": 20507521 }, "url": "https://www.semanticscholar.org/paper/e018415b22c87dc95eefaefa533e30bad824ea97", "referenceCount": 72, "citationCount": 210, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "A Systematic Review of Re-Identification Attacks on Health Data", "abstract": "Background Privacy legislation in most jurisdictions allows the disclosure of health data for secondary purposes without patient consent if it is de-identified. Some recent articles in the medical, legal, and computer science literature have argued that de-identification methods do not provide sufficient protection because they are easy to reverse. Should this be the case, it would have significant and important implications on how health information is disclosed, including: (a) potentially limiting its availability for secondary purposes such as research, and (b) resulting in more identifiable health information being disclosed. Our objectives in this systematic review were to: (a) characterize known re-identification attacks on health data and contrast that to re-identification attacks on other kinds of data, (b) compute the overall proportion of records that have been correctly re-identified in these attacks, and (c) assess whether these demonstrate weaknesses in current de-identification methods. Methods and Findings Searches were conducted in IEEE Xplore, ACM Digital Library, and PubMed. After screening, fourteen eligible articles representing distinct attacks were identified. On average, approximately a quarter of the records were re-identified across all studies (0.26 with 95% CI 0.046–0.478) and 0.34 for attacks on health data (95% CI 0–0.744). There was considerable uncertainty around the proportions as evidenced by the wide confidence intervals, and the mean proportion of records re-identified was sensitive to unpublished studies. Two of fourteen attacks were performed with data that was de-identified using existing standards. Only one of these attacks was on health data, which resulted in a success rate of 0.00013. Conclusions The current evidence shows a high re-identification rate but is dominated by small-scale studies on data that was not de-identified according to existing standards. This evidence is insufficient to draw conclusions about the efficacy of de-identification methods.", "year": 2011, "venue": "PLoS ONE", "authors": [ "Khaled El Emam", "Elizabeth Jonker", "Luk Arbuckle", "B. 
Malin" ], "externalIds": { "PubMedCentral": "3229505", "MAG": "2083384763", "DOI": "10.1371/journal.pone.0028071", "CorpusId": 28024953, "PubMed": "22164229" }, "url": "https://www.semanticscholar.org/paper/3803595fac5818f0fce6296a6e3afc414c280a5a", "referenceCount": 84, "citationCount": 183, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Privacy in the Digital Age: A Review of Information Privacy Research in Information Systems", "abstract": "Information privacy refers to the desire of individuals to control or have some influence over data about themselves. Advances in information technology have raised concerns about information privacy and its impacts, and have motivated Information Systems researchers to explore information privacy issues, including technical solutions to address these concerns. In this paper, we inform researchers about the current state of information privacy research in IS through a critical analysis of the IS literature that considers information privacy as a key construct. The review of the literature reveals that information privacy is a multilevel concept, but rarely studied as such. We also find that information privacy research has been heavily reliant on studentbased and USA-centric samples, which results in findings of limited generalizability. Information privacy research focuses on explaining and predicting theoretical contributions, with few studies in journal articles focusing on design and action contributions. We recommend that future research should consider different levels of analysis as well as multilevel effects of information privacy. We illustrate this with a multilevel framework for information privacy concerns. We call for research on information privacy to use a broader diversity of sampling populations, and for more design and action information privacy research to be published in journal articles that can result in IT artifacts for protection or control of information privacy.", "year": 2011, "venue": "MIS Q.", "authors": [ "F. Bélanger", "Robert E. Crossler" ], "externalIds": { "DBLP": "journals/misq/BelangerC11", "MAG": "1870146437", "DOI": "10.2307/41409971", "CorpusId": 261293914 }, "url": "https://www.semanticscholar.org/paper/b98e0e42fb045bb920e0564e1a03d6e9a9448ec9", "referenceCount": 481, "citationCount": 1206, "influentialCitationCount": 72, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Bayesian data analysis.", "abstract": "Bayesian methods have garnered huge interest in cognitive science as an approach to models of cognition and perception. On the other hand, Bayesian methods for data analysis have not yet made much headway in cognitive science against the institutionalized inertia of 20th century null hypothesis significance testing (NHST). Ironically, specific Bayesian models of cognition and perception may not long endure the ravages of empirical verification, but generic Bayesian methods for data analysis will eventually dominate. It is time that Bayesian data analysis became the norm for empirical methods in cognitive science. This article reviews a fatal flaw of NHST and introduces the reader to some benefits of Bayesian data analysis. The article presents illustrative examples of multiple comparisons in Bayesian analysis of variance and Bayesian approaches to statistical power. Copyright © 2010 John Wiley & Sons, Ltd. 
For further resources related to this article, please visit the WIREs website.", "year": 2010, "venue": "Wiley Interdisciplinary Reviews: Cognitive Science", "authors": [ "J. Kruschke" ], "externalIds": { "MAG": "1977234485", "DOI": "10.1002/wcs.72", "CorpusId": 7101807, "PubMed": "26271651" }, "url": "https://www.semanticscholar.org/paper/4e2f43dab69d690dc86422949e410ebf37f522d4", "referenceCount": 74, "citationCount": 10231, "influentialCitationCount": 1319, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Digital forensics research: The next 10 years", "abstract": null, "year": 2010, "venue": "Digital Investigation. The International Journal of Digital Forensics and Incident Response", "authors": [ "S. Garfinkel" ], "externalIds": { "MAG": "2156350103", "DBLP": "journals/di/Garfinkel10", "DOI": "10.1016/J.DIIN.2010.05.009", "CorpusId": 1864600 }, "url": "https://www.semanticscholar.org/paper/a5dc9c53a26e0f7262f0809d1650a387ae648b77", "referenceCount": 51, "citationCount": 669, "influentialCitationCount": 51, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Adding Attributes to Role-Based Access Control", "abstract": "Merging the best features of RBAC and attribute-based systems can provide effective access control for distributed and rapidly changing applications.", "year": 2010, "venue": "Computer", "authors": [ "D. R. Kuhn", "Edward J. Coyne", "Timothy R. Weil" ], "externalIds": { "MAG": "1992435849", "DBLP": "journals/computer/KuhnCW10", "DOI": "10.1109/MC.2010.155", "CorpusId": 17866775 }, "url": "https://www.semanticscholar.org/paper/3c647cbcbebe293712ca9678b1377d50b581a922", "referenceCount": 0, "citationCount": 445, "influentialCitationCount": 26, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Data Loss Prevention", "abstract": "In today's digital economy, data enters and leaves cyberspace at record rates. A typical enterprise sends and receives millions of email messages and downloads, saves, and transfers thousands of files via various channels on a daily basis. Enterprises also hold sensitive data that customers, business partners, regulators, and shareholders expect them to protect. Unfortunately, companies constantly fall victim to massive data loss, and high-profile data leakages involving sensitive personal and corporate data continue to appear (http://opensecurityfoundation. org). Data loss could substantially harm a company's competitiveness and reputation and could also invite lawsuits or regulatory consequences for lax security. Therefore, organizations should take measures to understand the sensitive data they hold, how it's controlled, and how to prevent it from being leaked or compromised.", "year": 2010, "venue": "IT Professional Magazine", "authors": [ "Simon Liu", "R. Kuhn" ], "externalIds": { "DBLP": "journals/itpro/LiuK10", "MAG": "2013745554", "DOI": "10.1109/MITP.2010.52", "CorpusId": 19303090 }, "url": "https://www.semanticscholar.org/paper/ec7f650cffd2dca754f6d4e71c696dfb0f0d54f9", "referenceCount": 0, "citationCount": 140, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Real life challenges in access-control management", "abstract": "In this work we ask the question: what are the challenges of managing a physical or file system access-control policy for a large organization? 
To answer the question, we conducted a series of interviews with thirteen administrators who manage access-control policy for either a file system or a physical space. Based on these interviews we identified three sets of real-world requirements that are either ignored or inadequately addressed by technology: 1) policies are made/implemented by multiple people; 2) policy makers are distinct from policy implementers; and 3) access-control systems don't always have the capability to implement the desired policy. We present our interview results and propose several possible solutions to address the observed issues.", "year": 2009, "venue": "International Conference on Human Factors in Computing Systems", "authors": [ "Lujo Bauer", "L. Cranor", "R. Reeder", "M. Reiter", "Kami Vaniea" ], "externalIds": { "DBLP": "conf/chi/BauerCRRV09", "MAG": "3028407954", "DOI": "10.1145/1518701.1518838", "CorpusId": 1782529 }, "url": "https://www.semanticscholar.org/paper/843f972bd9688a3c26798c93fed09f070ca79c8b", "referenceCount": 21, "citationCount": 89, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SP 800-60 Rev. 1. Volume I: Guide for Mapping Types of Information and Information Systems to Security Categories; Volume II: Appendices to Guide for Mapping Types of Information and Information Systems to Security Categories", "abstract": "Title III of the E-Government Act, titled the Federal Information Security Management Act (FISMA) of 2002, tasked NIST to develop (1) standards to be used by all Federal agencies to categorize information and information systems collected or maintained by or on behalf of each agency based on the objectives of providing appropriate levels of information security according to a range of risk levels; and (2) guidelines recommending the types of information and information systems to be included in each such category. Special Publication 800-60 was issued in response to the second of these tasks. The revision to Volume I contains the basic guidelines for mapping types of information and information systems to security categories. The appendices contained in Volume I include security categorization recommendations and rationale for mission-based and management and support information types.", "year": 2008, "venue": "", "authors": [ "Kevin M. Stine", "Richard Kissel", "William C. Barker", "Annabelle Lee", "James Fahlsing", "Jessica Gulick" ], "externalIds": { "MAG": "14180909", "CorpusId": 264677579 }, "url": "https://www.semanticscholar.org/paper/007274204f2895a8fa53579f6bf846a778f45f20", "referenceCount": 0, "citationCount": 22, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Forensics examination of volatile system data using virtual introspection", "abstract": "While static examination of computer systems is an important part of many digital forensics investigations, there are often important system properties present only in volatile memory that cannot be effectively recovered using static analysis techniques, such as offline hard disk acquisition and analysis. An alternative approach, involving the live analysis of target systems to uncover this volatile data, presents significant risks and challenges to forensic investigators as observation techniques are generally intrusive and can affect the system being observed. 
This paper provides a discussion of live digital forensics analysis through virtual introspection and presents a suite of virtual introspection tools developed for Xen (VIX tools). The VIX tools suite can be used for unobtrusive digital forensic examination of volatile system data in virtual machines, and addresses a key research area identified in the virtualization in digital forensics research agenda [22].", "year": 2008, "venue": "OPSR", "authors": [ "Brian Hay", "K. Nance" ], "externalIds": { "MAG": "2006172326", "DOI": "10.1145/1368506.1368517", "CorpusId": 16653502 }, "url": "https://www.semanticscholar.org/paper/a7b640053b5bbdc24085e5adf04c3c8393c1e645", "referenceCount": 27, "citationCount": 157, "influentialCitationCount": 11, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A qualitative study of users' view on information security", "abstract": null, "year": 2007, "venue": "Computers & security", "authors": [ "E. Albrechtsen" ], "externalIds": { "MAG": "2025043113", "DBLP": "journals/compsec/Albrechtsen07", "DOI": "10.1016/j.cose.2006.11.004", "CorpusId": 21615652 }, "url": "https://www.semanticscholar.org/paper/c8da973eb636637f4d56b1b7b1a4993bf34db166", "referenceCount": 32, "citationCount": 380, "influentialCitationCount": 36, "isOpenAccess": false, "fieldsOfStudy": [ "Business", "Computer Science" ] }, { "title": "Embedding Information Security into the Organization", "abstract": "Risk and business have always been inseparable, but new information security risks pose unknown challenges. How should firms organize and manage to improve enterprise security? Here, the authors describe how chief information security officers (CISOs) are working to build secure organizations.", "year": 2007, "venue": "IEEE Security and Privacy", "authors": [ "M. E. Johnson", "E. Goetz" ], "externalIds": { "DBLP": "journals/ieeesp/JohnsonG07", "MAG": "2000431168", "DOI": "10.1109/MSP.2007.59", "CorpusId": 6433627 }, "url": "https://www.semanticscholar.org/paper/450a8417fc1657669c61db93492d7b9e0162e0af", "referenceCount": 12, "citationCount": 145, "influentialCitationCount": 9, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Survey of Explanations in Recommender Systems", "abstract": "This paper provides a comprehensive review of explanations in recommender systems. We highlight seven possible advantages of an explanation facility, and describe how existing measures can be used to evaluate the quality of explanations. Since explanations are not independent of the recommendation process, we consider how the ways recommendations are presented may affect explanations. Next, we look at different ways of interacting with explanations. The paper is illustrated with examples of explanations throughout, where possible from existing applications.", "year": 2007, "venue": "2007 IEEE 23rd International Conference on Data Engineering Workshop", "authors": [ "N. Tintarev", "Judith Masthoff" ], "externalIds": { "DBLP": "conf/icde/TintarevM07", "MAG": "2126159342", "DOI": "10.1109/ICDEW.2007.4401070", "CorpusId": 1674804 }, "url": "https://www.semanticscholar.org/paper/a253a4ede67b04f383d71dc60ffd91d9ac8782f7", "referenceCount": 39, "citationCount": 543, "influentialCitationCount": 56, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Privacy-Preserving Data Mining Systems", "abstract": "Although successful in many applications, data mining poses special concerns for private data. 
An integrated architecture takes a systemic view of the problem, implementing established protocols for data collection, inference control, and information sharing. Our goal in investigating privacy preservation issues was to take a systemic view of architectural requirements and design principles and explore possible solutions that would lead to guidelines for building practical privacy-preserving data mining systems", "year": 2007, "venue": "Computer", "authors": [ "Nan Zhang", "Wei Zhao" ], "externalIds": { "DBLP": "journals/computer/ZhangZ07", "MAG": "2150264441", "DOI": "10.1109/MC.2007.142", "CorpusId": 9918868 }, "url": "https://www.semanticscholar.org/paper/07f6d9252456cdd47f48254e1b3b7c5cc66df777", "referenceCount": 10, "citationCount": 90, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Attributed based access control (ABAC) for Web services", "abstract": "For companies and government agencies alike, the emergence of Web services technologies and the evolution of distributed systems toward service oriented architectures (SOA) have helped promote collaboration and information sharing by breaking down \"stove-piped\" systems and connecting them via loosely coupled, interoperable system-to-system interfaces. Such architectures, however, also bring about their own security challenges that require due consideration. Unfortunately, the current information security mechanisms are insufficient to address these challenges. In particular, the access control models today are mostly static and coarsely grained; they are not well-suited for the service-oriented environments where information access is dynamic and ad-hoc in nature. This paper outlines the access control challenges for Web services and SOA, and proposes an attribute based access control (ABAC) model as a new approach, which is based on subject, object, and environment attributes and supports both mandatory and discretionary access control needs. The paper describes the ABAC model in terms of its authorization architecture and policy formulation, and makes a detailed comparison between ABAC and traditional role-based models, which clearly shows the advantages of ABAC. The paper then describes how this new model can be applied to securing Web service invocations, with an implementation based on standard protocols and open-source tools. The paper concludes with a summary of the ABAC model's benefits and some future directions.", "year": 2005, "venue": "IEEE International Conference on Web Services (ICWS'05)", "authors": [ "E. Yuan", "Jin Tong" ], "externalIds": { "DBLP": "conf/icws/YuanT05", "MAG": "2070199221", "DOI": "10.1109/ICWS.2005.25", "CorpusId": 14842618 }, "url": "https://www.semanticscholar.org/paper/2c11e3fb8666058214367f924f0bbdeb5a5ec518", "referenceCount": 17, "citationCount": 747, "influentialCitationCount": 71, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Principles of Information Security", "abstract": "Explore the field of information security and assurance with this valuable resource that focuses on both the managerial and technical aspects of the discipline. Principles of Information Security, Third Edition builds on internationally recognized standards and bodies of knowledge to provide the knowledge and skills that information systems students need for their future roles as business decision-makers. 
Coverage includes key knowledge areas of the CISSP (Certified Information Systems Security Professional), as well as risk management, cryptography, physical security, and more. The third edition has retained the real-world examples and scenarios that made previous editions so successful, but has updated the content to reflect technology's latest capabilities and trends. With this emphasis on currency and comprehensive coverage, readers can feel confident that they are using a standards-based, content-driven resource to prepare them for their work in the field.", "year": 2004, "venue": "", "authors": [ "M. Whitman", "H. Mattord" ], "externalIds": { "MAG": "1742813727", "CorpusId": 108679949 }, "url": "https://www.semanticscholar.org/paper/2427bc7e16daced373da960bbb0591cd7740a200", "referenceCount": 0, "citationCount": 1025, "influentialCitationCount": 64, "isOpenAccess": false, "fieldsOfStudy": [ "Engineering" ] }, { "title": "Association rule hiding", "abstract": "Large repositories of data contain sensitive information that must be protected against unauthorized access. The protection of the confidentiality of this information has been a long-term goal for the database security research community and for the government statistical agencies. Recent advances in data mining and machine learning algorithms have increased the disclosure risks that one may encounter when releasing data to outside parties. A key problem, and still not sufficiently investigated, is the need to balance the confidentiality of the disclosed data with the legitimate needs of the data users. Every disclosure limitation method affects, in some way, and modifies true data values and relationships. We investigate confidentiality issues of a broad category of rules, the association rules. In particular, we present three strategies and five algorithms for hiding a group of association rules, which is characterized as sensitive. One rule is characterized as sensitive if its disclosure risk is above a certain privacy threshold. Sometimes, sensitive rules should not be disclosed to the public since, among other things, they may be used for inferring sensitive data, or they may provide business competitors with an advantage. We also perform an evaluation study of the hiding algorithms in order to analyze their time complexity and the impact that they have in the original database.", "year": 2004, "venue": "IEEE Transactions on Knowledge and Data Engineering", "authors": [ "V. Verykios", "A. Elmagarmid", "E. Bertino", "Y. Saygin", "Elena Dasseni" ], "externalIds": { "MAG": "2095576022", "DBLP": "journals/tkde/VerykiosEBSD04", "DOI": "10.1109/TKDE.2004.1269668", "CorpusId": 12571250 }, "url": "https://www.semanticscholar.org/paper/bb24f265ad1166f9b992561d165495c5c8b1d2ea", "referenceCount": 13, "citationCount": 577, "influentialCitationCount": 30, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Trust in Automation: Designing for Appropriate Reliance", "abstract": "Automation is often problematic because people fail to rely upon it appropriately. Because people respond to technology socially, trust influences reliance on automation. In particular, trust guides reliance when complexity and unanticipated situations make a complete understanding of the automation impractical. This review considers trust from the organizational, sociological, interpersonal, psychological, and neurological perspectives. 
It considers how the context, automation characteristics, and cognitive processes affect the appropriateness of trust. The context in which the automation is used influences automation performance and provides a goal-oriented perspective to assess automation characteristics along a dimension of attributional abstraction. These characteristics can influence trust through analytic, analogical, and affective processes. The challenges of extrapolating the concept of trust in people to trust in automation are discussed. A conceptual model integrates research regarding trust in automation and describes the dynamics of trust, the role of context, and the influence of display characteristics. Actual or potential applications of this research include improved designs of systems that require people to manage imperfect automation.", "year": 2004, "venue": "Hum. Factors", "authors": [ "John D. Lee", "Katrina A. See" ], "externalIds": { "DBLP": "journals/hf/LeeS04", "MAG": "2110171129", "DOI": "10.1518/hfes.46.1.50_30392", "CorpusId": 5210390, "PubMed": "15151155" }, "url": "https://www.semanticscholar.org/paper/7dd86508438657ac7a704a5d952a2a4422808975", "referenceCount": 206, "citationCount": 4218, "influentialCitationCount": 556, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "State-of-the-art in privacy preserving data mining", "abstract": "We provide here an overview of the new and rapidly emerging research area of privacy preserving data mining. We also propose a classification hierarchy that sets the basis for analyzing the work which has been performed in this context. A detailed review of the work accomplished in this area is also given, along with the coordinates of each work to the classification hierarchy. A brief evaluation is performed, and some initial conclusions are made.", "year": 2004, "venue": "SGMD", "authors": [ "V. Verykios", "E. Bertino", "I. N. Fovino", "L. P. Provenza", "Y. Saygin", "Y. Theodoridis" ], "externalIds": { "MAG": "2145747124", "DBLP": "journals/sigmod/VerykiosBFPST04", "DOI": "10.1145/974121.974131", "CorpusId": 11566780 }, "url": "https://www.semanticscholar.org/paper/b7afabcf040631ab917047451bde4f40ae40b76a", "referenceCount": 111, "citationCount": 988, "influentialCitationCount": 40, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Privacy as contextual integrity", "abstract": "The practices of public surveillance, which include the monitoring of individuals in public through a variety of media (e.g., video, data, online), are among the least understood and controversial challenges to privacy in an age of information technologies. The fragmentary nature of privacy policy in the United States reflects not only the oppositional pulls of diverse vested interests, but also the ambivalence of unsettled intuitions on mundane phenomena such as shopper cards, closed-circuit television, and biometrics. This Article, which extends earlier work on the problem of privacy in public, explains why some of the prominent theoretical approaches to privacy, which were developed over time to meet traditional privacy challenges, yield unsatisfactory conclusions in the case of public surveillance. It posits a new construct, “contextual integrity,” as an alternative benchmark for privacy, to capture the nature of challenges posed by information technologies. 
Contextual integrity ties adequate protection for privacy to norms of specific contexts, demanding that information gathering and dissemination be appropriate to that context and obey the governing norms of distribution within it. Building on the idea of “spheres of justice,” developed by political philosopher Michael Walzer, this Article argues that public surveillance violates a right to privacy because it violates contextual integrity; as such, it constitutes injustice and even tyranny.", "year": 2004, "venue": "", "authors": [ "H. Nissenbaum" ], "externalIds": { "MAG": "72496981", "CorpusId": 150528892 }, "url": "https://www.semanticscholar.org/paper/df077652e3e119e28a81d8a2e9d26f4e91c34676", "referenceCount": 24, "citationCount": 1687, "influentialCitationCount": 142, "isOpenAccess": false, "fieldsOfStudy": [ "Economics" ] }, { "title": "User Acceptance of Information Technology: Toward a Unified View", "abstract": "Information technology (IT) acceptance research has yielded many competing models, each with different sets of acceptance determinants. In this paper, we (1) review user acceptance literature and discuss eight prominent models, (2) empirically compare the eight models and their extensions, (3) formulate a unified model that integrates elements across the eight models, and (4) empirically validate the unified model. The eight models reviewed are the theory of reasoned action, the technology acceptance model, the motivational model, the theory of planned behavior, a model combining the technology acceptance model and the theory of planned behavior, the model of PC utilization, the innovation diffusion theory, and the social cognitive theory. Using data from four organizations over a six-month period with three points of measurement, the eight models explained between 17 percent and 53 percent of the variance in user intentions to use information technology. Next, a unified model, called the Unified Theory of Acceptance and Use of Technology (UTAUT), was formulated, with four core determinants of intention and usage, and up to four moderators of key relationships. UTAUT was then tested using the original data and found to outperform the eight individual models (adjusted R2 of 69 percent). UTAUT was then confirmed with data from two new organizations with similar results (adjusted R2 of 70 percent). UTAUT thus provides a useful tool for managers needing to assess the likelihood of success for new technology introductions and helps them understand the drivers of acceptance in order to proactively design interventions (including training, marketing, etc.) targeted at populations of users that may be less inclined to adopt and use new systems. The paper also makes several recommendations for future research including developing a deeper understanding of the dynamic influences studied here, refining measurement of the core constructs used in UTAUT, and understanding the organizational outcomes associated with new technology use.", "year": 2003, "venue": "MIS Q.", "authors": [ "V. Venkatesh", "Michael G. Morris", "G. Davis", "Fred D. 
Davis" ], "externalIds": { "MAG": "2100379340", "DBLP": "journals/misq/VenkateshMDD03", "DOI": "10.2307/30036540", "CorpusId": 14435677 }, "url": "https://www.semanticscholar.org/paper/f444aecb9a6cc1219d6baf81c55f23dfce3d9788", "referenceCount": 96, "citationCount": 34070, "influentialCitationCount": 6297, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology", "Computer Science" ] }, { "title": "On-line trust: concepts, evolving themes, a model", "abstract": null, "year": 2003, "venue": "Int. J. Hum. Comput. Stud.", "authors": [ "Cynthia L. Corritore", "Beverly Kracher", "Susan Wiedenbeck" ], "externalIds": { "MAG": "2104882046", "DBLP": "journals/ijmms/CorritoreKW03a", "DOI": "10.1016/S1071-5819(03)00041-7", "CorpusId": 16791511 }, "url": "https://www.semanticscholar.org/paper/1aaf0eab33bb5589d357f0b1786575e448e385c9", "referenceCount": 103, "citationCount": 1256, "influentialCitationCount": 74, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A taxonomy for information security technologies", "abstract": null, "year": 2003, "venue": "Computers & security", "authors": [ "H. Venter", "J. Eloff" ], "externalIds": { "DBLP": "journals/compsec/VenterE03", "MAG": "1983226837", "DOI": "10.1016/S0167-4048(03)00406-1", "CorpusId": 206029441 }, "url": "https://www.semanticscholar.org/paper/1c7197e3efae91ac8af4054d9c09d8783d1aacf9", "referenceCount": 24, "citationCount": 117, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "k-Anonymity: A Model for Protecting Privacy", "abstract": "Consider a data holder, such as a hospital or a bank, that has a privately held collection of person-specific, field structured data. Suppose the data holder wants to share a version of the data with researchers. How can a data holder release a version of its private data with scientific guarantees that the individuals who are the subjects of the data cannot be re-identified while the data remain practically useful? The solution provided in this paper includes a formal protection model named k-anonymity and a set of accompanying policies for deployment. A release provides k-anonymity protection if the information for each person contained in the release cannot be distinguished from at least k-1 individuals whose information also appears in the release. This paper also examines re-identification attacks that can be realized on releases that adhere to k- anonymity unless accompanying policies are respected. The k-anonymity protection model is important because it forms the basis on which the real-world systems known as Datafly, µ-Argus and k-Similar provide guarantees of privacy protection.", "year": 2002, "venue": "Int. J. Uncertain. Fuzziness Knowl. Based Syst.", "authors": [ "L. Sweeney" ], "externalIds": { "DBLP": "journals/ijufks/Sweene02", "MAG": "2159024459", "DOI": "10.1142/S0218488502001648", "CorpusId": 361794 }, "url": "https://www.semanticscholar.org/paper/62ce35108ef0a816ba1929223f511c976079a300", "referenceCount": 28, "citationCount": 8386, "influentialCitationCount": 615, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Information security is information risk management", "abstract": "Information security is important in proportion to an organization's dependence on information technology. When an organization's information is exposed to risk, the use of information security technology is obviously appropriate. 
Current information security technology, however, deals with only a small fraction of the problem of information risk. In fact, the evidence increasingly suggests that information security technology does not reduce information risk very effectively. This paper argues that we must reconsider our approach to information security from the ground up if we are to deal effectively with the problem of information risk, and proposes a new model inspired by the history of medicine.", "year": 2001, "venue": "New Security Paradigms Workshop", "authors": [ "B. Blakley", "Ellen McDermott", "Daniel E. Geer" ], "externalIds": { "MAG": "2062830664", "DBLP": "conf/nspw/BlakleyMG01", "DOI": "10.1145/508171.508187", "CorpusId": 509837 }, "url": "https://www.semanticscholar.org/paper/b1ddaea408084e38026198529bc01ba174871e07", "referenceCount": 20, "citationCount": 248, "influentialCitationCount": 15, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Trust and Distrust Definitions: One Bite at a Time", "abstract": null, "year": 2000, "venue": "Trust in Cyber-societies", "authors": [ "D. McKnight", "N. L. Chervany" ], "externalIds": { "DBLP": "conf/agents/McKnightC00", "MAG": "2649135381", "DOI": "10.1007/3-540-45547-7_3", "CorpusId": 3502139 }, "url": "https://www.semanticscholar.org/paper/b48228cc730fd1e4bd841033a4ea2889b5000753", "referenceCount": 103, "citationCount": 690, "influentialCitationCount": 73, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Psychology" ] }, { "title": "Designing the user interface strategies for effective human-computer interaction", "abstract": "For courses in Human-Computer Interaction. The Sixth Edition of Designing the User Interface provides a comprehensive, authoritative, and up-to-date introduction to the dynamic field of human-computer interaction (HCI) and user experience (UX) design. This classic book has defined and charted the astonishing evolution of user interfaces for three decades. Students and professionals learn practical principles and guidelines needed to develop high quality interface designs that users can understand, predict, and control. The book covers theoretical foundations and design processes such as expert reviews and usability testing. By presenting current research and innovations in human-computer interaction, the authors strive to inspire students, guide designers, and provoke researchers to seek solutions that improve the experiences of novice and expert users, while achieving universal usability. The authors also provide balanced presentations on controversial topics such as augmented and virtual reality, voice and natural language interfaces, and information visualization. Updates include current HCI design methods, new design examples, and totally revamped coverage of social media, search and voice interaction. Major revisions were made to EVERY chapter, changing almost every figure (170 new color figures) and substantially updating the references.", "year": 1998, "venue": "SIGB", "authors": [ "B. Shneiderman", "C. 
Plaisant" ], "externalIds": { "DBLP": "books/daglib/0017688", "DOI": "10.1145/25065.950626", "CorpusId": 334148 }, "url": "https://www.semanticscholar.org/paper/142d23fd6bbdca45cbfca7766e17dea3bfbfab61", "referenceCount": 0, "citationCount": 779, "influentialCitationCount": 57, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Role-Based Access Control Models", "abstract": "Security administration of large systems is complex, but it can be simplified by a role-based access control approach. This article explains why RBAC is receiving renewed attention as a method of security administration and review, describes a framework of four reference models developed to better understand RBAC and categorizes different implementations, and discusses the use of RBAC to manage itself.", "year": 1996, "venue": "Computer", "authors": [ "R. Sandhu", "E. Coyne", "H. Feinstein", "C. E. Youman" ], "externalIds": { "MAG": "2166602595", "DBLP": "journals/computer/SandhuCFY96", "DOI": "10.1109/2.485845", "CorpusId": 1958270 }, "url": "https://www.semanticscholar.org/paper/4fc849841e0df1126cc5258924af4d57023758f5", "referenceCount": 14, "citationCount": 6188, "influentialCitationCount": 607, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Support-Vector Networks", "abstract": null, "year": 1995, "venue": "Machine-mediated learning", "authors": [ "Corinna Cortes", "V. Vapnik" ], "externalIds": { "MAG": "2119821739", "DOI": "10.1023/A:1022627411411", "CorpusId": 52874011 }, "url": "https://www.semanticscholar.org/paper/52b7bf3ba59b31f362aa07f957f1543a29a4279e", "referenceCount": 26, "citationCount": 38968, "influentialCitationCount": 3372, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An Integrative Model Of Organizational Trust", "abstract": "Scholars in various disciplines have considered the causes, nature, and effects of trust. Prior approaches to studying trust are considered, including characteristics of the trustor, the trustee, and the role of risk. A definition of trust and a model of its antecedents and outcomes are presented, which integrate research from multiple disciplines and differentiate trust from similar constructs. Several research propositions based on the model are presented.", "year": 1995, "venue": "", "authors": [ "R. Mayer", "J. H. Davis", "F. Schoorman" ], "externalIds": { "MAG": "2142175015", "DOI": "10.5465/AMR.1995.9508080335", "CorpusId": 15027176 }, "url": "https://www.semanticscholar.org/paper/b8775b8ead06b2f683a7ed21384d50d5da34d3a8", "referenceCount": 88, "citationCount": 18818, "influentialCitationCount": 1828, "isOpenAccess": true, "fieldsOfStudy": [ "Sociology" ] }, { "title": "A tutorial on hidden Markov models and selected applications in speech recognition", "abstract": "This tutorial provides an overview of the basic theory of hidden Markov models (HMMs) as originated by L.E. Baum and T. Petrie (1966) and gives practical details on methods of implementation of the theory along with a description of selected applications of the theory to distinct problems in speech recognition. Results from a number of original sources are combined to provide a single source of acquiring the background required to pursue further this area of research. The author first reviews the theory of discrete Markov chains and shows how the concept of hidden states, where the observation is a probabilistic function of the state, can be used effectively. 
The theory is illustrated with two simple examples, namely coin-tossing, and the classic balls-in-urns system. Three fundamental problems of HMMs are noted and several practical techniques for solving these problems are given. The various types of HMMs that have been studied, including ergodic as well as left-right models, are described. >", "year": 1989, "venue": "Proceedings of the IEEE", "authors": [ "L. Rabiner" ], "externalIds": { "MAG": "2179529395", "DBLP": "journals/pieee/Rabiner89", "DOI": "10.1109/5.18626", "CorpusId": 13618539 }, "url": "https://www.semanticscholar.org/paper/8fe2ea0a67954f1380b3387e3262f1cdb9f9b3e5", "referenceCount": 72, "citationCount": 15772, "influentialCitationCount": 2288, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Introducing Meta Llama 3: The Most Capable Openly Available LLM", "abstract": null, "year": 2024, "venue": "ai", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Introducing GPT-4o and More Tools to Chat-GPT Free Users", "abstract": null, "year": 2024, "venue": "OpenAI", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Accuracy First: Selecting a Differential Privacy Level for Production Machine Learning", "abstract": null, "year": 2022, "venue": "Proceedings on Privacy Enhancing Technologies", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Apache Tika – A Content Analysis Toolkit", "abstract": null, "year": 2021, "venue": "tika", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Proposal for a Regulation Laying Down Harmonized Rules on Artificial Intelligence (Artificial Intelligence Act)", "abstract": null, "year": 2021, "venue": "COM(", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Microsoft Presidio: An Open-Source Tool for PII Detection and Anonymization", "abstract": null, "year": 2021, "venue": "microsoft", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Apache OpenNLP: Machine Learning Based Toolkit for NLP Tasks", "abstract": null, "year": 2021, "venue": "opennlp", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "ModSecurity Web Application Firewall", "abstract": null, "year": 2021, "venue": "ModSecurity", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "ClassifyIT", "abstract": null, "year": 2020, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, 
{ "title": "AI Researchers Fight to Protect Privacy in the Age of Large Language Models", "abstract": null, "year": 2020, "venue": "The Wall Street Journal", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Data Sanitization Techniques: A Comprehensive Survey on Data Privacy, En-hancing Technologies", "abstract": null, "year": 2020, "venue": "IEEE Access", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "”A Framework for Data Discovery and Classification in Organizational Data Governance.”", "abstract": null, "year": 2020, "venue": "Journal of Information & Knowledge Management", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Florida Medical Record Disclosure Law Preempted by Health Insurance Portability and Accountability Act of 1996 (HIPAA)", "abstract": "The U.S. Court of Appeals for the 11th Circuit recently ruled that a Florida law requiring nursing homes to disclose patient records of deceased patients was preempted by the Health Insurance Portabil ity and Accountabil ity Act of 1996 (HIPAA). This case is important because it suggests that the question of whether HIPAA or state law is stricter may not always be binary or straightforward, and that in some cases state law wil l be seen as framing or contextualizing HIPAA and in other cases wil l be seen as less protective. Thus, while the HIPAA preemption principle is well-settled, the application of that principle sti l l presents some uncertainty, especially given the varied nature of state law.", "year": 2019, "venue": "", "authors": [ "J. Geetter", "James A. Saling", "M. Wil" ], "externalIds": { "CorpusId": 208175863 }, "url": "https://www.semanticscholar.org/paper/5f6a70b377ecfcfe29d482c0bb082fa8dac1575d", "referenceCount": 0, "citationCount": 69, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", "abstract": "We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models (Peters et al., 2018a; Radford et al., 2018), BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. 
It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5 (7.7 point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).", "year": 2019, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Jacob Devlin", "Ming-Wei Chang", "Kenton Lee", "Kristina Toutanova" ], "externalIds": { "MAG": "2951055169", "ACL": "N19-1423", "DBLP": "journals/corr/abs-1810-04805", "ArXiv": "1810.04805", "DOI": "10.18653/v1/N19-1423", "CorpusId": 52967399 }, "url": "https://www.semanticscholar.org/paper/df2b0e26d0599ce3e70df8a9da02e51594e0e992", "referenceCount": 63, "citationCount": 81678, "influentialCitationCount": 19056, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Language Models are Unsupervised Multitask Learners", "abstract": "Natural language processing tasks, such as question answering, machine translation, reading comprehension, and summarization, are typically approached with supervised learning on taskspecific datasets. We demonstrate that language models begin to learn these tasks without any explicit supervision when trained on a new dataset of millions of webpages called WebText. When conditioned on a document plus questions, the answers generated by the language model reach 55 F1 on the CoQA dataset matching or exceeding the performance of 3 out of 4 baseline systems without using the 127,000+ training examples. The capacity of the language model is essential to the success of zero-shot task transfer and increasing it improves performance in a log-linear fashion across tasks. Our largest model, GPT-2, is a 1.5B parameter Transformer that achieves state of the art results on 7 out of 8 tested language modeling datasets in a zero-shot setting but still underfits WebText. Samples from the model reflect these improvements and contain coherent paragraphs of text. These findings suggest a promising path towards building language processing systems which learn to perform tasks from their naturally occurring demonstrations.", "year": 2019, "venue": "", "authors": [ "Alec Radford", "Jeff Wu", "R. Child", "D. Luan", "Dario Amodei", "I. Sutskever" ], "externalIds": { "MAG": "2955855238", "CorpusId": 160025533 }, "url": "https://www.semanticscholar.org/paper/9405cc0d6169988371b2755e573cc28650d14dfe", "referenceCount": 75, "citationCount": 18460, "influentialCitationCount": 3039, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Machine Learning: Concept and Applications", "abstract": null, "year": 2019, "venue": "ACM Transactions on Intelligent Systems and Technology", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "DualAD: Dual-Layer Planning for Reasoning in Autonomous Driving": { "paper_title": "DualAD: Dual-Layer Planning for Reasoning in Autonomous Driving", "arxiv_id": "2409.18053v1", "keyword": "large language model", "authors": [ "Dingrui Wang", "Marc Kaufeld", "Johannes Betz" ], "references": [ { "title": "ChatGLM: A Family of Large Language Models from GLM-130B to GLM-4 All Tools", "abstract": "We introduce ChatGLM, an evolving family of large language models that we have been developing over time. 
This report primarily focuses on the GLM-4 language series, which includes GLM-4, GLM-4-Air, and GLM-4-9B. They represent our most capable models that are trained with all the insights and lessons gained from the preceding three generations of ChatGLM. To date, the GLM-4 models are pre-trained on ten trillions of tokens mostly in Chinese and English, along with a small set of corpus from 24 languages, and aligned primarily for Chinese and English usage. The high-quality alignment is achieved via a multi-stage post-training process, which involves supervised fine-tuning and learning from human feedback. Evaluations show that GLM-4 1) closely rivals or outperforms GPT-4 in terms of general metrics such as MMLU, GSM8K, MATH, BBH, GPQA, and HumanEval, 2) gets close to GPT-4-Turbo in instruction following as measured by IFEval, 3) matches GPT-4 Turbo (128K) and Claude 3 for long context tasks, and 4) outperforms GPT-4 in Chinese alignments as measured by AlignBench. The GLM-4 All Tools model is further aligned to understand user intent and autonomously decide when and which tool(s) touse -- including web browser, Python interpreter, text-to-image model, and user-defined functions -- to effectively complete complex tasks. In practical applications, it matches and even surpasses GPT-4 All Tools in tasks like accessing online information via web browsing and solving math problems using Python interpreter. Over the course, we have open-sourced a series of models, including ChatGLM-6B (three generations), GLM-4-9B (128K, 1M), GLM-4V-9B, WebGLM, and CodeGeeX, attracting over 10 million downloads on Hugging face in the year 2023 alone. The open models can be accessed through https://github.com/THUDM and https://huggingface.co/THUDM.", "year": 2024, "venue": "arXiv.org", "authors": [ "Team Glm Aohan Zeng", "Bin Xu", "Bowen Wang", "Chenhui Zhang", "Da Yin", "Diego Rojas", "Guanyu Feng", "Hanlin Zhao", "Hanyu Lai", "Hao Yu", "Hongning Wang", "Jiadai Sun", "Jiajie Zhang", "Jiale Cheng", "Jiayi Gui", "Jie Tang", "Jing Zhang", "Juanzi Li", "Lei Zhao", "Lindong Wu", "Lucen Zhong", "Ming-yue Liu", "Minlie Huang", "Peng Zhang", "Qinkai Zheng", "Rui Lu", "Shuaiqi Duan", "Shudan Zhang", "S. Cao", "Shuxun Yang", "W. Tam", "Wenyi Zhao", "Xiao Liu", "Xiaoyu Xia", "Xiaohan Zhang", "Xiaotao Gu", "Xin Lv", "Xinghan Liu", "Xinyi Liu", "Xinyue Yang", "Xixuan Song", "Xunkai Zhang", "Y. An", "Yifan Xu", "Yilin Niu", "Yuantao Yang", "Yueyan Li", "Yushi Bai", "Yuxiao Dong", "Zehan Qi", "Zhaoyu Wang", "Zhenyi Yang", "Zhengxiao Du", "Zhen-Ping Hou", "Zihan Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2406-12793", "ArXiv": "2406.12793", "DOI": "10.48550/arXiv.2406.12793", "CorpusId": 270562306 }, "url": "https://www.semanticscholar.org/paper/c7f9706898bdfa3241601e075b1305649b174ff1", "referenceCount": 51, "citationCount": 71, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Platonic Representation Hypothesis", "abstract": "We argue that representations in AI models, particularly deep networks, are converging. First, we survey many examples of convergence in the literature: over time and across multiple domains, the ways by which different neural networks represent data are becoming more aligned. Next, we demonstrate convergence across data modalities: as vision models and language models get larger, they measure distance between datapoints in a more and more alike way. 
We hypothesize that this convergence is driving toward a shared statistical model of reality, akin to Plato's concept of an ideal reality. We term such a representation the platonic representation and discuss several possible selective pressures toward it. Finally, we discuss the implications of these trends, their limitations, and counterexamples to our analysis.", "year": 2024, "venue": "International Conference on Machine Learning", "authors": [ "Minyoung Huh", "Brian Cheung", "Tongzhou Wang", "Phillip Isola" ], "externalIds": { "ArXiv": "2405.07987", "DBLP": "journals/corr/abs-2405-07987", "DOI": "10.48550/arXiv.2405.07987", "CorpusId": 269757765 }, "url": "https://www.semanticscholar.org/paper/66de49b3dcbbf0cca535335d597f94b702e2b95a", "referenceCount": 145, "citationCount": 35, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Rethinking Imitation-based Planners for Autonomous Driving", "abstract": "In recent years, imitation-based driving planners have reported considerable success. However, due to the absence of a standardized benchmark, the effectiveness of various designs remains unclear. The newly released nuPlan addresses this issue by offering a large-scale real-world dataset and a standardized closed-loop benchmark for equitable comparisons. Utilizing this platform, we conduct a comprehensive study on two fundamental yet underexplored aspects of imitation-based planners: the essential features for ego planning and the effective data augmentation techniques to reduce compounding errors. Furthermore, we highlight an imitation gap that has been overlooked by current learning systems. Finally, integrating our findings, we propose a strong baseline model—PlanTF. Our results demonstrate that a well-designed, purely imitation-based planner can achieve highly competitive performance compared to state-of-the-art methods involving hand-crafted rules and exhibit superior generalization capabilities in long-tail cases. Our models and benchmarks are publicly available. Project website https://jchengai.github.io/planTF.", "year": 2024, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Jie Cheng", "Yingbing Chen", "Xiaodong Mei", "Bowen Yang", "Bo Li", "Ming Liu" ], "externalIds": { "DBLP": "conf/icra/ChengC0YLL24", "DOI": "10.1109/ICRA57147.2024.10611364", "CorpusId": 271798811 }, "url": "https://www.semanticscholar.org/paper/743cceb9a492a7759d75747c31fe4d8d3372b50d", "referenceCount": 52, "citationCount": 5, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ESP: Extro-Spective Prediction for Long-term Behavior Reasoning in Emergency Scenarios", "abstract": "Emergent-scene safety is the key milestone for fully autonomous driving, and reliable on-time prediction is essential to maintain safety in emergency scenarios. However, these emergency scenarios are long-tailed and hard to collect, which restricts the system from getting reliable predictions. In this paper, we build a new dataset, which aims at the longterm prediction with the inconspicuous state variation in history for the emergency event, named the Extro-Spective Prediction (ESP) problem. Based on the proposed dataset, a flexible feature encoder for ESP is introduced to various prediction methods as a seamless plug-in, and its consistent performance improvement underscores its efficacy. 
Furthermore, a new metric named clamped temporal error (CTE) is proposed to give a more comprehensive evaluation of prediction performance, especially in time-sensitive emergency events of subseconds. Interestingly, as our ESP features can be described in human-readable language naturally, the application of integrating into ChatGPT also shows huge potential. The ESP-dataset and all benchmarks are released at https://dingrui-wang.github.io/ESP-Dataset/.", "year": 2024, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Dingrui Wang", "Zheyuan Lai", "Yuda Li", "Yi Wu", "Yuexin Ma", "Johannes Betz", "Ruigang Yang", "Wei Li" ], "externalIds": { "ArXiv": "2405.04100", "DBLP": "conf/icra/WangLLWMBYL24", "DOI": "10.1109/ICRA57147.2024.10610002", "CorpusId": 269614413 }, "url": "https://www.semanticscholar.org/paper/d777965231b034d3a5608f406ef08aa3a178a260", "referenceCount": 42, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DriveVLM: The Convergence of Autonomous Driving and Large Vision-Language Models", "abstract": "A primary hurdle of autonomous driving in urban environments is understanding complex and long-tail scenarios, such as challenging road conditions and delicate human behaviors. We introduce DriveVLM, an autonomous driving system leveraging Vision-Language Models (VLMs) for enhanced scene understanding and planning capabilities. DriveVLM integrates a unique combination of reasoning modules for scene description, scene analysis, and hierarchical planning. Furthermore, recognizing the limitations of VLMs in spatial reasoning and heavy computational requirements, we propose DriveVLM-Dual, a hybrid system that synergizes the strengths of DriveVLM with the traditional autonomous driving pipeline. Experiments on both the nuScenes dataset and our SUP-AD dataset demonstrate the efficacy of DriveVLM and DriveVLM-Dual in handling complex and unpredictable driving conditions. Finally, we deploy the DriveVLM-Dual on a production vehicle, verifying it is effective in real-world autonomous driving environments.", "year": 2024, "venue": "arXiv.org", "authors": [ "Xiaoyu Tian", "Junru Gu", "Bailin Li", "Yicheng Liu", "Chenxu Hu", "Yang Wang", "Kun Zhan", "Peng Jia", "Xianpeng Lang", "Hang Zhao" ], "externalIds": { "ArXiv": "2402.12289", "DBLP": "journals/corr/abs-2402-12289", "DOI": "10.48550/arXiv.2402.12289", "CorpusId": 267750682 }, "url": "https://www.semanticscholar.org/paper/758c2dc290c037a6f211ec503beee70abe2d1197", "referenceCount": 78, "citationCount": 39, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FRENETIX: A High-Performance and Modular Motion Planning Framework for Autonomous Driving", "abstract": "Our research introduces a modular motion planning framework for autonomous vehicles using a sampling-based trajectory planning algorithm. This approach effectively tackles the challenges of solution space construction and optimization in path planning. The algorithm is applicable to both real vehicles and simulations, offering a robust solution for complex autonomous navigation. Our method employs a multi-objective optimization strategy for efficient navigation in static and highly dynamic environments, focusing on optimizing trajectory comfort, safety, and path precision. The algorithm is used to analyze the algorithm performance and success rate in 1750 virtual complex urban and highway scenarios. 
Our results demonstrate fast calculation times (8ms for 800 trajectories), a high success rate in complex scenarios (88%), and easy adaptability with different modules presented. The most noticeable difference exhibited was the fast trajectory sampling, feasibility check, and cost evaluation step across various trajectory counts. We demonstrate the integration and execution of the framework on real vehicles by evaluating deviations from the controller using a test track. This evaluation highlights the algorithm’s robustness and reliability, ensuring it meets the stringent requirements of real-world autonomous driving scenarios. The code and the additional modules used in this research are publicly available as open-source software and can be accessed at the following link: https://github.com/TUM-AVS/Frenetix-Motion-Planner.", "year": 2024, "venue": "IEEE Access", "authors": [ "Korbinian Moller", "Rainer Trauth", "Gerald Würsching", "Johannes Betz" ], "externalIds": { "DBLP": "journals/access/TrauthMWB24", "ArXiv": "2402.01443", "DOI": "10.1109/ACCESS.2024.3436835", "CorpusId": 267406249 }, "url": "https://www.semanticscholar.org/paper/638a697dc12f4ed3b96c6ff8a405a88a72c4baf0", "referenceCount": 59, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Holistic Autonomous Driving Understanding by Bird's-Eye-View Injected Multi-Modal Large Models", "abstract": "The rise of multimodal large language models (MLLMs) has spurred interest in language-based driving tasks. However, existing research typically focuses on limited tasks and often omits key multi-view and temporal information which is crucial for robust autonomous driving. To bridge these gaps, we introduce NuInstruct, a novel dataset with 91K multi-view video-QA pairs across 17 subtasks, where each task demands holistic information (e.g., temporal, multi-view, and spatial), significantly elevating the challenge level. To obtain NuInstruct, we propose a novel SQL-based method to generate instruction-response pairs automatically, which is inspired by the driving logical progression of humans. We further present BEV-InMLLM, an end-to-end method for efficiently deriving instruction-aware Bird's-Eye-View (BEV) features, language-aligned for large language models. BEV-InMLLM integrates multi-view, spatial awareness, and temporal semantics to enhance MLLMs' capabilities on NuInstruct tasks. Moreover, our proposed BEV injection module is a plug-and-play method for existing MLLMs. Our experiments on NuInstruct demonstrate that BEV-InMLLM significantly outperforms existing MLLMs, e.g. 9% improvement on various tasks.
We release our NuInstruct at https://github.com/xmed-lab/NuInstruct.", "year": 2024, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Xinpeng Ding", "Jianhua Han", "Hang Xu", "Xiaodan Liang", "Wei Zhang", "Xiaomeng Li" ], "externalIds": { "ArXiv": "2401.00988", "DBLP": "journals/corr/abs-2401-00988", "DOI": "10.1109/CVPR52733.2024.01297", "CorpusId": 266725320 }, "url": "https://www.semanticscholar.org/paper/cd49101103f73d88a4a3b368898066f03984c339", "referenceCount": 56, "citationCount": 13, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DriveLLM: Charting the Path Toward Full Autonomous Driving With Large Language Models", "abstract": "Human drivers instinctively reason with commonsense knowledge to predict hazards in unfamiliar scenarios and to understand the intentions of other road users. However, this essential capability is entirely missing from traditional decision-making systems in autonomous driving. In response, this paper presents DriveLLM, a decision-making framework that integrates large language models (LLMs) with existing autonomous driving stacks. This integration allows for commonsense reasoning in decision-making. DriveLLM also features a unique cyber-physical feedback system, allowing it to learn and improve from its mistakes. In real-world case studies, the proposed framework outperforms traditional decision-making methods in complex scenarios, including difficult edge cases. Furthermore, we propose a novel approach that allows the decision-making system to interact with human inputs while guarding against adversarial attacks. Empirical evaluations demonstrate that this framework responds correctly to complex human instructions.", "year": 2024, "venue": "IEEE Transactions on Intelligent Vehicles", "authors": [ "Yaodong Cui", "Shucheng Huang", "Jiaming Zhong", "Zhenan Liu", "Yutong Wang", "Chen Sun", "Bai Li", "Xiao Wang", "A. Khajepour" ], "externalIds": { "DBLP": "journals/tiv/CuiHZLWSLWK24", "DOI": "10.1109/TIV.2023.3327715", "CorpusId": 264534614 }, "url": "https://www.semanticscholar.org/paper/0553ff6d10a8dd377d6d0c171f8612231b7211a2", "referenceCount": 52, "citationCount": 25, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DriveLM: Driving with Graph Visual Question Answering", "abstract": "We study how vision-language models (VLMs) trained on web-scale data can be integrated into end-to-end driving systems to boost generalization and enable interactivity with human users. While recent approaches adapt VLMs to driving via single-round visual question answering (VQA), human drivers reason about decisions in multiple steps. Starting from the localization of key objects, humans estimate object interactions before taking actions. The key insight is that with our proposed task, Graph VQA, where we model graph-structured reasoning through perception, prediction and planning question-answer pairs, we obtain a suitable proxy task to mimic the human reasoning process. We instantiate datasets (DriveLM-Data) built upon nuScenes and CARLA, and propose a VLM-based baseline approach (DriveLM-Agent) for jointly performing Graph VQA and end-to-end driving. The experiments demonstrate that Graph VQA provides a simple, principled framework for reasoning about a driving scene, and DriveLM-Data provides a challenging benchmark for this task. 
Our DriveLM-Agent baseline performs end-to-end autonomous driving competitively in comparison to state-of-the-art driving-specific architectures. Notably, its benefits are pronounced when it is evaluated zero-shot on unseen objects or sensor configurations. We hope this work can be the starting point to shed new light on how to apply VLMs for autonomous driving. To facilitate future research, all code, data, and models are available to the public.", "year": 2023, "venue": "arXiv.org", "authors": [ "Chonghao Sima", "Katrin Renz", "Kashyap Chitta", "Li Chen", "Hanxue Zhang", "Chengen Xie", "Ping Luo", "Andreas Geiger", "Hongyang Li" ], "externalIds": { "ArXiv": "2312.14150", "DBLP": "journals/corr/abs-2312-14150", "DOI": "10.48550/arXiv.2312.14150", "CorpusId": 266435584 }, "url": "https://www.semanticscholar.org/paper/3c8cc9a5ee373d51e0bf71621b6eb6901c762e8f", "referenceCount": 84, "citationCount": 79, "influentialCitationCount": 9, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LMDrive: Closed-Loop End-to-End Driving with Large Language Models", "abstract": "Despite significant recent progress in the field of autonomous driving, modern methods still struggle and can incur serious accidents when encountering long-tail unforeseen events and challenging urban scenarios. On the one hand, large language models (LLM) have shown impressive reasoning capabilities that approach “Artificial General Intelligence”. On the other hand, previous autonomous driving methods tend to rely on limited-format inputs (e.g., sensor data and navigation waypoints), restricting the vehicle's ability to understand language information and interact with humans. To this end, this paper introduces LMDrive, a novel language-guided, end-to-end, closed-loop autonomous driving framework. LMDrive uniquely processes and integrates multimodal sensor data with natural language instructions, enabling interaction with humans and navigation software in realistic instructional settings. To facilitate research in language-based closed-loop autonomous driving, we also publicly release the corresponding dataset which includes approximately 64K instruction-following data clips, and the LangAuto benchmark that tests the system's ability to handle complex instructions and challenging driving scenarios. Extensive closed-loop experiments are conducted to demonstrate LMDrive's effectiveness. To the best of our knowledge, we're the very first work to leverage LLMs for closed-loop end-to-end autonomous driving. Code is available on our webpage.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Hao Shao", "Yuxuan Hu", "Letian Wang", "Steven L. Waslander", "Yu Liu", "Hongsheng Li" ], "externalIds": { "ArXiv": "2312.07488", "DBLP": "journals/corr/abs-2312-07488", "DOI": "10.1109/CVPR52733.2024.01432", "CorpusId": 266174267 }, "url": "https://www.semanticscholar.org/paper/e0b05e314372ed580d9612ef5f0ee672b17ad2e4", "referenceCount": 57, "citationCount": 43, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Reason2Drive: Towards Interpretable and Chain-based Reasoning for Autonomous Driving", "abstract": "Large vision-language models (VLMs) have garnered increasing interest in autonomous driving areas, due to their advanced capabilities in complex reasoning tasks essential for highly autonomous vehicle behavior.
Despite their potential, research in autonomous systems is hindered by the lack of datasets with annotated reasoning chains that explain the decision-making processes in driving. To bridge this gap, we present Reason2Drive, a benchmark dataset with over 600K video-text pairs, aimed at facilitating the study of interpretable reasoning in complex driving environments. We distinctly characterize the autonomous driving process as a sequential combination of perception, prediction, and reasoning steps, and the question-answer pairs are automatically collected from a diverse range of open-source outdoor driving datasets, including nuScenes, Waymo and ONCE. Moreover, we introduce a novel aggregated evaluation metric to assess chain-based reasoning performance in autonomous systems, addressing the semantic ambiguities of existing metrics such as BLEU and CIDEr. Based on the proposed benchmark, we conduct experiments to assess various existing VLMs, revealing insights into their reasoning capabilities. Additionally, we develop an efficient approach to empower VLMs to leverage object-level perceptual elements in both feature extraction and prediction, further enhancing their reasoning accuracy. The code and dataset will be released.", "year": 2023, "venue": "arXiv.org", "authors": [ "Ming Nie", "Renyuan Peng", "Chunwei Wang", "Xinyue Cai", "Jianhua Han", "Hang Xu", "Li Zhang" ], "externalIds": { "ArXiv": "2312.03661", "DBLP": "journals/corr/abs-2312-03661", "DOI": "10.48550/arXiv.2312.03661", "CorpusId": 265688025 }, "url": "https://www.semanticscholar.org/paper/98de0a73fc32e04b58d76579aef964cf686b25da", "referenceCount": 51, "citationCount": 19, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Is Ego Status All You Need for Open-Loop End-to-End Autonomous Driving?", "abstract": "End-to-end autonomous driving recently emerged as a promising research direction to target autonomy from a full-stack perspective. Along this line, many of the latest works follow an open-loop evaluation setting on nuScenes to study the planning behavior. In this paper, we delve deeper into the problem by conducting thorough analyses and demystifying more devils in the details. We initially observed that the nuScenes dataset, characterized by relatively simple driving scenarios, leads to an under-utilization of perception information in end-to-end models incorporating ego status, such as the ego vehicle's velocity. These models tend to rely predominantly on the ego vehicle's status for future path planning. Beyond the limitations of the dataset, we also note that current metrics do not comprehensively assess the planning quality, leading to potentially biased conclusions drawn from existing benchmarks. To address this issue, we introduce a new metric to evaluate whether the predicted trajectories adhere to the road. We further propose a simple baseline able to achieve competitive results without relying on perception annotations. Given the current limitations on the benchmark and metrics, we suggest the community reassess relevant prevailing research and be cautious about whether the continued pursuit of state-of-the-art would yield convincing and universal conclusions. Code and models are available at https://github.com/NVlabs/BEV-Planner.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Zhiqi Li", "Zhiding Yu", "Shiyi Lan", "Jiahan Li", "Jan Kautz", "Tong Lu", "José M. 
Álvarez" ], "externalIds": { "DBLP": "journals/corr/abs-2312-03031", "ArXiv": "2312.03031", "DOI": "10.1109/CVPR52733.2024.01408", "CorpusId": 265664457 }, "url": "https://www.semanticscholar.org/paper/37262eae27907fce421698c66e48e1a996c64f19", "referenceCount": 48, "citationCount": 23, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Dolphins: Multimodal Language Model for Driving", "abstract": "The quest for fully autonomous vehicles (AVs) capable of navigating complex real-world scenarios with human-like understanding and responsiveness. In this paper, we introduce Dolphins, a novel vision-language model architected to imbibe human-like abilities as a conversational driving assistant. Dolphins is adept at processing multimodal inputs comprising video (or image) data, text instructions, and historical control signals to generate informed outputs corresponding to the provided instructions. Building upon the open-sourced pretrained Vision-Language Model, OpenFlamingo, we first enhance Dolphins's reasoning capabilities through an innovative Grounded Chain of Thought (GCoT) process. Then we tailored Dolphins to the driving domain by constructing driving-specific instruction data and conducting instruction tuning. Through the utilization of the BDD-X dataset, we designed and consolidated four distinct AV tasks into Dolphins to foster a holistic understanding of intricate driving scenarios. As a result, the distinctive features of Dolphins are characterized into two dimensions: (1) the ability to provide a comprehensive understanding of complex and long-tailed open-world driving scenarios and solve a spectrum of AV tasks, and (2) the emergence of human-like capabilities including gradient-free instant adaptation via in-context learning and error recovery via reflection.", "year": 2023, "venue": "arXiv.org", "authors": [ "Yingzi Ma", "Yulong Cao", "Jiachen Sun", "Marco Pavone", "Chaowei Xiao" ], "externalIds": { "DBLP": "journals/corr/abs-2312-00438", "ArXiv": "2312.00438", "DOI": "10.48550/arXiv.2312.00438", "CorpusId": 265551475 }, "url": "https://www.semanticscholar.org/paper/c95c4fb96868d6512c32988632a7b101a42c455d", "referenceCount": 75, "citationCount": 27, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "CamRaDepth: Semantic Guided Depth Estimation Using Monocular Camera and Sparse Radar for Automotive Perception", "abstract": "Our research aims to generate robust, dense 3-D depth maps for robotics, especially autonomous driving applications. Since cameras output 2-D images and active sensors such as LiDAR or radar produce sparse depth measurements, dense depth maps need to be estimated. Recent methods based on visual transformer networks have outperformed conventional deep learning approaches in various computer vision tasks, including depth prediction, but have focused on the use of a single camera image. This article explores the potential of visual transformers applied to the fusion of monocular images, semantic segmentation, and projected sparse radar reflections for robust monocular depth estimation. The addition of a semantic segmentation branch is used to add object-level understanding and is investigated in a supervised and unsupervised manner. We evaluate our new depth estimation approach on the nuScenes dataset where it outperforms existing state-of-the-art camera-radar depth estimation methods. 
We show that models can benefit from an additional segmentation branch during the training process by transfer learning even without running segmentation at inference. Further studies are needed to investigate the usage of 4-D-imaging radars and enhanced ground-truth generation in more detail. The related code is available as open-source software under https://github.com/TUMFTM/CamRaDepth.", "year": 2023, "venue": "IEEE Sensors Journal", "authors": [ "Florian Sauerbeck", "Dan Halperin", "Lukas Connert", "Johannes Betz" ], "externalIds": { "DOI": "10.1109/JSEN.2023.3321886", "CorpusId": 263840401 }, "url": "https://www.semanticscholar.org/paper/a8e668670ce4a4837d346a9879726bece1867c33", "referenceCount": 58, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Drive Anywhere: Generalizable End-to-end Autonomous Driving with Multi-modal Foundation Models", "abstract": "As autonomous driving technology matures, end-to-end methodologies have emerged as a leading strategy, promising seamless integration from perception to control via deep learning. However, existing systems grapple with challenges such as unexpected open set environments and the complexity of black-box models. At the same time, the evolution of deep learning introduces larger, multimodal foundational models, offering multi-modal visual and textual understanding. In this paper, we harness these multimodal foundation models to enhance the robustness and adaptability of autonomous driving systems. We introduce a method to extract nuanced spatial features from transformers and the incorporation of latent space simulation for improved training and policy debugging. We use pixel/patch-aligned feature descriptors to expand foundational model capabilities to create an end-to-end multimodal driving model, demonstrating unparalleled results in diverse tests. Our solution combines language with visual perception and achieves significantly greater robustness on out-of-distribution situations.", "year": 2023, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Tsun-Hsuan Wang", "Alaa Maalouf", "Wei Xiao", "Yutong Ban", "Alexander Amini", "G. Rosman", "S. Karaman", "Daniela Rus" ], "externalIds": { "ArXiv": "2310.17642", "DBLP": "conf/icra/WangM0BARKR24", "DOI": "10.1109/ICRA57147.2024.10611590", "CorpusId": 264490392 }, "url": "https://www.semanticscholar.org/paper/84a2719338a1f1db73aaa7b5bd61ca507c63da8e", "referenceCount": 51, "citationCount": 22, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Driving with LLMs: Fusing Object-Level Vector Modality for Explainable Autonomous Driving", "abstract": "Large Language Models (LLMs) have shown promise in the autonomous driving sector, particularly in generalization and interpretability. We introduce a unique objectlevel multimodal LLM architecture that merges vectorized numeric modalities with a pre-trained LLM to improve context understanding in driving situations. We also present a new dataset of 160k QA pairs derived from 10k driving scenarios, paired with high quality control commands collected with RL agent and question answer pairs generated by teacher LLM (GPT-3.5). A distinct pretraining strategy is devised to align numeric vector modalities with static LLM representations using vector captioning language data. 
We also introduce an evaluation metric for Driving QA and demonstrate our LLM-driver’s proficiency in interpreting driving scenarios, answering questions, and decision-making. Our findings highlight the potential of LLM-based driving action generation in comparison to traditional behavioral cloning. We make our benchmark, datasets, and model available for further exploration.", "year": 2023, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Long Chen", "Oleg Sinavski", "Jan Hünermann", "Alice Karnsund", "Andrew James Willmott", "Danny Birch", "Daniel Maund", "Jamie Shotton" ], "externalIds": { "ArXiv": "2310.01957", "DBLP": "conf/icra/ChenSHKWBMS24", "DOI": "10.1109/ICRA57147.2024.10611018", "CorpusId": 263608168 }, "url": "https://www.semanticscholar.org/paper/f01ff5acf9e086030c01beda6f433f99013ebbd4", "referenceCount": 52, "citationCount": 95, "influentialCitationCount": 8, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DriveGPT4: Interpretable End-to-End Autonomous Driving Via Large Language Model", "abstract": "Multimodal large language models (MLLMs) have emerged as a prominent area of interest within the research community, given their proficiency in handling and reasoning with non-textual data, including images and videos. This study seeks to extend the application of MLLMs to the realm of autonomous driving by introducing DriveGPT4, a novel interpretable end-to-end autonomous driving system based on LLMs. Capable of processing multi-frame video inputs and textual queries, DriveGPT4 facilitates the interpretation of vehicle actions, offers pertinent reasoning, and effectively addresses a diverse range of questions posed by users. Furthermore, DriveGPT4 predicts low-level vehicle control signals in an end-to-end fashion. These advanced capabilities are achieved through the utilization of a bespoke visual instruction tuning dataset, specifically tailored for autonomous driving applications, in conjunction with a mix-finetuning training strategy. DriveGPT4 represents the pioneering effort to leverage LLMs for the development of an interpretable end-to-end autonomous driving solution. Evaluations conducted on the BDD-X dataset showcase the superior qualitative and quantitative performance of DriveGPT4. Additionally, the fine-tuning of domain-specific data enables DriveGPT4 to yield close or even improved results in terms of autonomous driving grounding when contrasted with GPT4-V.", "year": 2023, "venue": "IEEE Robotics and Automation Letters", "authors": [ "Zhenhua Xu", "Yujia Zhang", "Enze Xie", "Zhen Zhao", "Yong Guo", "Kwan-Yee. K. Wong", "Zhenguo Li", "Hengshuang Zhao" ], "externalIds": { "ArXiv": "2310.01412", "DBLP": "journals/corr/abs-2310-01412", "DOI": "10.1109/LRA.2024.3440097", "CorpusId": 263605524 }, "url": "https://www.semanticscholar.org/paper/ccd6f8b6544f112de632e49bfbe592a0a654537d", "referenceCount": 63, "citationCount": 120, "influentialCitationCount": 12, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GPT-Driver: Learning to Drive with GPT", "abstract": "We present a simple yet effective approach that can transform the OpenAI GPT-3.5 model into a reliable motion planner for autonomous vehicles. Motion planning is a core challenge in autonomous driving, aiming to plan a driving trajectory that is safe and comfortable.
Existing motion planners predominantly leverage heuristic methods to forecast driving trajectories, yet these approaches demonstrate insufficient generalization capabilities in the face of novel and unseen driving scenarios. In this paper, we propose a novel approach to motion planning that capitalizes on the strong reasoning capabilities and generalization potential inherent to Large Language Models (LLMs). The fundamental insight of our approach is the reformulation of motion planning as a language modeling problem, a perspective not previously explored. Specifically, we represent the planner inputs and outputs as language tokens, and leverage the LLM to generate driving trajectories through a language description of coordinate positions. Furthermore, we propose a novel prompting-reasoning-finetuning strategy to stimulate the numerical reasoning potential of the LLM. With this strategy, the LLM can describe highly precise trajectory coordinates and also its internal decision-making process in natural language. We evaluate our approach on the large-scale nuScenes dataset, and extensive experiments substantiate the effectiveness, generalization ability, and interpretability of our GPT-based motion planner. Code is now available at https://github.com/PointsCoder/GPT-Driver.", "year": 2023, "venue": "arXiv.org", "authors": [ "Jiageng Mao", "Yuxi Qian", "Hang Zhao", "Yue Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2310-01415", "ArXiv": "2310.01415", "DOI": "10.48550/arXiv.2310.01415", "CorpusId": 263605637 }, "url": "https://www.semanticscholar.org/paper/958ed4830ae80a189ecb9b93ab75a6ce2e3926fc", "referenceCount": 43, "citationCount": 123, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DiLu: A Knowledge-Driven Approach to Autonomous Driving with Large Language Models", "abstract": "Recent advancements in autonomous driving have relied on data-driven approaches, which are widely adopted but face challenges including dataset bias, overfitting, and uninterpretability. Drawing inspiration from the knowledge-driven nature of human driving, we explore the question of how to instill similar capabilities into autonomous driving systems and summarize a paradigm that integrates an interactive environment, a driver agent, as well as a memory component to address this question. Leveraging large language models (LLMs) with emergent abilities, we propose the DiLu framework, which combines a Reasoning and a Reflection module to enable the system to perform decision-making based on common-sense knowledge and evolve continuously. Extensive experiments prove DiLu's capability to accumulate experience and demonstrate a significant advantage in generalization ability over reinforcement learning-based methods. Moreover, DiLu is able to directly acquire experiences from real-world datasets which highlights its potential to be deployed on practical autonomous driving systems. To the best of our knowledge, we are the first to leverage knowledge-driven capability in decision-making for autonomous vehicles. Through the proposed DiLu framework, LLM is strengthened to apply knowledge and to reason causally in the autonomous driving domain. Project page: https://pjlab-adg.github.io/DiLu/", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Licheng Wen", "Daocheng Fu", "Xin Li", "Xinyu Cai", "Tengyu Ma", "Pinlong Cai", "Min Dou", "Botian Shi", "Liang He", "Y. 
Qiao" ], "externalIds": { "DBLP": "conf/iclr/WenF0C0CDS0024", "ArXiv": "2309.16292", "DOI": "10.48550/arXiv.2309.16292", "CorpusId": 263136146 }, "url": "https://www.semanticscholar.org/paper/3cbfe152220de84ecf8059fa50c47587a3134c86", "referenceCount": 45, "citationCount": 84, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "End-to-end Autonomous Driving: Challenges and Frontiers", "abstract": "The autonomous driving community has witnessed a rapid growth in approaches that embrace an end-to-end algorithm framework, utilizing raw sensor input to generate vehicle motion plans, instead of concentrating on individual tasks such as detection and motion prediction. End-to-end systems, in comparison to modular pipelines, benefit from joint feature optimization for perception and planning. This field has flourished due to the availability of large-scale datasets, closed-loop evaluation, and the increasing need for autonomous driving algorithms to perform effectively in challenging scenarios. In this survey, we provide a comprehensive analysis of more than 270 papers, covering the motivation, roadmap, methodology, challenges, and future trends in end-to-end autonomous driving. We delve into several critical challenges, including multi-modality, interpretability, causal confusion, robustness, and world models, amongst others. Additionally, we discuss current advancements in foundation models and visual pre-training, as well as how to incorporate these techniques within the end-to-end driving framework.We maintain an active repository that contains up-to-date literature and open-source projects at https://github.com/OpenDriveLab/End-to-end-Autonomous-Driving.", "year": 2023, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Li Chen", "Peng Wu", "Kashyap Chitta", "Bernhard Jaeger", "Andreas Geiger", "Hongyang Li" ], "externalIds": { "DBLP": "journals/corr/abs-2306-16927", "ArXiv": "2306.16927", "DOI": "10.48550/arXiv.2306.16927", "CorpusId": 259287283, "PubMed": "39078757" }, "url": "https://www.semanticscholar.org/paper/318128fa82a15888a5db28341c5c23d1147271f3", "referenceCount": 318, "citationCount": 128, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Parting with Misconceptions about Learning-based Vehicle Motion Planning", "abstract": "The release of nuPlan marks a new era in vehicle motion planning research, offering the first large-scale real-world dataset and evaluation schemes requiring both precise short-term planning and long-horizon ego-forecasting. Existing systems struggle to simultaneously meet both requirements. Indeed, we find that these tasks are fundamentally misaligned and should be addressed independently. We further assess the current state of closed-loop planning in the field, revealing the limitations of learning-based methods in complex real-world scenarios and the value of simple rule-based priors such as centerline selection through lane graph search algorithms. More surprisingly, for the open-loop sub-task, we observe that the best results are achieved when using only this centerline as scene context (i.e., ignoring all information regarding the map and other agents). 
Combining these insights, we propose an extremely simple and efficient planner which outperforms an extensive set of competitors, winning the nuPlan planning challenge 2023.", "year": 2023, "venue": "Conference on Robot Learning", "authors": [ "D. Dauner", "Marcel Hallgarten", "Andreas Geiger", "Kashyap Chitta" ], "externalIds": { "DBLP": "journals/corr/abs-2306-07962", "ArXiv": "2306.07962", "DOI": "10.48550/arXiv.2306.07962", "CorpusId": 259145009 }, "url": "https://www.semanticscholar.org/paper/34847169f8ec239292abdefc094b60569b4ef5bd", "referenceCount": 82, "citationCount": 64, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ReasonNet: End-to-End Driving with Temporal and Global Reasoning", "abstract": "The large-scale deployment of autonomous vehicles is yet to come, and one of the major remaining challenges lies in urban dense traffic scenarios. In such cases, it remains challenging to predict the future evolution of the scene and future behaviors of objects, and to deal with rare adverse events such as the sudden appearance of occluded objects. In this paper, we present ReasonNet, a novel end-to-end driving framework that extensively exploits both temporal and global information of the driving scene. By reasoning on the temporal behavior of objects, our method can effectively process the interactions and relationships among features in different frames. Reasoning about the global information of the scene can also improve overall perception performance and benefit the detection of adverse events, especially the anticipation of potential danger from occluded objects. For comprehensive evaluation on occlusion events, we also release publicly a driving simulation benchmark DriveOcclusionSim consisting of diverse occlusion events. We conduct extensive experiments on multiple CARLA benchmarks, where our model outperforms all prior methods, ranking first on the sensor track of the public CARLA Leaderboard [53].", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Hao Shao", "Letian Wang", "Ruobing Chen", "Steven L. Waslander", "Hongsheng Li", "Y. Liu" ], "externalIds": { "DBLP": "journals/corr/abs-2305-10507", "ArXiv": "2305.10507", "DOI": "10.1109/CVPR52729.2023.01319", "CorpusId": 258762894 }, "url": "https://www.semanticscholar.org/paper/fc2f1d2ca7c28e75a258de484892958d1daf53be", "referenceCount": 66, "citationCount": 47, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Rethinking the Open-Loop Evaluation of End-to-End Autonomous Driving in nuScenes", "abstract": "Modern autonomous driving systems are typically divided into three main tasks: perception, prediction, and planning. The planning task involves predicting the trajectory of the ego vehicle based on inputs from both internal intention and the external environment, and manipulating the vehicle accordingly. Most existing works evaluate their performance on the nuScenes dataset using the L2 error and collision rate between the predicted trajectories and the ground truth. In this paper, we reevaluate these existing evaluation metrics and explore whether they accurately measure the superiority of different methods. Specifically, we design an MLP-based method that takes raw sensor data (e.g., past trajectory, velocity, etc.) as input and directly outputs the future trajectory of the ego vehicle, without using any perception or prediction information such as camera images or LiDAR. 
Our simple method achieves similar end-to-end planning performance on the nuScenes dataset with other perception-based methods, reducing the average L2 error by about 20%. Meanwhile, the perception-based methods have an advantage in terms of collision rate. We further conduct in-depth analysis and provide new insights into the factors that are critical for the success of the planning task on nuScenes dataset. Our observation also indicates that we need to rethink the current open-loop evaluation scheme of end-to-end autonomous driving in nuScenes. Codes are available at https://github.com/E2E-AD/AD-MLP.", "year": 2023, "venue": "arXiv.org", "authors": [ "Jiang-Tian Zhai", "Ze Feng", "Jinhao Du", "Yongqiang Mao", "Jian Liu", "Zichang Tan", "Yifu Zhang", "Xiaoqing Ye", "Jingdong Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2305-10430", "ArXiv": "2305.10430", "DOI": "10.48550/arXiv.2305.10430", "CorpusId": 258741320 }, "url": "https://www.semanticscholar.org/paper/b11dad0a84fa93f6e5e13c31819c782dc05c2fb3", "referenceCount": 18, "citationCount": 33, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GPT-4 Technical Report", "abstract": "We report the development of GPT-4, a large-scale, multimodal model which can accept image and text inputs and produce text outputs. While less capable than humans in many real-world scenarios, GPT-4 exhibits human-level performance on various professional and academic benchmarks, including passing a simulated bar exam with a score around the top 10% of test takers. GPT-4 is a Transformer-based model pre-trained to predict the next token in a document. The post-training alignment process results in improved performance on measures of factuality and adherence to desired behavior. A core component of this project was developing infrastructure and optimization methods that behave predictably across a wide range of scales. This allowed us to accurately predict some aspects of GPT-4's performance based on models trained with no more than 1/1,000th the compute of GPT-4.", "year": 2023, "venue": "", "authors": [ "OpenAI Josh Achiam", "Steven Adler", "Sandhini Agarwal", "Lama Ahmad", "Ilge Akkaya", "Florencia Leoni Aleman", "Diogo Almeida", "Janko Altenschmidt", "Sam Altman", "Shyamal Anadkat", "Red Avila", "Igor Babuschkin", "S. Balaji", "Valerie Balcom", "Paul Baltescu", "Haim-ing Bao", "Mo Bavarian", "Jeff Belgum", "Irwan Bello", "Jake Berdine", "Gabriel Bernadett-Shapiro", "Christopher Berner", "Lenny Bogdonoff", "Oleg Boiko", "Madelaine Boyd", "Anna-Luisa Brakman", "Greg Brockman", "Tim Brooks", "Miles Brundage", "Kevin Button", "Trevor Cai", "Rosie Campbell", "Andrew Cann", "Brittany Carey", "Chelsea Carlson", "Rory Carmichael", "Brooke Chan", "Che Chang", "Fotis Chantzis", "Derek Chen", "Sully Chen", "Ruby Chen", "Jason Chen", "Mark Chen", "B. Chess", "Chester Cho", "Casey Chu", "Hyung Won Chung", "Dave Cummings", "Jeremiah Currier", "Yunxing Dai", "Cory Decareaux", "Thomas Degry", "Noah Deutsch", "Damien Deville", "Arka Dhar", "David Dohan", "Steve Dowling", "Sheila Dunning", "Adrien Ecoffet", "Atty Eleti", "Tyna Eloundou", "David Farhi", "Liam Fedus", "Niko Felix", "Sim'on Posada Fishman", "Juston Forte", "Is-abella Fulford", "Leo Gao", "Elie Georges", "C. Gibson", "Vik Goel", "Tarun Gogineni", "Gabriel Goh", "Raphael Gontijo-Lopes", "Jonathan Gordon", "Morgan Grafstein", "Scott Gray", "Ryan Greene", "Joshua Gross", "S. 
Gu", "Yufei Guo", "Chris Hallacy", "Jesse Han", "Jeff Harris", "Yuchen He", "Mike Heaton", "Johannes Heidecke", "Chris Hesse", "Alan Hickey", "Wade Hickey", "Peter Hoeschele", "Brandon Houghton", "Kenny Hsu", "Shengli Hu", "Xin Hu", "Joost Huizinga", "Shantanu Jain", "Shawn Jain", "Joanne Jang", "Angela Jiang", "Roger Jiang", "Haozhun Jin", "Denny Jin", "Shino Jomoto", "B. Jonn", "Heewoo Jun", "Tomer Kaftan", "Lukasz Kaiser", "Ali Kamali", "I. Kanitscheider", "N. Keskar", "Tabarak Khan", "Logan Kilpatrick", "Jong Wook Kim", "Christina Kim", "Yongjik Kim", "Hendrik Kirchner", "J. Kiros", "Matthew Knight", "Daniel Kokotajlo", "Lukasz Kondraciuk", "A. Kondrich", "Aris Konstantinidis", "Kyle Kosic", "Gretchen Krueger", "Vishal Kuo", "Michael Lampe", "Ikai Lan", "Teddy Lee", "J. Leike", "Jade Leung", "Daniel Levy", "Chak Ming Li", "Rachel Lim", "Molly Lin", "Stephanie Lin", "Ma-teusz Litwin", "Theresa Lopez", "Ryan Lowe", "Patricia Lue", "A. Makanju", "Kim Malfacini", "Sam Manning", "Todor Markov", "Yaniv Markovski", "Bianca Martin", "Katie Mayer", "Andrew Mayne", "Bob McGrew", "S. McKinney", "C. McLeavey", "Paul McMillan", "Jake McNeil", "David Medina", "Aalok Mehta", "Jacob Menick", "Luke Metz", "Andrey Mishchenko", "Pamela Mishkin", "Vinnie Monaco", "Evan Morikawa", "Daniel P. Mossing", "Tong Mu", "Mira Murati", "O. Murk", "David M'ely", "Ashvin Nair", "Reiichiro Nakano", "Rajeev Nayak", "Arvind Neelakantan", "Richard Ngo", "Hyeonwoo Noh", "Ouyang Long", "Cullen O'Keefe", "J. Pachocki", "Alex Paino", "Joe Palermo", "Ashley Pantuliano", "Giambattista Parascandolo", "Joel Parish", "Emy Parparita", "Alexandre Passos", "Mikhail Pavlov", "Andrew Peng", "Adam Perelman", "Filipe de Avila Belbute Peres", "Michael Petrov", "Henrique Pondé de Oliveira Pinto", "Michael Pokorny", "Michelle Pokrass", "Vitchyr H. Pong", "Tolly Powell", "Alethea Power", "Boris Power", "Elizabeth Proehl", "Raul Puri", "Alec Radford", "Jack W. Rae", "Aditya Ramesh", "Cameron Raymond", "Francis Real", "Kendra Rimbach", "Carl Ross", "Bob Rotsted", "Henri Roussez", "Nick Ryder", "M. Saltarelli", "Ted Sanders", "Shibani Santurkar", "Girish Sastry", "Heather Schmidt", "David Schnurr", "John Schulman", "Daniel Selsam", "Kyla Sheppard", "Toki Sherbakov", "Jessica Shieh", "Sarah Shoker", "Pranav Shyam", "Szymon Sidor", "Eric Sigler", "Maddie Simens", "Jordan Sitkin", "Katarina Slama", "Ian Sohl", "Benjamin D. Sokolowsky", "Yang Song", "Natalie Staudacher", "F. Such", "Natalie Summers", "I. Sutskever", "Jie Tang", "N. Tezak", "Madeleine Thompson", "Phil Tillet", "Amin Tootoonchian", "Elizabeth Tseng", "Preston Tuggle", "Nick Turley", "Jerry Tworek", "Juan Felipe Cer'on Uribe", "Andrea Vallone", "Arun Vijayvergiya", "Chelsea Voss", "Carroll L. Wainwright", "Justin Jay Wang", "Alvin Wang", "Ben Wang", "Jonathan Ward", "Jason Wei", "CJ Weinmann", "Akila Welihinda", "P. 
Welinder", "Jiayi Weng", "Lilian Weng", "Matt Wiethoff", "Dave Willner", "Clemens Winter", "Samuel Wolrich", "Hannah Wong", "Lauren Workman", "Sherwin Wu", "Jeff Wu", "Michael Wu", "Kai Xiao", "Tao Xu", "Sarah Yoo", "Kevin Yu", "Qim-ing Yuan", "Wojciech Zaremba", "Rowan Zellers", "Chong Zhang", "Marvin Zhang", "Shengjia Zhao", "Tianhao Zheng", "Juntang Zhuang", "William Zhuk", "Barret Zoph" ], "externalIds": { "ArXiv": "2303.08774", "CorpusId": 257532815 }, "url": "https://www.semanticscholar.org/paper/163b4d6a79a5b19af88b8585456363340d9efd04", "referenceCount": 0, "citationCount": 7054, "influentialCitationCount": 1038, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MSeg3D: Multi-Modal 3D Semantic Segmentation for Autonomous Driving", "abstract": "LiDAR and camera are two modalities available for 3D semantic segmentation in autonomous driving. The popular LiDAR-only methods severely suffer from inferior segmentation on small and distant objects due to insufficient laser points, while the robust multi-modal solution is under-explored, where we investigate three crucial inherent difficulties: modality heterogeneity, limited sensor field of view intersection, and multi-modal data augmentation. We propose a multi-modal 3D semantic segmentation model (MSeg3D) with joint intra-modal feature extraction and inter-modal feature fusion to mitigate the modality heterogeneity. The multi-modal fusion in MSeg3D consists of geometry-based feature fusion GF-Phase, cross-modal feature completion, and semantic-based feature fusion SF-Phase on all visible points. The multi-modal data augmentation is reinvigorated by applying asymmetric transformations on LiDAR point cloud and multi-camera images individually, which benefits the model training with diversified augmentation transformations. MSeg3D achieves state-of-the-art results on nuScenes, Waymo, and SemanticKITTI datasets. Under the malfunctioning multi-camera input and the multi-frame point clouds input, MSeg3D still shows robustness and improves the LiDAR-only baseline. Our code is publicly available at https://github.com/jialeli1/lidarseg3d.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Jiale Li", "Hang Dai", "Hao Han", "Yong Ding" ], "externalIds": { "DBLP": "journals/corr/abs-2303-08600", "ArXiv": "2303.08600", "DOI": "10.1109/CVPR52729.2023.02078", "CorpusId": 257532589 }, "url": "https://www.semanticscholar.org/paper/0c2fb6f568ece453248f39e48bf58fc33fce5537", "referenceCount": 76, "citationCount": 42, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GameFormer: Game-theoretic Modeling and Learning of Transformer-based Interactive Prediction and Planning for Autonomous Driving", "abstract": "Autonomous vehicles operating in complex real-world environments require accurate predictions of interactive behaviors between traffic participants. This paper tackles the interaction prediction problem by formulating it with hierarchical game theory and proposing the GameFormer model for its implementation. The model incorporates a Transformer encoder, which effectively models the relationships between scene elements, alongside a novel hierarchical Transformer decoder structure. At each decoding level, the decoder utilizes the prediction outcomes from the previous level, in addition to the shared environmental context, to iteratively refine the interaction process. 
Moreover, we propose a learning process that regulates an agent’s behavior at the current level to respond to other agents’ behaviors from the preceding level. Through comprehensive experiments on large-scale real-world driving datasets, we demonstrate the state-of-the-art accuracy of our model on the Waymo interaction prediction task. Additionally, we validate the model’s capacity to jointly reason about the motion plan of the ego agent and the behaviors of multiple agents in both open-loop and closed-loop planning tests, outperforming various baseline methods. Furthermore, we evaluate the efficacy of our model on the nuPlan planning benchmark, where it achieves leading performance. Project website: https://mczhi.github.io/GameFormer/", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Zhiyu Huang", "Haochen Liu", "Chen Lv" ], "externalIds": { "DBLP": "journals/corr/abs-2303-05760", "ArXiv": "2303.05760", "DOI": "10.1109/ICCV51070.2023.00361", "CorpusId": 257482793 }, "url": "https://www.semanticscholar.org/paper/4c667a69a3d788e4ddbaf900dd36b78d845fd287", "referenceCount": 51, "citationCount": 54, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Planning-oriented Autonomous Driving", "abstract": "Modern autonomous driving system is characterized as modular tasks in sequential order, i.e., perception, prediction, and planning. In order to perform a wide diversity of tasks and achieve advanced-level intelligence, contemporary approaches either deploy standalone models for individual tasks, or design a multi-task paradigm with separate heads. However, they might suffer from accumulative errors or deficient task coordination. Instead, we argue that a favorable framework should be devised and optimized in pursuit of the ultimate goal, i.e., planning of the self-driving car. Oriented at this, we revisit the key components within perception and prediction, and prioritize the tasks such that all these tasks contribute to planning. We introduce Unified Autonomous Driving (UniAD), a comprehensive framework up-to-date that incorporates full-stack driving tasks in one network. It is exquisitely devised to leverage advantages of each module, and provide complementary feature abstractions for agent interaction from a global perspective. Tasks are communicated with unified query interfaces to facilitate each other toward planning. We instantiate UniAD on the challenging nuScenes benchmark. With extensive ablations, the effectiveness of using such a philosophy is proven by substantially outperforming previous state-of-the-arts in all aspects. 
Code and models are public.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Yi Hu", "Jiazhi Yang", "Li Chen", "Keyu Li", "Chonghao Sima", "Xizhou Zhu", "Siqi Chai", "Senyao Du", "Tianwei Lin", "Wen Wang", "Lewei Lu", "Xiaosong Jia", "Qiang Liu", "Jifeng Dai", "Yu Qiao", "Hongyang Li" ], "externalIds": { "DBLP": "conf/cvpr/HuYCLSZCDLWLJLD23", "ArXiv": "2212.10156", "DOI": "10.1109/CVPR52729.2023.01712", "CorpusId": 257687420 }, "url": "https://www.semanticscholar.org/paper/fdd7d5b0f6b8641c356e170fd264cd11f70ba657", "referenceCount": 112, "citationCount": 357, "influentialCitationCount": 40, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "BEVFormer: Learning Bird's-Eye-View Representation from Multi-Camera Images via Spatiotemporal Transformers", "abstract": "3D visual perception tasks, including 3D detection and map segmentation based on multi-camera images, are essential for autonomous driving systems. In this work, we present a new framework termed BEVFormer, which learns unified BEV representations with spatiotemporal transformers to support multiple autonomous driving perception tasks. In a nutshell, BEVFormer exploits both spatial and temporal information by interacting with spatial and temporal space through predefined grid-shaped BEV queries. To aggregate spatial information, we design spatial cross-attention that each BEV query extracts the spatial features from the regions of interest across camera views. For temporal information, we propose temporal self-attention to recurrently fuse the history BEV information. Our approach achieves the new state-of-the-art 56.9\\% in terms of NDS metric on the nuScenes \\texttt{test} set, which is 9.0 points higher than previous best arts and on par with the performance of LiDAR-based baselines. We further show that BEVFormer remarkably improves the accuracy of velocity estimation and recall of objects under low visibility conditions. The code is available at \\url{https://github.com/zhiqi-li/BEVFormer}.", "year": 2022, "venue": "European Conference on Computer Vision", "authors": [ "Zhiqi Li", "Wenhai Wang", "Hongyang Li", "Enze Xie", "Chonghao Sima", "Tong Lu", "Qiao Yu", "Jifeng Dai" ], "externalIds": { "ArXiv": "2203.17270", "DBLP": "conf/eccv/LiWLXSLQD22", "DOI": "10.48550/arXiv.2203.17270", "CorpusId": 247839336 }, "url": "https://www.semanticscholar.org/paper/a824c6e214dd0118f70af8bb05d67d94a858d076", "referenceCount": 57, "citationCount": 890, "influentialCitationCount": 231, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "AUTO-DISCERN: Autonomous Driving Using Common Sense Reasoning", "abstract": "Driving an automobile involves the tasks of observing surroundings, then making a driving decision based on these observations (steer, brake, coast, etc.). In autonomous driving, all these tasks have to be automated. Autonomous driving technology thus far has relied primarily on machine learning techniques. We argue that appropriate technology should be used for the appropriate task. That is, while machine learning technology is good for observing and automatically understanding the surroundings of an automobile, driving decisions are better automated via commonsense reasoning rather than machine learning. 
In this paper, we discuss (i) how commonsense reasoning can be automated using answer set programming (ASP) and the goal-directed s(CASP) ASP system, and (ii) develop the AUTO-DISCERN system using this technology for automating decision-making in driving. The goal of our research, described in this paper, is to develop an autonomous driving system that works by simulating the mind of a human driver. Since driving decisions are based on human-style reasoning, they are explainable, their ethics can be ensured, and they will always be correct, provided the system modeling and system inputs are correct.", "year": 2021, "venue": "ICLP Workshops", "authors": [ "S. Kothawade", "Vinaya Khandelwal", "Kinjal Basu", "Huaduo Wang", "Gopal Gupta" ], "externalIds": { "DBLP": "journals/corr/abs-2110-13606", "ArXiv": "2110.13606", "CorpusId": 239885415 }, "url": "https://www.semanticscholar.org/paper/0b246c95bfb86d08d7f08660b382bd69edf39b73", "referenceCount": 34, "citationCount": 18, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Urban Driver: Learning to Drive from Real-world Demonstrations Using Policy Gradients", "abstract": "In this work we are the first to present an offline policy gradient method for learning imitative policies for complex urban driving from a large corpus of real-world demonstrations. This is achieved by building a differentiable data-driven simulator on top of perception outputs and high-fidelity HD maps of the area. It allows us to synthesize new driving experiences from existing demonstrations using mid-level representations. Using this simulator we then train a policy network in closed-loop employing policy gradients. We train our proposed method on 100 hours of expert demonstrations on urban roads and show that it learns complex driving policies that generalize well and can perform a variety of driving maneuvers. We demonstrate this in simulation as well as deploy our model to self-driving vehicles in the real-world. Our method outperforms previously demonstrated state-of-the-art for urban driving scenarios -- all this without the need for complex state perturbations or collecting additional on-policy data during training. We make code and data publicly available.", "year": 2021, "venue": "Conference on Robot Learning", "authors": [ "Oliver Scheel", "Luca Bergamini", "Maciej Wołczyk", "Błażej Osiński", "Peter Ondruska" ], "externalIds": { "ArXiv": "2109.13333", "DBLP": "journals/corr/abs-2109-13333", "CorpusId": 237263814 }, "url": "https://www.semanticscholar.org/paper/6e6954001901c730ef70bf6be76dbb975cb3d088", "referenceCount": 47, "citationCount": 82, "influentialCitationCount": 12, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "nuPlan: A closed-loop ML-based planning benchmark for autonomous vehicles", "abstract": "In this work, we propose the world's first closed-loop ML-based planning benchmark for autonomous driving. While there is a growing body of ML-based motion planners, the lack of established datasets and metrics has limited the progress in this area. Existing benchmarks for autonomous vehicle motion prediction have focused on short-term motion forecasting, rather than long-term planning. This has led previous works to use open-loop evaluation with L2-based metrics, which are not suitable for fairly evaluating long-term planning. 
Our benchmark overcomes these limitations by introducing a large-scale driving dataset, lightweight closed-loop simulator, and motion-planning-specific metrics. We provide a high-quality dataset with 1500h of human driving data from 4 cities across the US and Asia with widely varying traffic patterns (Boston, Pittsburgh, Las Vegas and Singapore). We will provide a closed-loop simulation framework with reactive agents and provide a large set of both general and scenario-specific planning metrics. We plan to release the dataset at NeurIPS 2021 and organize benchmark challenges starting in early 2022.", "year": 2021, "venue": "arXiv.org", "authors": [ "Holger Caesar", "Juraj Kabzan", "Kok Seang Tan", "Whye Kit Fong", "Eric M. Wolff", "A. Lang", "L. Fletcher", "Oscar Beijbom", "Sammy Omari" ], "externalIds": { "DBLP": "journals/corr/abs-2106-11810", "ArXiv": "2106.11810", "CorpusId": 235593036 }, "url": "https://www.semanticscholar.org/paper/b88b38ec61a4881173ab94647d1e97500f4af15b", "referenceCount": 23, "citationCount": 183, "influentialCitationCount": 29, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Drivers’ Attention Strategies before Eyes-off-Road in Different Traffic Scenarios: Adaptation and Anticipation", "abstract": "The distribution of drivers’ visual attention prior to diverting focus from the driving task is critical for safety. The object of this study is to investigate drivers’ attention strategy before they occlude their vision for different durations under different driving scenarios. A total of 3 (scenarios) × 3 (durations) within-subjects design was applied. Twenty-three participants completed three durations of occlusion (0, 1, and 2 s) test drive in a motion-based driving simulator under three scenarios (urban, rural, motorway). Drivers’ occlusion behaviour, driving behaviour, and visual behaviour in 6 s before occlusion was analyzed and compared. The results showed that drivers tended to slow down and increased their attention on driving task to keep safety in occlusion 2 s condition. The distribution of attention differed among different driving scenarios and occlusion durations. More attention was directed to Forward position and Speedometer in occlusion conditions, and a strong shift in attention from Forward position to Road users and Speedometer was found in occlusion 2 s condition. Road users was glanced more frequently in urban road with a higher percentage of attention transitions from Forward position to Road users. While gaze switching to Speedometer with a higher intensity was found on motorway. It suggests that drivers could adapt their visual attention to driving demand and anticipate the development of upcoming situations by sampling enough driving-related information before eyes-off-road. Moreover, the adaptation and anticipation are in accordance with driving situation and expected eyes-off-road duration. Better knowledge about attentional strategies before attention away from road contributes to more efficient and safe interaction with additional tasks.", "year": 2021, "venue": "International Journal of Environmental Research and Public Health", "authors": [ "Zhuofan Liu", "W. 
Yuan", "Yong Ma" ], "externalIds": { "PubMedCentral": "8038146", "DOI": "10.3390/ijerph18073716", "CorpusId": 233208807, "PubMed": "33918239" }, "url": "https://www.semanticscholar.org/paper/cad0e4fa4df42c48f59efd3ca769ca81cb51cfd4", "referenceCount": 39, "citationCount": 7, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "MotionNet: Joint Perception and Motion Prediction for Autonomous Driving Based on Bird’s Eye View Maps", "abstract": "The ability to reliably perceive the environmental states, particularly the existence of objects and their motion behavior, is crucial for autonomous driving. In this work, we propose an efficient deep model, called MotionNet, to jointly perform perception and motion prediction from 3D point clouds. MotionNet takes a sequence of LiDAR sweeps as input and outputs a bird's eye view (BEV) map, which encodes the object category and motion information in each grid cell. The backbone of MotionNet is a novel spatio-temporal pyramid network, which extracts deep spatial and temporal features in a hierarchical fashion. To enforce the smoothness of predictions over both space and time, the training of MotionNet is further regularized with novel spatial and temporal consistency losses. Extensive experiments show that the proposed method overall outperforms the state-of-the-arts, including the latest scene-flow- and 3D-object-detection-based methods. This indicates the potential value of the proposed method serving as a backup to the bounding-box-based system, and providing complementary information to the motion planner in autonomous driving. Code is available at https://www.merl.com/research/license#MotionNet.", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Pengxiang Wu", "Siheng Chen", "Dimitris N. Metaxas" ], "externalIds": { "MAG": "3034295100", "DBLP": "journals/corr/abs-2003-06754", "ArXiv": "2003.06754", "DOI": "10.1109/cvpr42600.2020.01140", "CorpusId": 212725237 }, "url": "https://www.semanticscholar.org/paper/5e84232f179034b039bfc4d1dae3c91c1a50bfa2", "referenceCount": 69, "citationCount": 134, "influentialCitationCount": 17, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Spatiotemporal Relationship Reasoning for Pedestrian Intent Prediction", "abstract": "Reasoning over visual data is a desirable capability for robotics and vision-based applications. Such reasoning enables forecasting the next events or actions in videos. In recent years, various models have been developed based on convolution operations for prediction or forecasting, but they lack the ability to reason over spatiotemporal data and infer the relationships of different objects in the scene. In this letter, we present a framework based on graph convolution to uncover the spatiotemporal relationships in the scene for reasoning about pedestrian intent. A scene graph is built on top of segmented object instances within and across video frames. Pedestrian intent, defined as the future action of crossing or not-crossing the street, is very crucial piece of information for autonomous vehicles to navigate safely and more smoothly. We approach the problem of intent prediction from two different perspectives and anticipate the intention-to-cross within both pedestrian-centric and location-centric scenarios. 
In addition, we introduce a new dataset designed specifically for autonomous-driving scenarios in areas with dense pedestrian populations: the Stanford-TRI Intent Prediction (STIP) dataset. Our experiments on STIP and another benchmark dataset show that our graph modeling framework is able to predict the intention-to-cross of the pedestrians with an accuracy of 79.10% on STIP and 79.28% on Joint Attention for Autonomous Driving (JAAD) dataset up to one second earlier than when the actual crossing happens. These results outperform baseline and previous work. Please refer to http://stip.stanford.edu/ for the dataset and code.", "year": 2020, "venue": "IEEE Robotics and Automation Letters", "authors": [ "Bingbin Liu", "E. Adeli", "Zhangjie Cao", "Kuan-Hui Lee", "Abhijeet Shenoi", "Adrien Gaidon", "Juan Carlos Niebles" ], "externalIds": { "MAG": "3008700642", "DBLP": "journals/ral/LiuACLSGN20", "ArXiv": "2002.08945", "DOI": "10.1109/LRA.2020.2976305", "CorpusId": 211205148 }, "url": "https://www.semanticscholar.org/paper/8a6196e1c2e0da3f63f600f10f8d6eaa8c9051ac", "referenceCount": 80, "citationCount": 122, "influentialCitationCount": 18, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Spatiotemporal Motion Planning with Combinatorial Reasoning for Autonomous Driving", "abstract": "Motion planning for urban environments with numerous moving agents can be viewed as a combinatorial problem. With passing an obstacle before, after, right or left, there are multiple options an autonomous vehicle could choose to execute. These combinatorial aspects need to be taken into account in the planning framework. We address this problem by proposing a novel planning approach that combines trajectory planning and maneuver reasoning. We define a classification for dynamic obstacles along a reference curve that allows us to extract tactical decision sequences. We separate longitudinal and lateral movement to speed up the optimization-based trajectory planning. To map the set of obtained trajectories to maneuver variants, we define a semantic language to describe them. This allows us to choose an optimal trajectory while also ensuring maneuver consistency over time. We demonstrate the capabilities of our approach for a scenario that is still widely considered to be challenging.", "year": 2018, "venue": "International Conference on Intelligent Transportation Systems", "authors": [ "Klemens Esterle", "Patrick Hart", "Julian Bernhard", "A. Knoll" ], "externalIds": { "MAG": "2892001109", "DBLP": "conf/itsc/EsterleHBK18", "ArXiv": "2207.04418", "DOI": "10.1109/ITSC.2018.8570003", "CorpusId": 54465187 }, "url": "https://www.semanticscholar.org/paper/34d9197fb03910ab91badfe1f7549498fe4263e0", "referenceCount": 17, "citationCount": 16, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On Offline Evaluation of Vision-based Driving Models", "abstract": null, "year": 2018, "venue": "European Conference on Computer Vision", "authors": [ "Felipe Codevilla", "Antonio M. López", "V. 
Koltun", "Alexey Dosovitskiy" ], "externalIds": { "ArXiv": "1809.04843", "MAG": "2949905850", "DBLP": "conf/eccv/CodevillaLKD18", "DOI": "10.1007/978-3-030-01267-0_15", "CorpusId": 52035171 }, "url": "https://www.semanticscholar.org/paper/206c0d1c6cb90b3914a25de04c5bb17182f6696b", "referenceCount": 28, "citationCount": 96, "influentialCitationCount": 11, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Building Machines that Learn and Think Like People", "abstract": "Recent successes in artificial intelligence and machine learning have been largely driven by methods for sophisticated pattern recognition, including deep neural networks and other data-intensive methods. But human intelligence is more than just pattern recognition. And no machine system yet built has anything like the flexible, general-purpose commonsense grasp of the world that we can see in even a one-year-old human infant. I will consider how we might capture the basic learning and thinking abilities humans possess from early childhood, as one route to building more human-like forms of machine learning and thinking. At the heart of human common sense is our ability to model the physical and social environment around us: to explain and understand what we see, to imagine things we could see but haven't yet, to solve problems and plan actions to make these things real, and to build new models as we learn more about the world. I will focus on our recent work reverse-engineering these capacities using methods from probabilistic programming, program induction and program synthesis, which together with deep learning methods and video game simulation engines, provide a toolkit for the joint enterprise of modeling human intelligence and making AI systems smarter in more human-like ways.", "year": 2018, "venue": "Adaptive Agents and Multi-Agent Systems", "authors": [ "J. Tenenbaum" ], "externalIds": { "MAG": "2963305465", "DBLP": "conf/atal/Tenenbaum18", "CorpusId": 260496023 }, "url": "https://www.semanticscholar.org/paper/83040001210751239553269727b9ea53e152af71", "referenceCount": 196, "citationCount": 1408, "influentialCitationCount": 67, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Minimum Required Attention: A Human-Centered Approach to Driver Inattention", "abstract": "Objective: To propose a driver attention theory based on the notion of driving as a satisficing and partially self-paced task and, within this framework, present a definition for driver inattention. Background: Many definitions of driver inattention and distraction have been proposed, but they are difficult to operationalize, and they are either unreasonably strict and inflexible or suffer from hindsight bias. Method: Existing definitions of driver distraction are reviewed and their shortcomings identified. We then present the minimum required attention (MiRA) theory to overcome these shortcomings. Suggestions on how to operationalize MiRA are also presented. Results: MiRA describes which role the attention of the driver plays in the shared “situation awareness of the traffic system.” A driver is considered attentive when sampling sufficient information to meet the demands of the system, namely, that he or she fulfills the preconditions to be able to form and maintain a good enough mental representation of the situation. A driver should only be considered inattentive when information sampling is not sufficient, regardless of whether the driver is concurrently executing an additional task or not. 
Conclusions: The MiRA theory builds on well-established driver attention theories. It goes beyond available driver distraction definitions by first defining what a driver needs to be attentive to, being free from hindsight bias, and allowing the driver to adapt to the current demands of the traffic situation through satisficing and self-pacing. MiRA has the potential to provide the stepping stone for unbiased and operationalizable inattention detection and classification.", "year": 2017, "venue": "Hum. Factors", "authors": [ "K. Kircher", "Christer Ahlström" ], "externalIds": { "MAG": "2530791092", "DBLP": "journals/hf/KircherA17", "DOI": "10.1177/0018720816672756", "CorpusId": 3777021, "PubMed": "27738279" }, "url": "https://www.semanticscholar.org/paper/541e8c6df01e27c5822c4ee04aa554d4c36ab786", "referenceCount": 78, "citationCount": 78, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology", "Medicine", "Computer Science" ] }, { "title": "MultiNet: Real-time Joint Semantic Reasoning for Autonomous Driving", "abstract": "While most approaches to semantic reasoning have focused on improving performance, in this paper we argue that computational times are very important in order to enable real time applications such as autonomous driving. Towards this goal, we present an approach to joint classification, detection and semantic segmentation using a unified architecture where the encoder is shared amongst the three tasks. Our approach is very simple, can be trained end-to-end and performs extremely well in the challenging KITTI dataset. Our approach is also very efficient, allowing us to perform inference at more then 23 frames per second. Training scripts and trained weights to reproduce our results can be found here: https://github.com/MarvinTeichmann/MultiNet", "year": 2016, "venue": "2018 IEEE Intelligent Vehicles Symposium (IV)", "authors": [ "Marvin Teichmann", "Michael Weber", "Johann Marius Zöllner", "R. Cipolla", "R. Urtasun" ], "externalIds": { "DBLP": "journals/corr/TeichmannWZCU16", "MAG": "2951916398", "ArXiv": "1612.07695", "DOI": "10.1109/IVS.2018.8500504", "CorpusId": 5064446 }, "url": "https://www.semanticscholar.org/paper/394b103969eac939c2dfb7ef6de518a4f57679b6", "referenceCount": 68, "citationCount": 659, "influentialCitationCount": 51, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Mastering the game of Go with deep neural networks and tree search", "abstract": null, "year": 2016, "venue": "Nature", "authors": [ "David Silver", "Aja Huang", "Chris J. Maddison", "A. Guez", "L. Sifre", "George van den Driessche", "Julian Schrittwieser", "Ioannis Antonoglou", "Vedavyas Panneershelvam", "Marc Lanctot", "S. Dieleman", "Dominik Grewe", "John Nham", "Nal Kalchbrenner", "I. Sutskever", "T. Lillicrap", "M. Leach", "K. Kavukcuoglu", "T. Graepel", "D. Hassabis" ], "externalIds": { "DBLP": "journals/nature/SilverHMGSDSAPL16", "MAG": "2257979135", "DOI": "10.1038/nature16961", "CorpusId": 515925, "PubMed": "26819042" }, "url": "https://www.semanticscholar.org/paper/846aedd869a00c09b40f1f1f35673cb22bc87490", "referenceCount": 72, "citationCount": 15882, "influentialCitationCount": 553, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Human-level control through deep reinforcement learning", "abstract": null, "year": 2015, "venue": "Nature", "authors": [ "Volodymyr Mnih", "K. Kavukcuoglu", "David Silver", "Andrei A. Rusu", "J. Veness", "Marc G. Bellemare", "Alex Graves", "Martin A. 
Riedmiller", "A. Fidjeland", "Georg Ostrovski", "Stig Petersen", "Charlie Beattie", "Amir Sadik", "Ioannis Antonoglou", "Helen King", "D. Kumaran", "Daan Wierstra", "S. Legg", "D. Hassabis" ], "externalIds": { "DBLP": "journals/nature/MnihKSRVBGRFOPB15", "MAG": "2145339207", "DOI": "10.1038/nature14236", "CorpusId": 205242740, "PubMed": "25719670" }, "url": "https://www.semanticscholar.org/paper/340f48901f72278f6bf78a04ee5b01df208cc508", "referenceCount": 37, "citationCount": 25075, "influentialCitationCount": 3030, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Evolution of the human brain: when bigger is better", "abstract": "Comparative studies of the brain in mammals suggest that there are general architectural principles governing its growth and evolutionary development. We are beginning to understand the geometric, biophysical and energy constraints that have governed the evolution and functional organization of the brain and its underlying neuronal network. The object of this review is to present current perspectives on primate brain evolution, especially in humans, and to examine some hypothetical organizing principles that underlie the brain's complex organization. Some of the design principles and operational modes that underlie the information processing capacity of the cerebral cortex in primates will be explored. It is shown that the development of the cortex coordinates folding with connectivity in a way that produces smaller and faster brains, then otherwise would have been possible. In view of the central importance placed on brain evolution in explaining the success of our own species, one may wonder whether there are physical limits that constrain its processing power and evolutionary potential. It will be argued that at a brain size of about 3500 cm3, corresponding to a brain volume two to three times that of modern man, the brain seems to reach its maximum processing capacity. The larger the brain grows beyond this critical size, the less efficient it will become, thus limiting any improvement in cognitive power.", "year": 2014, "venue": "Frontiers in Neuroanatomy", "authors": [ "M. Hofman" ], "externalIds": { "PubMedCentral": "3973910", "MAG": "1993889841", "DOI": "10.3389/fnana.2014.00015", "CorpusId": 2804655, "PubMed": "24723857" }, "url": "https://www.semanticscholar.org/paper/cae12791eb49b9044994489942b0c646ff96f54f", "referenceCount": 127, "citationCount": 228, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Congested traffic states in empirical observations and microscopic simulations", "abstract": "We present data from several German freeways showing different kinds of congested traffic forming near road inhomogeneities, specifically lane closings, intersections, or uphill gradients. The states are localized or extended, homogeneous or oscillating. Combined states are observed as well, like the coexistence of moving localized clusters and clusters pinned at road inhomogeneities, or regions of oscillating congested traffic upstream of nearly homogeneous congested traffic. The experimental findings are consistent with a recently proposed theoretical phase diagram for traffic near on-ramps [D. Helbing, A. Hennecke, and M. Treiber, Phys. Rev. Lett. 82, 4360 (1999)]. We simulate these situations with a continuous microscopic single-lane model, the \"intelligent driver model,\" using empirical boundary conditions. 
All observations, including the coexistence of states, are qualitatively reproduced by describing inhomogeneities with local variations of one model parameter. We show that the results of the microscopic model can be understood by formulating the theoretical phase diagram for bottlenecks in a more general way. In particular, a local drop of the road capacity induced by parameter variations has essentially the same effect as an on-ramp.", "year": 2000, "venue": "Physical review. E, Statistical physics, plasmas, fluids, and related interdisciplinary topics", "authors": [ "M. Treiber", "Ansgar Hennecke", "D. Helbing" ], "externalIds": { "ArXiv": "cond-mat/0002177", "MAG": "1965455100", "DOI": "10.1103/PhysRevE.62.1805", "CorpusId": 1100293, "PubMed": "11088643" }, "url": "https://www.semanticscholar.org/paper/5d3b5d2c6b1775b575b484296cd7db67fbbfdbe4", "referenceCount": 29, "citationCount": 3636, "influentialCitationCount": 490, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Medicine" ] }, { "title": "Driver distraction: the effects of concurrent in-vehicle tasks, road environment complexity and age on driving performance.", "abstract": null, "year": 2006, "venue": "Accident Analysis and Prevention", "authors": [ "T. Horberry", "Janet Anderson", "M. Regan", "T. Triggs", "John Brown" ], "externalIds": { "MAG": "2003540033", "DOI": "10.1016/J.AAP.2005.09.007", "CorpusId": 14780080, "PubMed": "16226211" }, "url": "https://www.semanticscholar.org/paper/2d720af9d3cec7085bd00dc1d503b833f722283e", "referenceCount": 13, "citationCount": 720, "influentialCitationCount": 28, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Engineering" ] } ] }, "An Adversarial Perspective on Machine Unlearning for AI Safety": { "paper_title": "An Adversarial Perspective on Machine Unlearning for AI Safety", "arxiv_id": "2409.18025v1", "keyword": "large language model", "authors": [ "Jakub Łucki", "Boyi Wei", "Yangsibo Huang", "Peter Henderson", "Florian Tramèr", "Javier Rando" ], "references": [ { "title": "On Effects of Steering Latent Representation for Large Language Model Unlearning", "abstract": "Representation Misdirection for Unlearning (RMU), which steers model representation in the intermediate layer to a target random representation, is an effective method for large language model (LLM) unlearning. Despite its high performance, the underlying cause and explanation remain underexplored. In this paper, we first theoretically demonstrate that steering forget representations in the intermediate layer reduces token confidence, causing LLMs to generate wrong or nonsense responses. Second, we investigate how the coefficient influences the alignment of forget-sample representations with the random direction and hint at the optimal coefficient values for effective unlearning across different network layers. Third, we show that RMU unlearned models are robust against adversarial jailbreak attacks. Last, our empirical analysis shows that RMU is less effective when applied to the middle and later layers in LLMs. To resolve this drawback, we propose Adaptive RMU -- a simple yet effective alternative method that makes unlearning effective with most layers. 
Extensive experiments demonstrate that Adaptive RMU significantly improves the unlearning performance compared to prior art while incurring no additional computational cost.", "year": 2024, "venue": "arXiv.org", "authors": [ "Dang Huu-Tien", "Trung-Tin Pham", "Hoang Thanh-Tung", "Naoya Inoue" ], "externalIds": { "DBLP": "journals/corr/abs-2408-06223", "ArXiv": "2408.06223", "DOI": "10.48550/arXiv.2408.06223", "CorpusId": 271855327 }, "url": "https://www.semanticscholar.org/paper/2c2f9edbf199f9699297a2beb30f61fac75ae853", "referenceCount": 0, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fluent Student-Teacher Redteaming", "abstract": "Many publicly available language models have been safety tuned to reduce the likelihood of toxic or liability-inducing text. Users or security analysts attempt to jailbreak or redteam these models with adversarial prompts which cause compliance with requests. One attack method is to apply discrete optimization techniques to the prompt. However, the resulting attack strings are often gibberish text, easily filtered by defenders due to high measured perplexity, and may fail for unseen tasks and/or well-tuned models. In this work, we improve existing algorithms (primarily GCG and BEAST) to develop powerful and fluent attacks on safety-tuned models like Llama-2 and Phi-3. Our technique centers around a new distillation-based approach that encourages the victim model to emulate a toxified finetune, either in terms of output probabilities or internal activations. To encourage human-fluent attacks, we add a multi-model perplexity penalty and a repetition penalty to the objective. We also enhance optimizer strength by allowing token insertions, token swaps, and token deletions and by using longer attack sequences. The resulting process is able to reliably jailbreak the most difficult target models with prompts that appear similar to human-written prompts. On Advbench we achieve attack success rates $>93$% for Llama-2-7B, Llama-3-8B, and Vicuna-7B, while maintaining model-measured perplexity $<33$; we achieve $95$% attack success for Phi-3, though with higher perplexity. We also find a universally-optimized single fluent prompt that induces $>88$% compliance on previously unseen tasks across Llama-2-7B, Phi-3-mini and Vicuna-7B and transfers to other black-box models.", "year": 2024, "venue": "arXiv.org", "authors": [ "T. B. Thompson", "Michael Sklar" ], "externalIds": { "DBLP": "journals/corr/abs-2407-17447", "ArXiv": "2407.17447", "DOI": "10.48550/arXiv.2407.17447", "CorpusId": 271404799 }, "url": "https://www.semanticscholar.org/paper/e9139f807a9bb6d6619335f8b1d1677ebbf11260", "referenceCount": 53, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Practical Unlearning for Large Language Models", "abstract": "While LLMs have demonstrated impressive performance across various domains and tasks, their security issues have become increasingly severe. Machine unlearning (MU) has emerged as a promising solution to address these issues by removing the influence of undesired data on the target model without compromising its utility in other aspects. MU typically assumes full access to the original training data to preserve utility, which is difficult to achieve in LLM unlearning. Existing LLM unlearning methods often assume access to data most affected by undesired data unlearning. 
However, this assumption underestimates the entanglement among various LLM capabilities and ignores data access limitations due to various issues. Moreover, these LLM unlearning methods do not sufficiently consider that unlearning requests in real-world scenarios are continuously emerging. To overcome these challenges and achieve practical LLM unlearning, we propose the O3 framework. The O3 framework includes an Out-Of-Distribution (OOD) detector to measure the similarity between input and unlearning data, and an Orthogonal low-rank adapter (LoRA) for continuously unlearning requested data. The OOD detector is trained with a novel contrastive entropy loss and utilizes a local-global layer-aggregated scoring mechanism. The orthogonal LoRA achieves parameter disentanglement among continual unlearning requests. During inference, our O3 framework can smartly decide whether and to what extent to load the unlearning LoRA based on the OOD detector's predictions. Notably, O3's effectiveness does not rely on any retained data. We conducted extensive experiments on O3 and state-of-the-art LLM unlearning methods across three tasks and seven datasets. The results indicate that O3 consistently achieves the best trade-off between unlearning effectiveness and utility preservation, especially when facing continuous unlearning requests.", "year": 2024, "venue": "arXiv.org", "authors": [ "Chongyang Gao", "Lixu Wang", "Chenkai Weng", "Xiao Wang", "Qi Zhu" ], "externalIds": { "DBLP": "journals/corr/abs-2407-10223", "ArXiv": "2407.10223", "DOI": "10.48550/arXiv.2407.10223", "CorpusId": 271212701 }, "url": "https://www.semanticscholar.org/paper/096f9f9a73bf84a56a444b5e995e51e754eff2ac", "referenceCount": 78, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MUSE: Machine Unlearning Six-Way Evaluation for Language Models", "abstract": "Language models (LMs) are trained on vast amounts of text data, which may include private and copyrighted content. Data owners may request the removal of their data from a trained model due to privacy or copyright concerns. However, exactly unlearning only these datapoints (i.e., retraining with the data removed) is intractable in modern-day models. This has led to the development of many approximate unlearning algorithms. The evaluation of the efficacy of these algorithms has traditionally been narrow in scope, failing to precisely quantify the success and practicality of the algorithm from the perspectives of both the model deployers and the data owners. We address this issue by proposing MUSE, a comprehensive machine unlearning evaluation benchmark that enumerates six diverse desirable properties for unlearned models: (1) no verbatim memorization, (2) no knowledge memorization, (3) no privacy leakage, (4) utility preservation on data not intended for removal, (5) scalability with respect to the size of removal requests, and (6) sustainability over sequential unlearning requests. Using these criteria, we benchmark how effectively eight popular unlearning algorithms on 7B-parameter LMs can unlearn Harry Potter books and news articles. Our results demonstrate that most algorithms can prevent verbatim memorization and knowledge memorization to varying degrees, but only one algorithm does not lead to severe privacy leakage. 
Furthermore, existing algorithms fail to meet deployer's expectations because they often degrade general model utility and also cannot sustainably accommodate successive unlearning requests or large-scale content removal. Our findings identify key issues with the practicality of existing unlearning algorithms on language models, and we release our benchmark to facilitate further evaluations: muse-bench.github.io", "year": 2024, "venue": "arXiv.org", "authors": [ "Weijia Shi", "Jaechan Lee", "Yangsibo Huang", "Sadhika Malladi", "Jieyu Zhao", "Ari Holtzman", "Daogao Liu", "Luke S. Zettlemoyer", "Noah A. Smith", "Chiyuan Zhang" ], "externalIds": { "ArXiv": "2407.06460", "DBLP": "journals/corr/abs-2407-06460", "DOI": "10.48550/arXiv.2407.06460", "CorpusId": 271064299 }, "url": "https://www.semanticscholar.org/paper/c757ae0cbff28b65aba40a92ba0d09b0f65a6c27", "referenceCount": 79, "citationCount": 4, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "UnUnlearning: Unlearning is not sufficient for content regulation in advanced generative AI", "abstract": "Exact unlearning was first introduced as a privacy mechanism that allowed a user to retract their data from machine learning models on request. Shortly after, inexact schemes were proposed to mitigate the impractical costs associated with exact unlearning. More recently unlearning is often discussed as an approach for removal of impermissible knowledge i.e. knowledge that the model should not possess such as unlicensed copyrighted, inaccurate, or malicious information. The promise is that if the model does not have a certain malicious capability, then it cannot be used for the associated malicious purpose. In this paper we revisit the paradigm in which unlearning is used for in Large Language Models (LLMs) and highlight an underlying inconsistency arising from in-context learning. Unlearning can be an effective control mechanism for the training phase, yet it does not prevent the model from performing an impermissible act during inference. We introduce a concept of ununlearning, where unlearned knowledge gets reintroduced in-context, effectively rendering the model capable of behaving as if it knows the forgotten knowledge. As a result, we argue that content filtering for impermissible knowledge will be required and even exact unlearning schemes are not enough for effective content regulation. We discuss feasibility of ununlearning for modern LLMs and examine broader implications.", "year": 2024, "venue": "arXiv.org", "authors": [ "Ilia Shumailov", "Jamie Hayes", "Eleni Triantafillou", "Guillermo Ortiz-Jiménez", "Nicolas Papernot", "Matthew Jagielski", "Itay Yona", "Heidi Howard", "Eugene Bagdasaryan" ], "externalIds": { "DBLP": "journals/corr/abs-2407-00106", "ArXiv": "2407.00106", "DOI": "10.48550/arXiv.2407.00106", "CorpusId": 270869978 }, "url": "https://www.semanticscholar.org/paper/edd3a430ca0f04f55e37fa9998d1b0155ca6925d", "referenceCount": 23, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Evaluating Copyright Takedown Methods for Language Models", "abstract": "Language models (LMs) derive their capabilities from extensive training on diverse data, including potentially copyrighted material. These models can memorize and generate content similar to their training data, posing potential concerns. 
Therefore, model creators are motivated to develop mitigation methods that prevent generating protected content. We term this procedure as copyright takedowns for LMs, noting the conceptual similarity to (but legal distinction from) the DMCA takedown. This paper introduces the first evaluation of the feasibility and side effects of copyright takedowns for LMs. We propose CoTaEval, an evaluation framework to assess the effectiveness of copyright takedown methods, the impact on the model's ability to retain uncopyrightable factual knowledge from the training data whose recitation is embargoed, and how well the model maintains its general utility and efficiency. We examine several strategies, including adding system prompts, decoding-time filtering interventions, and unlearning approaches. Our findings indicate that no tested method excels across all metrics, showing significant room for research in this unique problem setting and indicating potential unresolved challenges for live policy proposals.", "year": 2024, "venue": "arXiv.org", "authors": [ "Boyi Wei", "Weijia Shi", "Yangsibo Huang", "Noah A. Smith", "Chiyuan Zhang", "Luke S. Zettlemoyer", "Kai Li", "Peter Henderson" ], "externalIds": { "ArXiv": "2406.18664", "DBLP": "journals/corr/abs-2406-18664", "DOI": "10.48550/arXiv.2406.18664", "CorpusId": 270764347 }, "url": "https://www.semanticscholar.org/paper/b33d66e788a3bc516d204bdf8a9eb7d9c3a9144a", "referenceCount": 62, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Intrinsic Evaluation of Unlearning Using Parametric Knowledge Traces", "abstract": "The task of \"unlearning\" certain concepts in large language models (LLMs) has attracted immense attention recently, due to its importance for mitigating undesirable model behaviours, such as the generation of harmful, private, or incorrect information. Current protocols to evaluate unlearning methods largely rely on behavioral tests, without monitoring the presence of unlearned knowledge within the model's parameters. This residual knowledge can be adversarially exploited to recover the erased information post-unlearning. We argue that unlearning should also be evaluated internally, by considering changes in the parametric knowledge traces of the unlearned concepts. To this end, we propose a general methodology for eliciting directions in the parameter space (termed \"concept vectors\") that encode concrete concepts, and construct ConceptVectors, a benchmark dataset containing hundreds of common concepts and their parametric knowledge traces within two open-source LLMs. Evaluation on ConceptVectors shows that existing unlearning methods minimally impact concept vectors, while directly ablating these vectors demonstrably removes the associated knowledge from the LLMs and significantly reduces their susceptibility to adversarial manipulation. Our results highlight limitations in behavioral-based unlearning evaluations and call for future work to include parametric-based evaluations. 
To support this, we release our code and benchmark at https://github.com/yihuaihong/ConceptVectors.", "year": 2024, "venue": "arXiv.org", "authors": [ "Yihuai Hong", "Lei Yu", "Shauli Ravfogel", "Haiqin Yang", "Mor Geva" ], "externalIds": { "ArXiv": "2406.11614", "DBLP": "journals/corr/abs-2406-11614", "DOI": "10.48550/arXiv.2406.11614", "CorpusId": 270560986 }, "url": "https://www.semanticscholar.org/paper/640a3beb69425551737e74592ae659903aa2b12c", "referenceCount": 37, "citationCount": 3, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Adversarial Perturbations Cannot Reliably Protect Artists From Generative AI", "abstract": "Artists are increasingly concerned about advancements in image generation models that can closely replicate their unique artistic styles. In response, several protection tools against style mimicry have been developed that incorporate small adversarial perturbations into artworks published online. In this work, we evaluate the effectiveness of popular protections -- with millions of downloads -- and show they only provide a false sense of security. We find that low-effort and\"off-the-shelf\"techniques, such as image upscaling, are sufficient to create robust mimicry methods that significantly degrade existing protections. Through a user study, we demonstrate that all existing protections can be easily bypassed, leaving artists vulnerable to style mimicry. We caution that tools based on adversarial perturbations cannot reliably protect artists from the misuse of generative AI, and urge the development of alternative non-technological solutions.", "year": 2024, "venue": "arXiv.org", "authors": [ "Robert Honig", "Javier Rando", "Nicholas Carlini", "Florian Tramèr" ], "externalIds": { "ArXiv": "2406.12027", "DBLP": "journals/corr/abs-2406-12027", "DOI": "10.48550/arXiv.2406.12027", "CorpusId": 270562279 }, "url": "https://www.semanticscholar.org/paper/34774d962d6ce9684245c8b4ec9d4e0886d65222", "referenceCount": 51, "citationCount": 4, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Refusal in Language Models Is Mediated by a Single Direction", "abstract": "Conversational large language models are fine-tuned for both instruction-following and safety, resulting in models that obey benign requests but refuse harmful ones. While this refusal behavior is widespread across chat models, its underlying mechanisms remain poorly understood. In this work, we show that refusal is mediated by a one-dimensional subspace, across 13 popular open-source chat models up to 72B parameters in size. Specifically, for each model, we find a single direction such that erasing this direction from the model's residual stream activations prevents it from refusing harmful instructions, while adding this direction elicits refusal on even harmless instructions. Leveraging this insight, we propose a novel white-box jailbreak method that surgically disables refusal with minimal effect on other capabilities. Finally, we mechanistically analyze how adversarial suffixes suppress propagation of the refusal-mediating direction. Our findings underscore the brittleness of current safety fine-tuning methods. 
More broadly, our work showcases how an understanding of model internals can be leveraged to develop practical methods for controlling model behavior.", "year": 2024, "venue": "arXiv.org", "authors": [ "Andy Arditi", "Oscar Obeso", "Aaquib Syed", "Daniel Paleka", "Nina Rimsky", "Wes Gurnee", "Neel Nanda" ], "externalIds": { "DBLP": "journals/corr/abs-2406-11717", "ArXiv": "2406.11717", "DOI": "10.48550/arXiv.2406.11717", "CorpusId": 270560489 }, "url": "https://www.semanticscholar.org/paper/88f9af8b0a28e14c42d4f70ce2d911b51a06cd16", "referenceCount": 99, "citationCount": 14, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Split, Unlearn, Merge: Leveraging Data Attributes for More Effective Unlearning in LLMs", "abstract": "Large language models (LLMs) have shown to pose social and ethical risks such as generating toxic language or facilitating malicious use of hazardous knowledge. Machine unlearning is a promising approach to improve LLM safety by directly removing harmful behaviors and knowledge. In this paper, we propose\"SPlit, UNlearn, MerGE\"(SPUNGE), a framework that can be used with any unlearning method to amplify its effectiveness. SPUNGE leverages data attributes during unlearning by splitting unlearning data into subsets based on specific attribute values, unlearning each subset separately, and merging the unlearned models. We empirically demonstrate that SPUNGE significantly improves the performance of two recent unlearning methods on state-of-the-art LLMs while maintaining their general capabilities on standard academic benchmarks.", "year": 2024, "venue": "arXiv.org", "authors": [ "S. Kadhe", "Farhan Ahmed", "Dennis Wei", "Nathalie Baracaldo", "Inkit Padhi" ], "externalIds": { "ArXiv": "2406.11780", "DBLP": "journals/corr/abs-2406-11780", "DOI": "10.48550/arXiv.2406.11780", "CorpusId": 270559985 }, "url": "https://www.semanticscholar.org/paper/57dec99099415fe47c83029801a268af64378994", "referenceCount": 51, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RWKU: Benchmarking Real-World Knowledge Unlearning for Large Language Models", "abstract": "Large language models (LLMs) inevitably memorize sensitive, copyrighted, and harmful knowledge from the training corpus; therefore, it is crucial to erase this knowledge from the models. Machine unlearning is a promising solution for efficiently removing specific knowledge by post hoc modifying models. In this paper, we propose a Real-World Knowledge Unlearning benchmark (RWKU) for LLM unlearning. RWKU is designed based on the following three key factors: (1) For the task setting, we consider a more practical and challenging unlearning setting, where neither the forget corpus nor the retain corpus is accessible. (2) For the knowledge source, we choose 200 real-world famous people as the unlearning targets and show that such popular knowledge is widely present in various LLMs. (3) For the evaluation framework, we design the forget set and the retain set to evaluate the model's capabilities across various real-world applications. Regarding the forget set, we provide four membership inference attack (MIA) methods and nine kinds of adversarial attack probes to rigorously test unlearning efficacy. Regarding the retain set, we assess locality and utility in terms of neighbor perturbation, general ability, reasoning ability, truthfulness, factuality, and fluency. 
We conduct extensive experiments across two unlearning scenarios, two models and six baseline methods and obtain some meaningful findings. We release our benchmark and code publicly at http://rwku-bench.github.io for future work.", "year": 2024, "venue": "arXiv.org", "authors": [ "Zhuoran Jin", "Pengfei Cao", "Chenhao Wang", "Zhitao He", "Hongbang Yuan", "Jiachun Li", "Yubo Chen", "Kang Liu", "Jun Zhao" ], "externalIds": { "ArXiv": "2406.10890", "DBLP": "journals/corr/abs-2406-10890", "DOI": "10.48550/arXiv.2406.10890", "CorpusId": 270559969 }, "url": "https://www.semanticscholar.org/paper/2193d01019909565a04110605b326e393d1c4158", "referenceCount": 73, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Large Language Model Unlearning via Embedding-Corrupted Prompts", "abstract": "Large language models (LLMs) have advanced to encompass extensive knowledge across diverse domains. Yet controlling what a large language model should not know is important for ensuring alignment and thus safe use. However, accurately and efficiently unlearning knowledge from an LLM remains challenging due to the potential collateral damage caused by the fuzzy boundary between retention and forgetting, and the large computational requirements for optimization across state-of-the-art models with hundreds of billions of parameters. In this work, we present Embedding-COrrupted (ECO) Prompts, a lightweight unlearning framework for large language models to address both the challenges of knowledge entanglement and unlearning efficiency. Instead of relying on the LLM itself to unlearn, we enforce an unlearned state during inference by employing a prompt classifier to identify and safeguard prompts to forget. We learn corruptions added to prompt embeddings via zeroth order optimization toward the unlearning objective offline and corrupt prompts flagged by the classifier during inference. We find that these embedding-corrupted prompts not only lead to desirable outputs that satisfy the unlearning objective but also closely approximate the output from a model that has never been trained on the data intended for forgetting. Through extensive experiments on unlearning, we demonstrate the superiority of our method in achieving promising unlearning at nearly zero side effects in general domains and domains closely related to the unlearned ones. Additionally, we highlight the scalability of our method to 100 LLMs, ranging from 0.5B to 236B parameters, incurring no additional cost as the number of parameters increases.", "year": 2024, "venue": "arXiv.org", "authors": [ "Chris Liu", "Yaxuan Wang", "Jeffrey Flanigan", "Yang Liu" ], "externalIds": { "ArXiv": "2406.07933", "DBLP": "journals/corr/abs-2406-07933", "DOI": "10.48550/arXiv.2406.07933", "CorpusId": 270392045 }, "url": "https://www.semanticscholar.org/paper/62fad39d968e60e4d3e8ad4d17b9a2c5c6078f39", "referenceCount": 0, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Improving Alignment and Robustness with Circuit Breakers", "abstract": "AI systems can take harmful actions and are highly vulnerable to adversarial attacks. We present an approach, inspired by recent advances in representation engineering, that interrupts the models as they respond with harmful outputs with\"circuit breakers.\"Existing techniques aimed at improving alignment, such as refusal training, are often bypassed. 
Techniques such as adversarial training try to plug these holes by countering specific attacks. As an alternative to refusal training and adversarial training, circuit-breaking directly controls the representations that are responsible for harmful outputs in the first place. Our technique can be applied to both text-only and multimodal language models to prevent the generation of harmful outputs without sacrificing utility -- even in the presence of powerful unseen attacks. Notably, while adversarial robustness in standalone image recognition remains an open challenge, circuit breakers allow the larger multimodal system to reliably withstand image\"hijacks\"that aim to produce harmful content. Finally, we extend our approach to AI agents, demonstrating considerable reductions in the rate of harmful actions when they are under attack. Our approach represents a significant step forward in the development of reliable safeguards to harmful behavior and adversarial attacks.", "year": 2024, "venue": "arXiv.org", "authors": [ "Andy Zou", "Long Phan", "Justin Wang", "Derek Duenas", "Maxwell Lin", "Maksym Andriushchenko", "Rowan Wang", "Zico Kolter", "Matt Fredrikson", "Dan Hendrycks" ], "externalIds": { "DBLP": "journals/corr/abs-2406-04313", "ArXiv": "2406.04313", "DOI": "10.48550/arXiv.2406.04313", "CorpusId": 270286008 }, "url": "https://www.semanticscholar.org/paper/ae16c93bf3a33c43e8f57293530069e77c89bcaa", "referenceCount": 69, "citationCount": 11, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Foundational Challenges in Assuring Alignment and Safety of Large Language Models", "abstract": "This work identifies 18 foundational challenges in assuring the alignment and safety of large language models (LLMs). These challenges are organized into three different categories: scientific understanding of LLMs, development and deployment methods, and sociotechnical challenges. Based on the identified challenges, we pose $200+$ concrete research questions.", "year": 2024, "venue": "arXiv.org", "authors": [ "Usman Anwar", "Abulhair Saparov", "Javier Rando", "Daniel Paleka", "Miles Turpin", "Peter Hase", "Ekdeep Singh Lubana", "Erik Jenner", "Stephen Casper", "Oliver Sourbut", "Benjamin L. Edelman", "Zhaowei Zhang", "Mario Gunther", "Anton Korinek", "J. Hernández-Orallo", "Lewis Hammond", "Eric J. Bigelow", "Alexander Pan", "L. Langosco", "Tomasz Korbak", "Heidi Zhang", "Ruiqi Zhong", "Se'an 'O h'Eigeartaigh", "Gabriel Recchia", "Giulio Corsi", "Alan Chan", "Markus Anderljung", "Lilian Edwards", "Y. Bengio", "Danqi Chen", "Samuel Albanie", "Tegan Maharaj", "Jakob N. Foerster", "Florian Tramèr", "He He", "Atoosa Kasirzadeh", "Yejin Choi", "David Krueger" ], "externalIds": { "DBLP": "journals/corr/abs-2404-09932", "ArXiv": "2404.09932", "DOI": "10.48550/arXiv.2404.09932", "CorpusId": 269149478 }, "url": "https://www.semanticscholar.org/paper/6f98525dc695257bdcb9a491e4d77f4d12bb5144", "referenceCount": 0, "citationCount": 46, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Negative Preference Optimization: From Catastrophic Collapse to Effective Unlearning", "abstract": "Large Language Models (LLMs) often memorize sensitive, private, or copyrighted data during pre-training. LLM unlearning aims to eliminate the influence of undesirable data from the pre-trained model while preserving the model's utilities on other tasks. 
Several practical methods have recently been proposed for LLM unlearning, mostly based on gradient ascent (GA) on the loss of undesirable data. However, on certain unlearning tasks, these methods either fail to effectively unlearn the target data or suffer from catastrophic collapse -- a drastic degradation of the model's utilities. In this paper, we propose Negative Preference Optimization (NPO), a simple alignment-inspired method that could efficiently and effectively unlearn a target dataset. We theoretically show that the progression toward catastrophic collapse by minimizing the NPO loss is exponentially slower than GA. Through experiments on synthetic data and the benchmark TOFU dataset, we demonstrate that NPO-based methods achieve a better balance between unlearning the undesirable data and maintaining the model's utilities. We also observe that NPO-based methods generate more sensible outputs than GA-based methods, whose outputs are often gibberish. Remarkably, on TOFU, NPO-based methods are the first to achieve reasonable unlearning results in forgetting 50% (or more) of the training data, whereas existing methods already struggle with forgetting 10% of training data.", "year": 2024, "venue": "arXiv.org", "authors": [ "Ruiqi Zhang", "Licong Lin", "Yu Bai", "Song Mei" ], "externalIds": { "DBLP": "journals/corr/abs-2404-05868", "ArXiv": "2404.05868", "DOI": "10.48550/arXiv.2404.05868", "CorpusId": 269009619 }, "url": "https://www.semanticscholar.org/paper/6dc1bb04eb0df303b1820ff1de15ab78f554cfff", "referenceCount": 44, "citationCount": 36, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Jailbreaking Leading Safety-Aligned LLMs with Simple Adaptive Attacks", "abstract": "We show that even the most recent safety-aligned LLMs are not robust to simple adaptive jailbreaking attacks. First, we demonstrate how to successfully leverage access to logprobs for jailbreaking: we initially design an adversarial prompt template (sometimes adapted to the target LLM), and then we apply random search on a suffix to maximize a target logprob (e.g., of the token ``Sure''), potentially with multiple restarts. In this way, we achieve nearly 100% attack success rate -- according to GPT-4 as a judge -- on Vicuna-13B, Mistral-7B, Phi-3-Mini, Nemotron-4-340B, Llama-2-Chat-7B/13B/70B, Llama-3-Instruct-8B, Gemma-7B, GPT-3.5, GPT-4, and R2D2 from HarmBench that was adversarially trained against the GCG attack. We also show how to jailbreak all Claude models -- that do not expose logprobs -- via either a transfer or prefilling attack with a 100% success rate. In addition, we show how to use random search on a restricted set of tokens for finding trojan strings in poisoned models -- a task that shares many similarities with jailbreaking -- which is the algorithm that brought us the first place in the SaTML'24 Trojan Detection Competition. The common theme behind these attacks is that adaptivity is crucial: different models are vulnerable to different prompting templates (e.g., R2D2 is very sensitive to in-context learning prompts), some models have unique vulnerabilities based on their APIs (e.g., prefilling for Claude), and in some settings, it is crucial to restrict the token search space based on prior knowledge (e.g., for trojan detection). 
For reproducibility purposes, we provide the code, logs, and jailbreak artifacts in the JailbreakBench format at https://github.com/tml-epfl/llm-adaptive-attacks.", "year": 2024, "venue": "arXiv.org", "authors": [ "Maksym Andriushchenko", "Francesco Croce", "Nicolas Flammarion" ], "externalIds": { "DBLP": "journals/corr/abs-2404-02151", "ArXiv": "2404.02151", "DOI": "10.48550/arXiv.2404.02151", "CorpusId": 268857047 }, "url": "https://www.semanticscholar.org/paper/88d5634a52645f6b05a03536be1f26a2b9bba232", "referenceCount": 49, "citationCount": 48, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Eight Methods to Evaluate Robust Unlearning in LLMs", "abstract": "Machine unlearning can be useful for removing harmful capabilities and memorized text from large language models (LLMs), but there are not yet standardized methods for rigorously evaluating it. In this paper, we first survey techniques and limitations of existing unlearning evaluations. Second, we apply a comprehensive set of tests for the robustness and competitiveness of unlearning in the\"Who's Harry Potter\"(WHP) model from Eldan and Russinovich (2023). While WHP's unlearning generalizes well when evaluated with the\"Familiarity\"metric from Eldan and Russinovich, we find i) higher-than-baseline amounts of knowledge can reliably be extracted, ii) WHP performs on par with the original model on Harry Potter Q&A tasks, iii) it represents latent knowledge comparably to the original model, and iv) there is collateral unlearning in related domains. Overall, our results highlight the importance of comprehensive unlearning evaluation that avoids ad-hoc metrics.", "year": 2024, "venue": "arXiv.org", "authors": [ "Aengus Lynch", "Phillip Guo", "Aidan Ewart", "Stephen Casper", "Dylan Hadfield-Menell" ], "externalIds": { "DBLP": "journals/corr/abs-2402-16835", "ArXiv": "2402.16835", "DOI": "10.48550/arXiv.2402.16835", "CorpusId": 268032022 }, "url": "https://www.semanticscholar.org/paper/0044140fdd4547a380b0b82052ae0b6ffd95216c", "referenceCount": 63, "citationCount": 26, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fast Adversarial Attacks on Language Models In One GPU Minute", "abstract": "In this paper, we introduce a novel class of fast, beam search-based adversarial attack (BEAST) for Language Models (LMs). BEAST employs interpretable parameters, enabling attackers to balance between attack speed, success rate, and the readability of adversarial prompts. The computational efficiency of BEAST facilitates us to investigate its applications on LMs for jailbreaking, eliciting hallucinations, and privacy attacks. Our gradient-free targeted attack can jailbreak aligned LMs with high attack success rates within one minute. For instance, BEAST can jailbreak Vicuna-7B-v1.5 under one minute with a success rate of 89% when compared to a gradient-based baseline that takes over an hour to achieve 70% success rate using a single Nvidia RTX A6000 48GB GPU. Additionally, we discover a unique outcome wherein our untargeted attack induces hallucinations in LM chatbots. Through human evaluations, we find that our untargeted attack causes Vicuna-7B-v1.5 to produce ~15% more incorrect outputs when compared to LM outputs in the absence of our attack. We also learn that 22% of the time, BEAST causes Vicuna to generate outputs that are not relevant to the original prompt. 
Further, we use BEAST to generate adversarial prompts in a few seconds that can boost the performance of existing membership inference attacks for LMs. We believe that our fast attack, BEAST, has the potential to accelerate research in LM security and privacy. Our codebase is publicly available at https://github.com/vinusankars/BEAST.", "year": 2024, "venue": "International Conference on Machine Learning", "authors": [ "Vinu Sankar Sadasivan", "Shoumik Saha", "Gaurang Sriramanan", "Priyatham Kattakinda", "Atoosa Malemir Chegini", "S. Feizi" ], "externalIds": { "DBLP": "conf/icml/SadasivanSSKCF24", "ArXiv": "2402.15570", "DOI": "10.48550/arXiv.2402.15570", "CorpusId": 267938703 }, "url": "https://www.semanticscholar.org/paper/e519699816d358783f41d4bd50fd3465d9fa51bd", "referenceCount": 64, "citationCount": 11, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Rethinking Machine Unlearning for Large Language Models", "abstract": "We explore machine unlearning (MU) in the domain of large language models (LLMs), referred to as LLM unlearning. This initiative aims to eliminate undesirable data influence (e.g., sensitive or illegal information) and the associated model capabilities, while maintaining the integrity of essential knowledge generation and not affecting causally unrelated information. We envision LLM unlearning becoming a pivotal element in the life-cycle management of LLMs, potentially standing as an essential foundation for developing generative AI that is not only safe, secure, and trustworthy, but also resource-efficient without the need of full retraining. We navigate the unlearning landscape in LLMs from conceptual formulation, methodologies, metrics, and applications. In particular, we highlight the often-overlooked aspects of existing LLM unlearning research, e.g., unlearning scope, data-model interaction, and multifaceted efficacy assessment. We also draw connections between LLM unlearning and related areas such as model editing, influence functions, model explanation, adversarial training, and reinforcement learning. Furthermore, we outline an effective assessment framework for LLM unlearning and explore its applications in copyright and privacy safeguards and sociotechnical harm reduction.", "year": 2024, "venue": "arXiv.org", "authors": [ "Sijia Liu", "Yuanshun Yao", "Jinghan Jia", "Stephen Casper", "Nathalie Baracaldo", "Peter Hase", "Xiaojun Xu", "Yuguang Yao", "Chris Liu", "Hang Li", "Kush R. Varshney", "Mohit Bansal", "Sanmi Koyejo", "Yang Liu" ], "externalIds": { "DBLP": "journals/corr/abs-2402-08787", "ArXiv": "2402.08787", "DOI": "10.48550/arXiv.2402.08787", "CorpusId": 267657624 }, "url": "https://www.semanticscholar.org/paper/d0b02eb4f0d3efd884b49450efc88145bdf49abc", "referenceCount": 151, "citationCount": 29, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Assessing the Brittleness of Safety Alignment via Pruning and Low-Rank Modifications", "abstract": "Large language models (LLMs) show inherent brittleness in their safety mechanisms, as evidenced by their susceptibility to jailbreaking and even non-malicious fine-tuning. This study explores this brittleness of safety alignment by leveraging pruning and low-rank modifications. We develop methods to identify critical regions that are vital for safety guardrails, and that are disentangled from utility-relevant regions at both the neuron and rank levels. 
Surprisingly, the isolated regions we find are sparse, comprising about $3\\%$ at the parameter level and $2.5\\%$ at the rank level. Removing these regions compromises safety without significantly impacting utility, corroborating the inherent brittleness of the model's safety mechanisms. Moreover, we show that LLMs remain vulnerable to low-cost fine-tuning attacks even when modifications to the safety-critical regions are restricted. These findings underscore the urgent need for more robust safety strategies in LLMs.", "year": 2024, "venue": "International Conference on Machine Learning", "authors": [ "Boyi Wei", "Kaixuan Huang", "Yangsibo Huang", "Tinghao Xie", "Xiangyu Qi", "Mengzhou Xia", "Prateek Mittal", "Mengdi Wang", "Peter Henderson" ], "externalIds": { "DBLP": "conf/icml/WeiHHXQXMW024", "ArXiv": "2402.05162", "DOI": "10.48550/arXiv.2402.05162", "CorpusId": 267547755 }, "url": "https://www.semanticscholar.org/paper/aa6a03f3368cbb4a413f7e11650fb8a6a2b71de1", "referenceCount": 115, "citationCount": 38, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "TOFU: A Task of Fictitious Unlearning for LLMs", "abstract": "Large language models trained on massive corpora of data from the web can memorize and reproduce sensitive or private data raising both legal and ethical concerns. Unlearning, or tuning models to forget information present in their training data, provides us with a way to protect private data after training. Although several methods exist for such unlearning, it is unclear to what extent they result in models equivalent to those where the data to be forgotten was never learned in the first place. To address this challenge, we present TOFU, a Task of Fictitious Unlearning, as a benchmark aimed at helping deepen our understanding of unlearning. We offer a dataset of 200 diverse synthetic author profiles, each consisting of 20 question-answer pairs, and a subset of these profiles called the forget set that serves as the target for unlearning. We compile a suite of metrics that work together to provide a holistic picture of unlearning efficacy. Finally, we provide a set of baseline results from existing unlearning algorithms. Importantly, none of the baselines we consider show effective unlearning motivating continued efforts to develop approaches for unlearning that effectively tune models so that they truly behave as if they were never trained on the forget data at all.", "year": 2024, "venue": "arXiv.org", "authors": [ "Pratyush Maini", "Zhili Feng", "Avi Schwarzschild", "Zachary Chase Lipton", "J. Kolter" ], "externalIds": { "DBLP": "journals/corr/abs-2401-06121", "ArXiv": "2401.06121", "DOI": "10.48550/arXiv.2401.06121", "CorpusId": 266933371 }, "url": "https://www.semanticscholar.org/paper/6ef1ab6b6775861b7f6a78343d5b90a15226c654", "referenceCount": 44, "citationCount": 53, "influentialCitationCount": 19, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Mixtral of Experts", "abstract": "We introduce Mixtral 8x7B, a Sparse Mixture of Experts (SMoE) language model. Mixtral has the same architecture as Mistral 7B, with the difference that each layer is composed of 8 feedforward blocks (i.e. experts). For every token, at each layer, a router network selects two experts to process the current state and combine their outputs. Even though each token only sees two experts, the selected experts can be different at each timestep. 
As a result, each token has access to 47B parameters, but only uses 13B active parameters during inference. Mixtral was trained with a context size of 32k tokens and it outperforms or matches Llama 2 70B and GPT-3.5 across all evaluated benchmarks. In particular, Mixtral vastly outperforms Llama 2 70B on mathematics, code generation, and multilingual benchmarks. We also provide a model fine-tuned to follow instructions, Mixtral 8x7B - Instruct, that surpasses GPT-3.5 Turbo, Claude-2.1, Gemini Pro, and Llama 2 70B - chat model on human benchmarks. Both the base and instruct models are released under the Apache 2.0 license.", "year": 2024, "venue": "arXiv.org", "authors": [ "Albert Q. Jiang", "Alexandre Sablayrolles", "Antoine Roux", "A. Mensch", "Blanche Savary", "Chris Bamford", "Devendra Singh Chaplot", "Diego de Las Casas", "Emma Bou Hanna", "Florian Bressand", "Gianna Lengyel", "Guillaume Bour", "Guillaume Lample", "L'elio Renard Lavaud", "Lucile Saulnier", "Marie-Anne Lachaux", "Pierre Stock", "Sandeep Subramanian", "Sophia Yang", "Szymon Antoniak", "Teven Le Scao", "Théophile Gervet", "Thibaut Lavril", "Thomas Wang", "Timothée Lacroix", "William El Sayed" ], "externalIds": { "DBLP": "journals/corr/abs-2401-04088", "ArXiv": "2401.04088", "DOI": "10.48550/arXiv.2401.04088", "CorpusId": 266844877 }, "url": "https://www.semanticscholar.org/paper/411114f989a3d1083d90afd265103132fee94ebe", "referenceCount": 35, "citationCount": 495, "influentialCitationCount": 52, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Mechanistic Understanding of Alignment Algorithms: A Case Study on DPO and Toxicity", "abstract": "While alignment algorithms are now commonly used to tune pre-trained language models towards a user's preferences, we lack explanations for the underlying mechanisms in which models become ``aligned'', thus making it difficult to explain phenomena like jailbreaks. In this work we study a popular algorithm, direct preference optimization (DPO), and the mechanisms by which it reduces toxicity. Namely, we first study how toxicity is represented and elicited in a pre-trained language model, GPT2-medium. We then apply DPO with a carefully crafted pairwise dataset to reduce toxicity. We examine how the resulting model averts toxic outputs, and find that capabilities learned from pre-training are not removed, but rather bypassed. We use this insight to demonstrate a simple method to un-align the model, reverting it back to its toxic behavior.", "year": 2024, "venue": "International Conference on Machine Learning", "authors": [ "Andrew Lee", "Xiaoyan Bai", "Itamar Pres", "Martin Wattenberg", "Jonathan K. Kummerfeld", "Rada Mihalcea" ], "externalIds": { "DBLP": "conf/icml/LeeBPWKM24", "ArXiv": "2401.01967", "DOI": "10.48550/arXiv.2401.01967", "CorpusId": 266755904 }, "url": "https://www.semanticscholar.org/paper/26b2adbe089ea36617c3ec0aa009319929da0550", "referenceCount": 45, "citationCount": 41, "influentialCitationCount": 8, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Unveiling the Implicit Toxicity in Large Language Models", "abstract": "The open-endedness of large language models (LLMs) combined with their impressive capabilities may lead to new safety issues when being exploited for malicious use. 
While recent studies primarily focus on probing toxic outputs that can be easily detected with existing toxicity classifiers, we show that LLMs can generate diverse implicit toxic outputs that are exceptionally difficult to detect via simply zero-shot prompting. Moreover, we propose a reinforcement learning (RL) based attacking method to further induce the implicit toxicity in LLMs. Specifically, we optimize the language model with a reward that prefers implicit toxic outputs to explicit toxic and non-toxic ones. Experiments on five widely-adopted toxicity classifiers demonstrate that the attack success rate can be significantly improved through RL fine-tuning. For instance, the RL-finetuned LLaMA-13B model achieves an attack success rate of 90.04% on BAD and 62.85% on Davinci003. Our findings suggest that LLMs pose a significant threat in generating undetectable implicit toxic outputs. We further show that fine-tuning toxicity classifiers on the annotated examples from our attacking method can effectively enhance their ability to detect LLM-generated implicit toxic language. The code is publicly available at https://github.com/thu-coai/Implicit-Toxicity.", "year": 2023, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Jiaxin Wen", "Pei Ke", "Hao Sun", "Zhexin Zhang", "Chengfei Li", "Jinfeng Bai", "Minlie Huang" ], "externalIds": { "ArXiv": "2311.17391", "DBLP": "conf/emnlp/WenKSZLBH23", "DOI": "10.48550/arXiv.2311.17391", "CorpusId": 265498356 }, "url": "https://www.semanticscholar.org/paper/034c8d4eb031786925ef274e6d275c7c210c4f1d", "referenceCount": 46, "citationCount": 12, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Scalable Extraction of Training Data from (Production) Language Models", "abstract": "This paper studies extractable memorization: training data that an adversary can efficiently extract by querying a machine learning model without prior knowledge of the training dataset. We show an adversary can extract gigabytes of training data from open-source language models like Pythia or GPT-Neo, semi-open models like LLaMA or Falcon, and closed models like ChatGPT. Existing techniques from the literature suffice to attack unaligned models; in order to attack the aligned ChatGPT, we develop a new divergence attack that causes the model to diverge from its chatbot-style generations and emit training data at a rate 150x higher than when behaving properly. Our methods show practical attacks can recover far more data than previously thought, and reveal that current alignment techniques do not eliminate memorization.", "year": 2023, "venue": "arXiv.org", "authors": [ "Milad Nasr", "Nicholas Carlini", "Jonathan Hayase", "Matthew Jagielski", "A. F. Cooper", "Daphne Ippolito", "Christopher A. 
Choquette-Choo", "Eric Wallace", "Florian Tramèr", "Katherine Lee" ], "externalIds": { "ArXiv": "2311.17035", "DBLP": "journals/corr/abs-2311-17035", "DOI": "10.48550/arXiv.2311.17035", "CorpusId": 265466445 }, "url": "https://www.semanticscholar.org/paper/fc7ee1828030a818f52518022a39f6a3ada60222", "referenceCount": 0, "citationCount": 195, "influentialCitationCount": 14, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Scalable and Transferable Black-Box Jailbreaks for Language Models via Persona Modulation", "abstract": "Despite efforts to align large language models to produce harmless responses, they are still vulnerable to jailbreak prompts that elicit unrestricted behaviour. In this work, we investigate persona modulation as a black-box jailbreaking method to steer a target model to take on personalities that are willing to comply with harmful instructions. Rather than manually crafting prompts for each persona, we automate the generation of jailbreaks using a language model assistant. We demonstrate a range of harmful completions made possible by persona modulation, including detailed instructions for synthesising methamphetamine, building a bomb, and laundering money. These automated attacks achieve a harmful completion rate of 42.5% in GPT-4, which is 185 times larger than before modulation (0.23%). These prompts also transfer to Claude 2 and Vicuna with harmful completion rates of 61.0% and 35.9%, respectively. Our work reveals yet another vulnerability in commercial large language models and highlights the need for more comprehensive safeguards.", "year": 2023, "venue": "arXiv.org", "authors": [ "Rusheb Shah", "Quentin Feuillade--Montixi", "Soroush Pour", "Arush Tagade", "Stephen Casper", "Javier Rando" ], "externalIds": { "DBLP": "journals/corr/abs-2311-03348", "ArXiv": "2311.03348", "DOI": "10.48550/arXiv.2311.03348", "CorpusId": 265043220 }, "url": "https://www.semanticscholar.org/paper/bfc0e3e651cd4b715272fe68add8a180a112293c", "referenceCount": 30, "citationCount": 72, "influentialCitationCount": 9, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LoRA Fine-tuning Efficiently Undoes Safety Training in Llama 2-Chat 70B", "abstract": "AI developers often apply safety alignment procedures to prevent the misuse of their AI systems. For example, before Meta released Llama 2-Chat - a collection of instruction fine-tuned large language models - they invested heavily in safety training, incorporating extensive red-teaming and reinforcement learning from human feedback. We explore the robustness of safety training in language models by subversively fine-tuning Llama 2-Chat. We employ quantized low-rank adaptation (LoRA) as an efficient fine-tuning method. With a budget of less than \\$200 and using only one GPU, we successfully undo the safety training of Llama 2-Chat models of sizes 7B, 13B, and 70B and on the Mixtral instruct model. Specifically, our fine-tuning technique significantly reduces the rate at which the model refuses to follow harmful instructions. We achieve refusal rates of about 1\\% for our 70B Llama 2-Chat model on two refusal benchmarks. Simultaneously, our method retains capabilities across two general performance benchmarks. We show that subversive fine-tuning is practical and effective, and hence argue that evaluating risks from fine-tuning should be a core part of risk assessments for releasing model weights. 
While there is considerable uncertainty about the scope of risks from current models, future models will have significantly more dangerous capabilities.", "year": 2023, "venue": "arXiv.org", "authors": [ "Simon Lermen", "Charlie Rogers-Smith", "Jeffrey Ladish" ], "externalIds": { "DBLP": "journals/corr/abs-2310-20624", "ArXiv": "2310.20624", "DOI": "10.48550/arXiv.2310.20624", "CorpusId": 264808400 }, "url": "https://www.semanticscholar.org/paper/d1b5151231a790c7a60f620e21860593dae9a1c5", "referenceCount": 31, "citationCount": 40, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Unlearn What You Want to Forget: Efficient Unlearning for LLMs", "abstract": "Large language models (LLMs) have achieved significant progress from pre-training on and memorizing a wide range of textual data, however, this process might suffer from privacy issues and violations of data protection regulations. As a result, the ability to easily remove data related to individual users from such models while not deteriorating their predictive quality after the removal becomes increasingly important. To address these issues, in this work, we propose an efficient unlearning framework that could efficiently update LLMs without having to retrain the whole model after data removals, by introducing lightweight unlearning layers learned with a selective teacher-student objective into the transformers. In addition, we introduce a fusion mechanism to effectively combine different unlearning layers that learns to forget different sets of data to handle a sequence of forgetting operations. Experiments on classification and generation tasks demonstrate the effectiveness of our proposed methods compared to the state-of-the-art baselines.", "year": 2023, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Jiaao Chen", "Diyi Yang" ], "externalIds": { "DBLP": "journals/corr/abs-2310-20150", "ArXiv": "2310.20150", "DOI": "10.48550/arXiv.2310.20150", "CorpusId": 264828972 }, "url": "https://www.semanticscholar.org/paper/0399533de2d1d21f456663d1bd5355c8b3c32a58", "referenceCount": 46, "citationCount": 81, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DEPN: Detecting and Editing Privacy Neurons in Pretrained Language Models", "abstract": "Large language models pretrained on a huge amount of data capture rich knowledge and information in the training data. The ability of data memorization and regurgitation in pretrained language models, revealed in previous studies, brings the risk of data leakage. In order to effectively reduce these risks, we propose a framework DEPN to Detect and Edit Privacy Neurons in pretrained language models, partially inspired by knowledge neurons and model editing. In DEPN, we introduce a novel method, termed as privacy neuron detector, to locate neurons associated with private information, and then edit these detected privacy neurons by setting their activations to zero. Furthermore, we propose a privacy neuron aggregator to dememorize private information in a batch processing manner. Experimental results show that our method can significantly and efficiently reduce the exposure of private data leakage without deteriorating the performance of the model. 
Additionally, we empirically demonstrate the relationship between model memorization and privacy neurons, from multiple perspectives, including model size, training time, prompts, privacy neuron distribution, illustrating the robustness of our approach.", "year": 2023, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Xinwei Wu", "Junzhuo Li", "Minghui Xu", "Weilong Dong", "Shuangzhi Wu", "Chao Bian", "Deyi Xiong" ], "externalIds": { "ArXiv": "2310.20138", "DBLP": "journals/corr/abs-2310-20138", "DOI": "10.48550/arXiv.2310.20138", "CorpusId": 264816202 }, "url": "https://www.semanticscholar.org/paper/41a3c41ba1912e1384849e6898c241af89cc4a11", "referenceCount": 31, "citationCount": 19, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Copyright Violations and Large Language Models", "abstract": "Language models may memorize more than just facts, including entire chunks of texts seen during training. Fair use exemptions to copyright laws typically allow for limited use of copyrighted material without permission from the copyright holder, but typically for extraction of information from copyrighted materials, rather than {\\em verbatim} reproduction. This work explores the issue of copyright violations and large language models through the lens of verbatim memorization, focusing on possible redistribution of copyrighted text. We present experiments with a range of language models over a collection of popular books and coding problems, providing a conservative characterization of the extent to which language models can redistribute these materials. Overall, this research highlights the need for further examination and the potential impact on future developments in natural language processing to ensure adherence to copyright regulations. Code is at \\url{https://github.com/coastalcph/CopyrightLLMs}.", "year": 2023, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Antonia Karamolegkou", "Jiaang Li", "Li Zhou", "Anders Sogaard" ], "externalIds": { "DBLP": "journals/corr/abs-2310-13771", "ArXiv": "2310.13771", "DOI": "10.48550/arXiv.2310.13771", "CorpusId": 264426289 }, "url": "https://www.semanticscholar.org/paper/ea6c0620f4d56faa76a8f99d8963ad77fa6fbdb8", "referenceCount": 23, "citationCount": 29, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Large Language Model Unlearning", "abstract": "We study how to perform unlearning, i.e. forgetting undesirable misbehaviors, on large language models (LLMs). We show at least three scenarios of aligning LLMs with human preferences can benefit from unlearning: (1) removing harmful responses, (2) erasing copyright-protected content as requested, and (3) reducing hallucinations. Unlearning, as an alignment technique, has three advantages. (1) It only requires negative (e.g. harmful) examples, which are much easier and cheaper to collect (e.g. via red teaming or user reporting) than positive (e.g. helpful and often human-written) examples required in RLHF (RL from human feedback). (2) It is computationally efficient. (3) It is especially effective when we know which training samples cause the misbehavior. To the best of our knowledge, our work is among the first to explore LLM unlearning. We are also among the first to formulate the settings, goals, and evaluations in LLM unlearning. 
We show that if practitioners only have limited resources, and therefore the priority is to stop generating undesirable outputs rather than to try to generate desirable outputs, unlearning is particularly appealing. Despite only having negative samples, our ablation study shows that unlearning can still achieve better alignment performance than RLHF with just 2% of its computational time.", "year": 2023, "venue": "arXiv.org", "authors": [ "Yuanshun Yao", "Xiaojun Xu", "Yang Liu" ], "externalIds": { "DBLP": "journals/corr/abs-2310-10683", "ArXiv": "2310.10683", "DOI": "10.48550/arXiv.2310.10683", "CorpusId": 264172840 }, "url": "https://www.semanticscholar.org/paper/8fd11c6f3eb1d0aeb915369f3c4f0b1bb24cab0c", "referenceCount": 55, "citationCount": 58, "influentialCitationCount": 11, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Catastrophic Jailbreak of Open-source LLMs via Exploiting Generation", "abstract": "The rapid progress in open-source large language models (LLMs) is significantly advancing AI development. Extensive efforts have been made before model release to align their behavior with human values, with the primary goal of ensuring their helpfulness and harmlessness. However, even carefully aligned models can be manipulated maliciously, leading to unintended behaviors, known as\"jailbreaks\". These jailbreaks are typically triggered by specific text inputs, often referred to as adversarial prompts. In this work, we propose the generation exploitation attack, an extremely simple approach that disrupts model alignment by only manipulating variations of decoding methods. By exploiting different generation strategies, including varying decoding hyper-parameters and sampling methods, we increase the misalignment rate from 0% to more than 95% across 11 language models including LLaMA2, Vicuna, Falcon, and MPT families, outperforming state-of-the-art attacks with $30\\times$ lower computational cost. Finally, we propose an effective alignment method that explores diverse generation strategies, which can reasonably reduce the misalignment rate under our attack. Altogether, our study underscores a major failure in current safety evaluation and alignment procedures for open-source LLMs, strongly advocating for more comprehensive red teaming and better alignment before releasing such models. Our code is available at https://github.com/Princeton-SysML/Jailbreak_LLM.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Yangsibo Huang", "Samyak Gupta", "Mengzhou Xia", "Kai Li", "Danqi Chen" ], "externalIds": { "DBLP": "journals/corr/abs-2310-06987", "ArXiv": "2310.06987", "DOI": "10.48550/arXiv.2310.06987", "CorpusId": 263835408 }, "url": "https://www.semanticscholar.org/paper/ac27dd71af3ee93e1129482ceececbae7dd0d0e8", "referenceCount": 51, "citationCount": 168, "influentialCitationCount": 20, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fine-tuning Aligned Language Models Compromises Safety, Even When Users Do Not Intend To!", "abstract": "Optimizing large language models (LLMs) for downstream use cases often involves the customization of pre-trained LLMs through further fine-tuning. Meta's open release of Llama models and OpenAI's APIs for fine-tuning GPT-3.5 Turbo on custom datasets also encourage this practice. But, what are the safety costs associated with such custom fine-tuning? 
We note that while existing safety alignment infrastructures can restrict harmful behaviors of LLMs at inference time, they do not cover safety risks when fine-tuning privileges are extended to end-users. Our red teaming studies find that the safety alignment of LLMs can be compromised by fine-tuning with only a few adversarially designed training examples. For instance, we jailbreak GPT-3.5 Turbo's safety guardrails by fine-tuning it on only 10 such examples at a cost of less than $0.20 via OpenAI's APIs, making the model responsive to nearly any harmful instructions. Disconcertingly, our research also reveals that, even without malicious intent, simply fine-tuning with benign and commonly used datasets can also inadvertently degrade the safety alignment of LLMs, though to a lesser extent. These findings suggest that fine-tuning aligned LLMs introduces new safety risks that current safety infrastructures fall short of addressing -- even if a model's initial safety alignment is impeccable, it is not necessarily to be maintained after custom fine-tuning. We outline and critically analyze potential mitigations and advocate for further research efforts toward reinforcing safety protocols for the custom fine-tuning of aligned LLMs.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Xiangyu Qi", "Yi Zeng", "Tinghao Xie", "Pin-Yu Chen", "Ruoxi Jia", "Prateek Mittal", "Peter Henderson" ], "externalIds": { "DBLP": "journals/corr/abs-2310-03693", "ArXiv": "2310.03693", "DOI": "10.48550/arXiv.2310.03693", "CorpusId": 263671523 }, "url": "https://www.semanticscholar.org/paper/0e0e706e13f160e74cac9556f28ab9a358c148d2", "referenceCount": 92, "citationCount": 284, "influentialCitationCount": 45, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Shadow Alignment: The Ease of Subverting Safely-Aligned Language Models", "abstract": "Warning: This paper contains examples of harmful language, and reader discretion is recommended. The increasing open release of powerful large language models (LLMs) has facilitated the development of downstream applications by reducing the essential cost of data annotation and computation. To ensure AI safety, extensive safety-alignment measures have been conducted to armor these models against malicious use (primarily hard prompt attack). However, beneath the seemingly resilient facade of the armor, there might lurk a shadow. By simply tuning on 100 malicious examples with 1 GPU hour, these safely aligned LLMs can be easily subverted to generate harmful content. Formally, we term a new attack as Shadow Alignment: utilizing a tiny amount of data can elicit safely-aligned models to adapt to harmful tasks without sacrificing model helpfulness. Remarkably, the subverted models retain their capability to respond appropriately to regular inquiries. Experiments across 8 models released by 5 different organizations (LLaMa-2, Falcon, InternLM, BaiChuan2, Vicuna) demonstrate the effectiveness of shadow alignment attack. Besides, the single-turn English-only attack successfully transfers to multi-turn dialogue and other languages. This study serves as a clarion call for a collective effort to overhaul and fortify the safety of open-source LLMs against malicious attackers.", "year": 2023, "venue": "arXiv.org", "authors": [ "Xianjun Yang", "Xiao Wang", "Qi Zhang", "L. 
Petzold", "William Yang Wang", "Xun Zhao", "Dahua Lin" ], "externalIds": { "ArXiv": "2310.02949", "DBLP": "journals/corr/abs-2310-02949", "DOI": "10.48550/arXiv.2310.02949", "CorpusId": 263620436 }, "url": "https://www.semanticscholar.org/paper/84b7c486c56bd3880cb8eb01de9ae90ba3ebdaed", "referenceCount": 49, "citationCount": 106, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Who's Harry Potter? Approximate Unlearning in LLMs", "abstract": "Large language models (LLMs) are trained on massive internet corpora that often contain copyrighted content. This poses legal and ethical challenges for the developers and users of these models, as well as the original authors and publishers. In this paper, we propose a novel technique for unlearning a subset of the training data from a LLM, without having to retrain it from scratch. We evaluate our technique on the task of unlearning the Harry Potter books from the Llama2-7b model (a generative language model recently open-sourced by Meta). While the model took over 184K GPU-hours to pretrain, we show that in about 1 GPU hour of finetuning, we effectively erase the model's ability to generate or recall Harry Potter-related content, while its performance on common benchmarks (such as Winogrande, Hellaswag, arc, boolq and piqa) remains almost unaffected. We make our fine-tuned model publicly available on HuggingFace for community evaluation. To the best of our knowledge, this is the first paper to present an effective technique for unlearning in generative language models. Our technique consists of three main components: First, we use a reinforced model that is further trained on the target data to identify the tokens that are most related to the unlearning target, by comparing its logits with those of a baseline model. Second, we replace idiosyncratic expressions in the target data with generic counterparts, and leverage the model's own predictions to generate alternative labels for every token. These labels aim to approximate the next-token predictions of a model that has not been trained on the target data. Third, we finetune the model on these alternative labels, which effectively erases the original text from the model's memory whenever it is prompted with its context.", "year": 2023, "venue": "arXiv.org", "authors": [ "Ronen Eldan", "M. Russinovich" ], "externalIds": { "ArXiv": "2310.02238", "DBLP": "journals/corr/abs-2310-02238", "DOI": "10.48550/arXiv.2310.02238", "CorpusId": 263608437 }, "url": "https://www.semanticscholar.org/paper/40a7c44d1cfaa9faf1f731a6f93a889fab5426da", "referenceCount": 23, "citationCount": 92, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Can Sensitive Information Be Deleted From LLMs? Objectives for Defending Against Extraction Attacks", "abstract": "Pretrained language models sometimes possess knowledge that we do not wish them to, including memorized personal information and knowledge that could be used to harm people. They can also output toxic or harmful text. To mitigate these safety and informational issues, we propose an attack-and-defense framework for studying the task of deleting sensitive information directly from model weights. 
We study direct edits to model weights because (1) this approach should guarantee that particular deleted information is never extracted by future prompt attacks, and (2) it should protect against whitebox attacks, which is necessary for making claims about safety/privacy in a setting where publicly available model weights could be used to elicit sensitive information. Our threat model assumes that an attack succeeds if the answer to a sensitive question is located among a set of B generated candidates, based on scenarios where the information would be insecure if the answer is among B candidates. Experimentally, we show that even state-of-the-art model editing methods such as ROME struggle to truly delete factual information from models like GPT-J, as our whitebox and blackbox attacks can recover\"deleted\"information from an edited model 38% of the time. These attacks leverage two key observations: (1) that traces of deleted information can be found in intermediate model hidden states, and (2) that applying an editing method for one question may not delete information across rephrased versions of the question. Finally, we provide new defense methods that protect against some extraction attacks, but we do not find a single universally effective defense method. Our results suggest that truly deleting sensitive information is a tractable but difficult problem, since even relatively low attack success rates have potentially severe societal implications for real-world deployment of language models.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Vaidehi Patil", "Peter Hase", "Mohit Bansal" ], "externalIds": { "DBLP": "conf/iclr/PatilHB24", "ArXiv": "2309.17410", "DOI": "10.48550/arXiv.2309.17410", "CorpusId": 263311025 }, "url": "https://www.semanticscholar.org/paper/46eea7d651420e60f9b1393e3f5eda14cbff7a2a", "referenceCount": 59, "citationCount": 43, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Universal and Transferable Adversarial Attacks on Aligned Language Models", "abstract": "Because\"out-of-the-box\"large language models are capable of generating a great deal of objectionable content, recent work has focused on aligning these models in an attempt to prevent undesirable generation. While there has been some success at circumventing these measures -- so-called\"jailbreaks\"against LLMs -- these attacks have required significant human ingenuity and are brittle in practice. In this paper, we propose a simple and effective attack method that causes aligned language models to generate objectionable behaviors. Specifically, our approach finds a suffix that, when attached to a wide range of queries for an LLM to produce objectionable content, aims to maximize the probability that the model produces an affirmative response (rather than refusing to answer). However, instead of relying on manual engineering, our approach automatically produces these adversarial suffixes by a combination of greedy and gradient-based search techniques, and also improves over past automatic prompt generation methods. Surprisingly, we find that the adversarial prompts generated by our approach are quite transferable, including to black-box, publicly released LLMs. Specifically, we train an adversarial attack suffix on multiple prompts (i.e., queries asking for many different types of objectionable content), as well as multiple models (in our case, Vicuna-7B and 13B). 
When doing so, the resulting attack suffix is able to induce objectionable content in the public interfaces to ChatGPT, Bard, and Claude, as well as open source LLMs such as LLaMA-2-Chat, Pythia, Falcon, and others. In total, this work significantly advances the state-of-the-art in adversarial attacks against aligned language models, raising important questions about how such systems can be prevented from producing objectionable information. Code is available at github.com/llm-attacks/llm-attacks.", "year": 2023, "venue": "arXiv.org", "authors": [ "Andy Zou", "Zifan Wang", "J. Z. Kolter", "Matt Fredrikson" ], "externalIds": { "DBLP": "journals/corr/abs-2307-15043", "ArXiv": "2307.15043", "CorpusId": 260202961 }, "url": "https://www.semanticscholar.org/paper/47030369e97cc44d4b2e3cf1be85da0fd134904a", "referenceCount": 49, "citationCount": 724, "influentialCitationCount": 220, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Open Problems and Fundamental Limitations of Reinforcement Learning from Human Feedback", "abstract": "Reinforcement learning from human feedback (RLHF) is a technique for training AI systems to align with human goals. RLHF has emerged as the central method used to finetune state-of-the-art large language models (LLMs). Despite this popularity, there has been relatively little public work systematizing its flaws. In this paper, we (1) survey open problems and fundamental limitations of RLHF and related methods; (2) overview techniques to understand, improve, and complement RLHF in practice; and (3) propose auditing and disclosure standards to improve societal oversight of RLHF systems. Our work emphasizes the limitations of RLHF and highlights the importance of a multi-faceted approach to the development of safer AI systems.", "year": 2023, "venue": "Trans. Mach. Learn. Res.", "authors": [ "Stephen Casper", "Xander Davies", "Claudia Shi", "T. Gilbert", "J'er'emy Scheurer", "Javier Rando", "Rachel Freedman", "Tomasz Korbak", "David Lindner", "P. Freire", "Tony Wang", "Samuel Marks", "Charbel-Raphaël Ségerie", "Micah Carroll", "Andi Peng", "Phillip J. K. Christoffersen", "Mehul Damani", "Stewart Slocum", "Usman Anwar", "Anand Siththaranjan", "Max Nadeau", "Eric J. Michaud", "J. Pfau", "Dmitrii Krasheninnikov", "Xin Chen", "L. Langosco", "Peter Hase", "Erdem Biyik", "A. Dragan", "David Krueger", "Dorsa Sadigh", "Dylan Hadfield-Menell" ], "externalIds": { "DBLP": "journals/corr/abs-2307-15217", "ArXiv": "2307.15217", "DOI": "10.48550/arXiv.2307.15217", "CorpusId": 260316010 }, "url": "https://www.semanticscholar.org/paper/6eb46737bf0ef916a7f906ec6a8da82a45ffb623", "referenceCount": 291, "citationCount": 292, "influentialCitationCount": 17, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Jailbroken: How Does LLM Safety Training Fail?", "abstract": "Large language models trained for safety and harmlessness remain susceptible to adversarial misuse, as evidenced by the prevalence of\"jailbreak\"attacks on early releases of ChatGPT that elicit undesired behavior. Going beyond recognition of the issue, we investigate why such attacks succeed and how they can be created. We hypothesize two failure modes of safety training: competing objectives and mismatched generalization. Competing objectives arise when a model's capabilities and safety goals conflict, while mismatched generalization occurs when safety training fails to generalize to a domain for which capabilities exist. 
We use these failure modes to guide jailbreak design and then evaluate state-of-the-art models, including OpenAI's GPT-4 and Anthropic's Claude v1.3, against both existing and newly designed attacks. We find that vulnerabilities persist despite the extensive red-teaming and safety-training efforts behind these models. Notably, new attacks utilizing our failure modes succeed on every prompt in a collection of unsafe requests from the models' red-teaming evaluation sets and outperform existing ad hoc jailbreaks. Our analysis emphasizes the need for safety-capability parity -- that safety mechanisms should be as sophisticated as the underlying model -- and argues against the idea that scaling alone can resolve these safety failure modes.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Alexander Wei", "Nika Haghtalab", "J. Steinhardt" ], "externalIds": { "DBLP": "journals/corr/abs-2307-02483", "ArXiv": "2307.02483", "DOI": "10.48550/arXiv.2307.02483", "CorpusId": 259342528 }, "url": "https://www.semanticscholar.org/paper/929305892d4ddae575a0fc23227a8139f7681632", "referenceCount": 67, "citationCount": 488, "influentialCitationCount": 73, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Direct Preference Optimization: Your Language Model is Secretly a Reward Model", "abstract": "While large-scale unsupervised language models (LMs) learn broad world knowledge and some reasoning skills, achieving precise control of their behavior is difficult due to the completely unsupervised nature of their training. Existing methods for gaining such steerability collect human labels of the relative quality of model generations and fine-tune the unsupervised LM to align with these preferences, often with reinforcement learning from human feedback (RLHF). However, RLHF is a complex and often unstable procedure, first fitting a reward model that reflects the human preferences, and then fine-tuning the large unsupervised LM using reinforcement learning to maximize this estimated reward without drifting too far from the original model. In this paper we introduce a new parameterization of the reward model in RLHF that enables extraction of the corresponding optimal policy in closed form, allowing us to solve the standard RLHF problem with only a simple classification loss. The resulting algorithm, which we call Direct Preference Optimization (DPO), is stable, performant, and computationally lightweight, eliminating the need for sampling from the LM during fine-tuning or performing significant hyperparameter tuning. Our experiments show that DPO can fine-tune LMs to align with human preferences as well as or better than existing methods. Notably, fine-tuning with DPO exceeds PPO-based RLHF in ability to control sentiment of generations, and matches or improves response quality in summarization and single-turn dialogue while being substantially simpler to implement and train.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Rafael Rafailov", "Archit Sharma", "E. Mitchell", "Stefano Ermon", "Christopher D. 
Manning", "Chelsea Finn" ], "externalIds": { "DBLP": "journals/corr/abs-2305-18290", "ArXiv": "2305.18290", "DOI": "10.48550/arXiv.2305.18290", "CorpusId": 258959321 }, "url": "https://www.semanticscholar.org/paper/0d1c76d45afa012ded7ab741194baf142117c495", "referenceCount": 59, "citationCount": 1530, "influentialCitationCount": 435, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "OpenAssistant Conversations - Democratizing Large Language Model Alignment", "abstract": "Aligning large language models (LLMs) with human preferences has proven to drastically improve usability and has driven rapid adoption as demonstrated by ChatGPT. Alignment techniques such as supervised fine-tuning (SFT) and reinforcement learning from human feedback (RLHF) greatly reduce the required skill and domain knowledge to effectively harness the capabilities of LLMs, increasing their accessibility and utility across various domains. However, state-of-the-art alignment techniques like RLHF rely on high-quality human feedback data, which is expensive to create and often remains proprietary. In an effort to democratize research on large-scale alignment, we release OpenAssistant Conversations, a human-generated, human-annotated assistant-style conversation corpus consisting of 161,443 messages in 35 different languages, annotated with 461,292 quality ratings, resulting in over 10,000 complete and fully annotated conversation trees. The corpus is a product of a worldwide crowd-sourcing effort involving over 13,500 volunteers. Models trained on OpenAssistant Conversations show consistent improvements on standard benchmarks over respective base models. We release our code and data under a fully permissive licence.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Andreas Kopf", "Yannic Kilcher", "Dimitri von Rutte", "Sotiris Anagnostidis", "Zhi Rui Tam", "K. Stevens", "Abdullah Barhoum", "Nguyen Minh Duc", "Oliver Stanley", "Rich'ard Nagyfi", "ES Shahul", "Sameer Suri", "David Glushkov", "Arnav Dantuluri", "Andrew Maguire", "Christoph Schuhmann", "Huu Nguyen", "A. Mattick" ], "externalIds": { "ArXiv": "2304.07327", "DBLP": "conf/nips/KopfKRATSBNSNES23", "DOI": "10.48550/arXiv.2304.07327", "CorpusId": 258179434 }, "url": "https://www.semanticscholar.org/paper/ae736662f64d56f3ab1894fbd9c45f8f37251843", "referenceCount": 53, "citationCount": 416, "influentialCitationCount": 47, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Knowledge Unlearning for Mitigating Privacy Risks in Language Models", "abstract": "Pretrained Language Models (LMs) memorize a vast amount of knowledge during initial pretraining, including information that may violate the privacy of personal lives and identities. Previous work addressing privacy issues for LMs has mostly focused on data preprocessing and differential privacy methods, both requiring re-training the underlying LM. We propose knowledge unlearning as an alternative method to reduce privacy risks for LMs post hoc. We show that simply performing gradient ascent on target token sequences is effective at forgetting them with little to no degradation of general language modeling performances for larger-sized LMs. We also find that sequential unlearning is better than trying to unlearn all the data at once and that unlearning is highly dependent on which kind of data (domain) is forgotten. 
By showing comparisons with previous methods known to mitigate privacy risks for LMs, we show that our approach can give a stronger empirical privacy guarantee in scenarios where the data vulnerable to extraction attacks are known a priori while being much more efficient and robust.", "year": 2022, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Joel Jang", "Dongkeun Yoon", "Sohee Yang", "Sungmin Cha", "Moontae Lee", "Lajanugen Logeswaran", "Minjoon Seo" ], "externalIds": { "DBLP": "journals/corr/abs-2210-01504", "ACL": "2023.acl-long.805", "ArXiv": "2210.01504", "DOI": "10.48550/arXiv.2210.01504", "CorpusId": 252693065 }, "url": "https://www.semanticscholar.org/paper/91fb2254c5942048425e642c8a6c8d400006150e", "referenceCount": 55, "citationCount": 111, "influentialCitationCount": 27, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Training a Helpful and Harmless Assistant with Reinforcement Learning from Human Feedback", "abstract": "We apply preference modeling and reinforcement learning from human feedback (RLHF) to finetune language models to act as helpful and harmless assistants. We find this alignment training improves performance on almost all NLP evaluations, and is fully compatible with training for specialized skills such as python coding and summarization. We explore an iterated online mode of training, where preference models and RL policies are updated on a weekly cadence with fresh human feedback data, efficiently improving our datasets and models. Finally, we investigate the robustness of RLHF training, and identify a roughly linear relation between the RL reward and the square root of the KL divergence between the policy and its initialization. Alongside our main results, we perform peripheral analyses on calibration, competing objectives, and the use of OOD detection, compare our models with human writers, and provide samples from our models using prompts appearing in recent related work. Figure These plots show that PM accuracy decreases as we focus exclusively on comparisons between pairs of samples with high score. We have normalized all preference models to have the same mean score on a held-out dataset so that they’re directly comparable, and then plotted accuracy for the comparisons where both samples have scores above a specific threshold.", "year": 2022, "venue": "arXiv.org", "authors": [ "Yuntao Bai", "Andy Jones", "Kamal Ndousse", "Amanda Askell", "Anna Chen", "Nova Dassarma", "Dawn Drain", "Stanislav Fort", "Deep Ganguli", "T. Henighan", "Nicholas Joseph", "Saurav Kadavath", "John Kernion", "Tom Conerly", "S. El-Showk", "Nelson Elhage", "Zac Hatfield-Dodds", "Danny Hernandez", "Tristan Hume", "Scott Johnston", "Shauna Kravec", "Liane Lovitt", "Neel Nanda", "Catherine Olsson", "Dario Amodei", "Tom B. Brown", "Jack Clark", "Sam McCandlish", "C. Olah", "Benjamin Mann", "Jared Kaplan" ], "externalIds": { "ArXiv": "2204.05862", "DBLP": "journals/corr/abs-2204-05862", "DOI": "10.48550/arXiv.2204.05862", "CorpusId": 248118878 }, "url": "https://www.semanticscholar.org/paper/0286b2736a114198b25fb5553c671c33aed5d477", "referenceCount": 72, "citationCount": 1483, "influentialCitationCount": 235, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Training language models to follow instructions with human feedback", "abstract": "Making language models bigger does not inherently make them better at following a user's intent. 
For example, large language models can generate outputs that are untruthful, toxic, or simply not helpful to the user. In other words, these models are not aligned with their users. In this paper, we show an avenue for aligning language models with user intent on a wide range of tasks by fine-tuning with human feedback. Starting with a set of labeler-written prompts and prompts submitted through the OpenAI API, we collect a dataset of labeler demonstrations of the desired model behavior, which we use to fine-tune GPT-3 using supervised learning. We then collect a dataset of rankings of model outputs, which we use to further fine-tune this supervised model using reinforcement learning from human feedback. We call the resulting models InstructGPT. In human evaluations on our prompt distribution, outputs from the 1.3B parameter InstructGPT model are preferred to outputs from the 175B GPT-3, despite having 100x fewer parameters. Moreover, InstructGPT models show improvements in truthfulness and reductions in toxic output generation while having minimal performance regressions on public NLP datasets. Even though InstructGPT still makes simple mistakes, our results show that fine-tuning with human feedback is a promising direction for aligning language models with human intent.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Long Ouyang", "Jeff Wu", "Xu Jiang", "Diogo Almeida", "Carroll L. Wainwright", "Pamela Mishkin", "Chong Zhang", "Sandhini Agarwal", "Katarina Slama", "Alex Ray", "John Schulman", "Jacob Hilton", "Fraser Kelton", "Luke E. Miller", "Maddie Simens", "Amanda Askell", "P. Welinder", "P. Christiano", "J. Leike", "Ryan J. Lowe" ], "externalIds": { "DBLP": "conf/nips/Ouyang0JAWMZASR22", "ArXiv": "2203.02155", "CorpusId": 246426909 }, "url": "https://www.semanticscholar.org/paper/d766bffc357127e0dc86dd69561d5aeb520d6f4c", "referenceCount": 83, "citationCount": 8493, "influentialCitationCount": 1115, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Data Poisoning Won't Save You From Facial Recognition", "abstract": "Data poisoning has been proposed as a compelling defense against facial recognition models trained on Web-scraped pictures. Users can perturb images they post online, so that models will misclassify future (unperturbed) pictures. We demonstrate that this strategy provides a false sense of security, as it ignores an inherent asymmetry between the parties: users' pictures are perturbed once and for all before being published (at which point they are scraped) and must thereafter fool all future models -- including models trained adaptively against the users' past attacks, or models that use technologies discovered after the attack. We evaluate two systems for poisoning attacks against large-scale facial recognition, Fawkes (500'000+ downloads) and LowKey. We demonstrate how an \"oblivious\" model trainer can simply wait for future developments in computer vision to nullify the protection of pictures collected in the past. We further show that an adversary with black-box access to the attack can (i) train a robust model that resists the perturbations of collected pictures and (ii) detect poisoned pictures uploaded online. We caution that facial recognition poisoning will not admit an \"arms race\" between attackers and defenders. 
Once perturbed pictures are scraped, the attack cannot be changed so any future successful defense irrevocably undermines users' privacy.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Evani Radiya-Dixit", "Florian Tramèr" ], "externalIds": { "DBLP": "journals/corr/abs-2106-14851", "ArXiv": "2106.14851", "CorpusId": 235658909 }, "url": "https://www.semanticscholar.org/paper/9d8a948634204fedef929f1e0a24eb0cfc3685eb", "referenceCount": 80, "citationCount": 52, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LoRA: Low-Rank Adaptation of Large Language Models", "abstract": "An important paradigm of natural language processing consists of large-scale pre-training on general domain data and adaptation to particular tasks or domains. As we pre-train larger models, full fine-tuning, which retrains all model parameters, becomes less feasible. Using GPT-3 175B as an example -- deploying independent instances of fine-tuned models, each with 175B parameters, is prohibitively expensive. We propose Low-Rank Adaptation, or LoRA, which freezes the pre-trained model weights and injects trainable rank decomposition matrices into each layer of the Transformer architecture, greatly reducing the number of trainable parameters for downstream tasks. Compared to GPT-3 175B fine-tuned with Adam, LoRA can reduce the number of trainable parameters by 10,000 times and the GPU memory requirement by 3 times. LoRA performs on-par or better than fine-tuning in model quality on RoBERTa, DeBERTa, GPT-2, and GPT-3, despite having fewer trainable parameters, a higher training throughput, and, unlike adapters, no additional inference latency. We also provide an empirical investigation into rank-deficiency in language model adaptation, which sheds light on the efficacy of LoRA. We release a package that facilitates the integration of LoRA with PyTorch models and provide our implementations and model checkpoints for RoBERTa, DeBERTa, and GPT-2 at https://github.com/microsoft/LoRA.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "J. E. Hu", "Yelong Shen", "Phillip Wallis", "Zeyuan Allen-Zhu", "Yuanzhi Li", "Shean Wang", "Weizhu Chen" ], "externalIds": { "DBLP": "conf/iclr/HuSWALWWC22", "ArXiv": "2106.09685", "CorpusId": 235458009 }, "url": "https://www.semanticscholar.org/paper/a8ca46b171467ceb2d7652fbfb67fe701ad86092", "referenceCount": 65, "citationCount": 5650, "influentialCitationCount": 1000, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Measuring Massive Multitask Language Understanding", "abstract": "We propose a new test to measure a text model's multitask accuracy. The test covers 57 tasks including elementary mathematics, US history, computer science, law, and more. To attain high accuracy on this test, models must possess extensive world knowledge and problem solving ability. We find that while most recent models have near random-chance accuracy, the very largest GPT-3 model improves over random chance by almost 20 percentage points on average. However, on every one of the 57 tasks, the best models still need substantial improvements before they can reach expert-level accuracy. Models also have lopsided performance and frequently do not know when they are wrong. Worse, they still have near-random accuracy on some socially important subjects such as morality and law. 
By comprehensively evaluating the breadth and depth of a model's academic and professional understanding, our test can be used to analyze models across many tasks and to identify important shortcomings.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Dan Hendrycks", "Collin Burns", "Steven Basart", "Andy Zou", "Mantas Mazeika", "D. Song", "J. Steinhardt" ], "externalIds": { "DBLP": "conf/iclr/HendrycksBBZMSS21", "ArXiv": "2009.03300", "MAG": "3083410900", "CorpusId": 221516475 }, "url": "https://www.semanticscholar.org/paper/814a4f680b9ba6baba23b93499f4b48af1a27678", "referenceCount": 35, "citationCount": 2248, "influentialCitationCount": 471, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "BAE: BERT-based Adversarial Examples for Text Classification", "abstract": "Modern text classification models are susceptible to adversarial examples, perturbed versions of the original text indiscernible by humans but which get misclassified by the model. We present BAE, a powerful black box attack for generating grammatically correct and semantically coherent adversarial examples. BAE replaces and inserts tokens in the original text by masking a portion of the text and leveraging a language model to generate alternatives for the masked tokens. Compared to prior work, we show that BAE performs a stronger attack on three widely used models for seven text classification datasets.", "year": 2020, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Siddhant Garg", "Goutham Ramakrishnan" ], "externalIds": { "MAG": "3104423855", "ACL": "2020.emnlp-main.498", "DBLP": "conf/emnlp/GargR20", "ArXiv": "2004.01970", "DOI": "10.18653/v1/2020.emnlp-main.498", "CorpusId": 214802269 }, "url": "https://www.semanticscholar.org/paper/06a427e1688f92053a38c73cb4e0da25177c89e7", "referenceCount": 35, "citationCount": 474, "influentialCitationCount": 69, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "BERT-ATTACK: Adversarial Attack against BERT Using BERT", "abstract": "Adversarial attacks for discrete data (such as text) has been proved significantly more challenging than continuous data (such as image), since it is difficult to generate adversarial samples with gradient-based methods. Currently, the successful attack methods for text usually adopt heuristic replacement strategies on character or word level, which remains challenging to find the optimal solution in the massive space of possible combination of replacements, while preserving semantic consistency and language fluency. In this paper, we propose \\textbf{BERT-Attack}, a high-quality and effective method to generate adversarial samples using pre-trained masked language models exemplified by BERT. We turn BERT against its fine-tuned models and other deep neural models for downstream tasks. Our method successfully misleads the target models to predict incorrectly, outperforming state-of-the-art attack strategies in both success rate and perturb percentage, while the generated adversarial samples are fluent and semantically preserved. Also, the cost of calculation is low, thus possible for large-scale generations.", "year": 2020, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Linyang Li", "Ruotian Ma", "Qipeng Guo", "X. 
Xue", "Xipeng Qiu" ], "externalIds": { "MAG": "3101449015", "DBLP": "conf/emnlp/LiMGXQ20", "ACL": "2020.emnlp-main.500", "ArXiv": "2004.09984", "DOI": "10.18653/v1/2020.emnlp-main.500", "CorpusId": 216036179 }, "url": "https://www.semanticscholar.org/paper/dc0ce66f5ab4c5173cdef951649044e4c4c05076", "referenceCount": 26, "citationCount": 595, "influentialCitationCount": 126, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On Adaptive Attacks to Adversarial Example Defenses", "abstract": "Adaptive attacks have (rightfully) become the de facto standard for evaluating defenses to adversarial examples. We find, however, that typical adaptive evaluations are incomplete. We demonstrate that thirteen defenses recently published at ICLR, ICML and NeurIPS---and chosen for illustrative and pedagogical purposes---can be circumvented despite attempting to perform evaluations using adaptive attacks. While prior evaluation papers focused mainly on the end result---showing that a defense was ineffective---this paper focuses on laying out the methodology and the approach necessary to perform an adaptive attack. We hope that these analyses will serve as guidance on how to properly perform adaptive attacks against defenses to adversarial examples, and thus will allow the community to make further progress in building more robust models.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Florian Tramèr", "Nicholas Carlini", "Wieland Brendel", "A. Madry" ], "externalIds": { "MAG": "3007305010", "DBLP": "conf/nips/TramerCBM20", "ArXiv": "2002.08347", "CorpusId": 211171611 }, "url": "https://www.semanticscholar.org/paper/58c143069444c7dff4be53531a47efefc40be497", "referenceCount": 73, "citationCount": 770, "influentialCitationCount": 63, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Machine Unlearning", "abstract": "Once users have shared their data online, it is generally difficult for them to revoke access and ask for the data to be deleted. Machine learning (ML) exacerbates this problem because any model trained with said data may have memorized it, putting users at risk of a successful privacy attack exposing their information. Yet, having models unlearn is notoriously difficult.We introduce SISA training, a framework that expedites the unlearning process by strategically limiting the influence of a data point in the training procedure. While our framework is applicable to any learning algorithm, it is designed to achieve the largest improvements for stateful algorithms like stochastic gradient descent for deep neural networks. SISA training reduces the computational overhead associated with unlearning, even in the worst-case setting where unlearning requests are made uniformly across the training set. In some cases, the service provider may have a prior on the distribution of unlearning requests that will be issued by users. We may take this prior into account to partition and order data accordingly, and further decrease overhead from unlearning.Our evaluation spans several datasets from different domains, with corresponding motivations for unlearning. Under no distributional assumptions, for simple learning tasks, we observe that SISA training improves time to unlearn points from the Purchase dataset by 4.63×, and 2.45× for the SVHN dataset, over retraining from scratch. 
SISA training also provides a speed-up of 1.36× in retraining for complex learning tasks such as ImageNet classification; aided by transfer learning, this results in a small degradation in accuracy. Our work contributes to practical data governance in machine unlearning.", "year": 2019, "venue": "IEEE Symposium on Security and Privacy", "authors": [ "Lucas Bourtoule", "Varun Chandrasekaran", "Christopher A. Choquette-Choo", "Hengrui Jia", "Adelin Travers", "Baiwu Zhang", "D. Lie", "Nicolas Papernot" ], "externalIds": { "ArXiv": "1912.03817", "DBLP": "conf/sp/BourtouleCCJTZL21", "DOI": "10.1109/SP40001.2021.00019", "CorpusId": 208909851 }, "url": "https://www.semanticscholar.org/paper/8e58dc63817a2a26e5a2ddad38d8b1d19d1c3795", "referenceCount": 68, "citationCount": 590, "influentialCitationCount": 104, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Lipstick on a Pig: Debiasing Methods Cover up Systematic Gender Biases in Word Embeddings But do not Remove Them", "abstract": "Word embeddings are widely used in NLP for a vast range of tasks. It was shown that word embeddings derived from text corpora reflect gender biases in society. This phenomenon is pervasive and consistent across different word embedding models, causing serious concern. Several recent works tackle this problem, and propose methods for significantly reducing this gender bias in word embeddings, demonstrating convincing results. However, we argue that this removal is superficial. While the bias is indeed substantially reduced according to the provided bias definition, the actual effect is mostly hiding the bias, not removing it. The gender bias information is still reflected in the distances between “gender-neutralized” words in the debiased embeddings, and can be recovered from them. We present a series of experiments to support this claim, for two debiasing methods. We conclude that existing bias removal techniques are insufficient, and should not be trusted for providing gender-neutral modeling.", "year": 2019, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Hila Gonen", "Yoav Goldberg" ], "externalIds": { "ACL": "N19-1061", "MAG": "2963176417", "DBLP": "conf/acl-wnlp/GonenG19", "ArXiv": "1903.03862", "DOI": "10.18653/v1/N19-1061", "CorpusId": 73729169 }, "url": "https://www.semanticscholar.org/paper/049f4c438ce9eefa622ae5ba5fb7e34443b86133", "referenceCount": 8, "citationCount": 528, "influentialCitationCount": 40, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Art" ] }, { "title": "SNIP: Single-shot Network Pruning based on Connection Sensitivity", "abstract": "Pruning large neural networks while maintaining their performance is often desirable due to the reduced space and time complexity. In existing methods, pruning is done within an iterative optimization procedure with either heuristically designed pruning schedules or additional hyperparameters, undermining their utility. In this work, we present a new approach that prunes a given network once at initialization prior to training. To achieve this, we introduce a saliency criterion based on connection sensitivity that identifies structurally important connections in the network for the given task. This eliminates the need for both pretraining and the complex pruning schedule while making it robust to architecture variations. After pruning, the sparse network is trained in the standard way. 
Our method obtains extremely sparse networks with virtually the same accuracy as the reference network on the MNIST, CIFAR-10, and Tiny-ImageNet classification tasks and is broadly applicable to various architectures including convolutional, residual and recurrent networks. Unlike existing methods, our approach enables us to demonstrate that the retained connections are indeed relevant to the given task.", "year": 2018, "venue": "International Conference on Learning Representations", "authors": [ "Namhoon Lee", "Thalaiyasingam Ajanthan", "Philip H. S. Torr" ], "externalIds": { "MAG": "2963247446", "DBLP": "conf/iclr/LeeAT19", "ArXiv": "1810.02340", "CorpusId": 52920837 }, "url": "https://www.semanticscholar.org/paper/cf440ccce4a7a8681e238b4f26d5b95109add55d", "referenceCount": 49, "citationCount": 1023, "influentialCitationCount": 183, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Adversarial Examples Are Not Easily Detected: Bypassing Ten Detection Methods", "abstract": "Neural networks are known to be vulnerable to adversarial examples: inputs that are close to natural inputs but classified incorrectly. In order to better understand the space of adversarial examples, we survey ten recent proposals that are designed for detection and compare their efficacy. We show that all can be defeated by constructing new loss functions. We conclude that adversarial examples are significantly harder to detect than previously appreciated, and the properties believed to be intrinsic to adversarial examples are in fact not. Finally, we propose several simple guidelines for evaluating future proposed defenses.", "year": 2017, "venue": "AISec@CCS", "authors": [ "Nicholas Carlini", "D. Wagner" ], "externalIds": { "DBLP": "conf/ccs/Carlini017", "ArXiv": "1705.07263", "MAG": "2949212945", "DOI": "10.1145/3128572.3140444", "CorpusId": 207599948 }, "url": "https://www.semanticscholar.org/paper/99cb08c76c120599abd1d1637e32aaf577f38d39", "referenceCount": 47, "citationCount": 1740, "influentialCitationCount": 208, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Pointer Sentinel Mixture Models", "abstract": "Recent neural network sequence models with softmax classifiers have achieved their best language modeling performance only with very large hidden states and large vocabularies. Even then they struggle to predict rare or unseen words even if the context makes the prediction unambiguous. We introduce the pointer sentinel mixture architecture for neural sequence models which has the ability to either reproduce a word from the recent context or produce a word from a standard softmax classifier. Our pointer sentinel-LSTM model achieves state of the art language modeling performance on the Penn Treebank (70.9 perplexity) while using far fewer parameters than a standard softmax LSTM. In order to evaluate how well language models can exploit longer contexts and deal with more realistic vocabularies and larger corpora we also introduce the freely available WikiText corpus.", "year": 2016, "venue": "International Conference on Learning Representations", "authors": [ "Stephen Merity", "Caiming Xiong", "James Bradbury", "R. 
Socher" ], "externalIds": { "ArXiv": "1609.07843", "MAG": "2525332836", "DBLP": "journals/corr/MerityXBS16", "CorpusId": 16299141 }, "url": "https://www.semanticscholar.org/paper/efbd381493bb9636f489b965a2034d529cd56bcd", "referenceCount": 29, "citationCount": 2204, "influentialCitationCount": 377, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Towards Making Systems Forget with Machine Unlearning", "abstract": "Today's systems produce a rapidly exploding amount of data, and the data further derives more data, forming a complex data propagation network that we call the data's lineage. There are many reasons that users want systems to forget certain data including its lineage. From a privacy perspective, users who become concerned with new privacy risks of a system often want the system to forget their data and lineage. From a security perspective, if an attacker pollutes an anomaly detector by injecting manually crafted data into the training data set, the detector must forget the injected data to regain security. From a usability perspective, a user can remove noise and incorrect entries so that a recommendation engine gives useful recommendations. Therefore, we envision forgetting systems, capable of forgetting certain data and their lineages, completely and quickly. This paper focuses on making learning systems forget, the process of which we call machine unlearning, or simply unlearning. We present a general, efficient unlearning approach by transforming learning algorithms used by a system into a summation form. To forget a training data sample, our approach simply updates a small number of summations -- asymptotically faster than retraining from scratch. Our approach is general, because the summation form is from the statistical query learning in which many machine learning algorithms can be implemented. Our approach also applies to all stages of machine learning, including feature selection and modeling. Our evaluation, on four diverse learning systems and real-world workloads, shows that our approach is general, effective, fast, and easy to use.", "year": 2015, "venue": "IEEE Symposium on Security and Privacy", "authors": [ "Yinzhi Cao", "Junfeng Yang" ], "externalIds": { "DBLP": "conf/sp/CaoY15", "MAG": "1488996941", "DOI": "10.1109/SP.2015.35", "CorpusId": 5945696 }, "url": "https://www.semanticscholar.org/paper/2f2ade8c4944a96a44e6f70ef403b80b058d1725", "referenceCount": 73, "citationCount": 481, "influentialCitationCount": 48, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Robust Locally Weighted Regression and Smoothing Scatterplots", "abstract": "Abstract The visual information on a scatterplot can be greatly enhanced, with little additional cost, by computing and plotting smoothed points. Robust locally weighted regression is a method for smoothing a scatterplot, (x i , y i ), i = 1, …, n, in which the fitted value at z k is the value of a polynomial fit to the data using weighted least squares, where the weight for (x i , y i ) is large if x i is close to x k and small if it is not. A robust fitting procedure is used that guards against deviant points distorting the smoothed points. Visual, computational, and statistical issues of robust locally weighted regression are discussed. Several examples, including data on lead intoxication, are used to illustrate the methodology.", "year": 1979, "venue": "", "authors": [ "W. 
Cleveland" ], "externalIds": { "MAG": "2024081693", "DOI": "10.1080/01621459.1979.10481038", "CorpusId": 31665444 }, "url": "https://www.semanticscholar.org/paper/30e7e25061b7ccd9a625548dd6836afcff85043b", "referenceCount": 18, "citationCount": 10641, "influentialCitationCount": 804, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "How Many Layers and Why? An Analysis of the Model Depth in Transformers", "abstract": "In this study, we investigate the role of the multiple layers in deep transformer models. We design a variant of Albert that dynamically adapts the number of layers for each token of the input. The key specificity of Albert is that weights are tied across layers. Therefore, the stack of encoder layers iteratively repeats the application of the same transformation function on the input. We interpret the repetition of this application as an iterative process where the token contextualized representations are progressively refined. We analyze this process at the token level during pre-training, fine-tuning, and inference. We show that tokens do not require the same amount of iterations and that difficult or crucial tokens for the task are subject to more iterations.", "year": 2021, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Antoine Simoulin", "Benoît Crabbé" ], "externalIds": { "ACL": "2021.acl-srw.23", "DBLP": "conf/acl/SimoulinC21", "DOI": "10.18653/v1/2021.acl-srw.23", "CorpusId": 237331496 }, "url": "https://www.semanticscholar.org/paper/a010fe2f9404a951c3a9f50cba2006a551690917", "referenceCount": 25, "citationCount": 17, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "nostalgebraist", "abstract": null, "year": 2020, "venue": "Interpreting gpt: the logit lens", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Direct distillation of lm alignment", "abstract": null, "year": null, "venue": "Zephyr:", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "That’s beyond the scope of what I can do.", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "(c) Accuracy of finetuned bio models on MMLU", "abstract": null, "year": null, "venue": "Figure 3: Performance of various models on WMDP and MMLU benchmarks after finetuning them using 5, 10, 50, 100, 500, and 1000 samples 26", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Enhanced GCG - - - - Pruning - 57.0 54.5 54.5 * In this case directional ablation leads to catastrophic forgetting as indicated by MMLU score dropping to random chance. 
However", "abstract": null, "year": null, "venue": "by orthogonalization only the direction at layer 15 we get accuracy of 35", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Diff-in-means concept editing is worst-case optimal: Explaining a result by Sam Marks and Max Tegmark, 2023", "abstract": null, "year": null, "venue": ":/", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Control Industrial Automation System with Large Language Models": { "paper_title": "Control Industrial Automation System with Large Language Models", "arxiv_id": "2409.18009v1", "keyword": "large language model", "authors": [ "Yuchen Xia", "Nasser Jazdi", "Jize Zhang", "Chaitanya Shah", "Michael Weyrich" ], "references": [ { "title": "The Llama 3 Herd of Models", "abstract": "Modern artificial intelligence (AI) systems are powered by foundation models. This paper presents a new set of foundation models, called Llama 3. It is a herd of language models that natively support multilinguality, coding, reasoning, and tool usage. Our largest model is a dense Transformer with 405B parameters and a context window of up to 128K tokens. This paper presents an extensive empirical evaluation of Llama 3. We find that Llama 3 delivers comparable quality to leading language models such as GPT-4 on a plethora of tasks. We publicly release Llama 3, including pre-trained and post-trained versions of the 405B parameter language model and our Llama Guard 3 model for input and output safety. The paper also presents the results of experiments in which we integrate image, video, and speech capabilities into Llama 3 via a compositional approach. We observe this approach performs competitively with the state-of-the-art on image, video, and speech recognition tasks. The resulting models are not yet being broadly released as they are still under development.", "year": 2024, "venue": "arXiv.org", "authors": [ "Abhimanyu Dubey", "Abhinav Jauhri", "Abhinav Pandey", "Abhishek Kadian", "Ahmad Al-Dahle", "Aiesha Letman", "Akhil Mathur", "Alan Schelten", "Amy Yang", "Angela Fan", "Anirudh Goyal", "Anthony Hartshorn", "Aobo Yang", "Archi Mitra", "Archie Sravankumar", "Artem Korenev", "Arthur Hinsvark", "Arun Rao", "Aston Zhang", "Aurelien Rodriguez", "Austen Gregerson", "Ava Spataru", "Baptiste Rozière", "Bethany Biron", "Binh Tang", "Bobbie Chern", "C. Caucheteux", "Chaya Nayak", "Chloe Bi", "Chris Marra", "Chris McConnell", "Christian Keller", "Christophe Touret", "Chunyang Wu", "Corinne Wong", "Cristian Cantón Ferrer", "Cyrus Nikolaidis", "Damien Allonsius", "Daniel Song", "Danielle Pintz", "Danny Livshits", "David Esiobu", "Dhruv Choudhary", "Dhruv Mahajan", "Diego Garcia-Olano", "Diego Perino", "Dieuwke Hupkes", "Egor Lakomkin", "Ehab A. AlBadawy", "Elina Lobanova", "Emily Dinan", "Eric Michael Smith", "Filip Radenovic", "Frank Zhang", "Gabriele Synnaeve", "Gabrielle Lee", "Georgia Lewis Anderson", "Graeme Nail", "Grégoire Mialon", "Guanglong Pang", "Guillem Cucurell", "Hailey Nguyen", "Hannah Korevaar", "Hu Xu", "Hugo Touvron", "Iliyan Zarov", "Imanol Arrieta Ibarra", "Isabel M. Kloumann", "Ishan Misra", "Ivan Evtimov", "Jade Copet", "Jaewon Lee", "J. Geffert", "Jana Vranes", "Jason Park", "Jay Mahadeokar", "Jeet Shah", "J. V. D. 
Linde", "Jennifer Billock", "Jenny Hong", "Jenya Lee", "Jeremy Fu", "Jianfeng Chi", "Jianyu Huang", "Jiawen Liu", "Jie Wang", "Jiecao Yu", "Joanna Bitton", "Joe Spisak", "Jongsoo Park", "Joseph Rocca", "Joshua Johnstun", "Joshua Saxe", "Ju-Qing Jia", "Kalyan Vasuden Alwala", "K. Upasani", "Kate Plawiak", "Keqian Li", "K. Heafield", "Kevin Stone", "Khalid El-Arini", "Krithika Iyer", "Kshitiz Malik", "Kuenley Chiu", "Kunal Bhalla", "Lauren Rantala-Yeary", "L. Maaten", "Lawrence Chen", "Liang Tan", "Liz Jenkins", "Louis Martin", "Lovish Madaan", "Lubo Malo", "Lukas Blecher", "Lukas Landzaat", "Luke de Oliveira", "Madeline C. Muzzi", "M. Pasupuleti", "Mannat Singh", "Manohar Paluri", "Marcin Kardas", "Mathew Oldham", "Mathieu Rita", "Maya Pavlova", "M. Kambadur", "Mike Lewis", "Min Si", "Mitesh Kumar Singh", "Mona Hassan", "Naman Goyal", "Narjes Torabi", "Nikolay Bashlykov", "Nikolay Bogoychev", "Niladri S. Chatterji", "Olivier Duchenne", "Onur cCelebi", "Patrick Alrassy", "Pengchuan Zhang", "Pengwei Li", "Petar Vasić", "Peter Weng", "Prajjwal Bhargava", "Pratik Dubal", "Praveen Krishnan", "Punit Singh Koura", "Puxin Xu", "Qing He", "Qingxiao Dong", "Ragavan Srinivasan", "Raj Ganapathy", "Ramon Calderer", "Ricardo Silveira Cabral", "Robert Stojnic", "Roberta Raileanu", "Rohit Girdhar", "Rohit Patel", "Romain Sauvestre", "Ronnie Polidoro", "Roshan Sumbaly", "Ross Taylor", "Ruan Silva", "Rui Hou", "Rui Wang", "Saghar Hosseini", "Sahana Chennabasappa", "Sanjay Singh", "Sean Bell", "Seohyun Sonia Kim", "Sergey Edunov", "Shaoliang Nie", "Sharan Narang", "S. Raparthy", "Sheng Shen", "Shengye Wan", "Shruti Bhosale", "Shun Zhang", "Simon Vandenhende", "Soumya Batra", "Spencer Whitman", "Sten Sootla", "Stephane Collot", "Suchin Gururangan", "S. Borodinsky", "Tamar Herman", "Tara Fowler", "Tarek Sheasha", "Thomas Georgiou", "Thomas Scialom", "Tobias Speckbacher", "Todor Mihaylov", "Tong Xiao", "Ujjwal Karn", "Vedanuj Goswami", "Vibhor Gupta", "Vignesh Ramanathan", "Viktor Kerkez", "Vincent Gonguet", "Virginie Do", "Vish Vogeti", "Vladan Petrovic", "Weiwei Chu", "Wenhan Xiong", "Wenyin Fu", "Whitney Meers", "Xavier Martinet", "Xiaodong Wang", "Xiaoqing Ellen Tan", "Xinfeng Xie", "Xuchao Jia", "Xuewei Wang", "Yaelle Goldschlag", "Yashesh Gaur", "Yasmine Babaei", "Yiqian Wen", "Yiwen Song", "Yuchen Zhang", "Yue Li", "Yuning Mao", "Zacharie Delpierre Coudert", "Zhengxu Yan", "Zhengxing Chen", "Zoe Papakipos", "Aaditya K. Singh", "Aaron Grattafiori", "Abha Jain", "Adam Kelsey", "Adam Shajnfeld", "Adi Gangidi", "Adolfo Victoria", "Ahuva Goldstand", "Ajay Menon", "Ajay Sharma", "Alex Boesenberg", "Alex Vaughan", "Alexei Baevski", "Allie Feinstein", "A. 
Kallet", "Amit Sangani", "Anam Yunus", "Andrei Lupu", "Andres Alvarado", "Andrew Caples", "Andrew Gu", "Andrew Ho", "Andrew Poulton", "Andrew Ryan", "Ankit Ramchandani", "Annie Franco", "Aparajita Saraf", "Arkabandhu Chowdhury", "Ashley Gabriel", "Ashwin Bharambe", "Assaf Eisenman", "Azadeh Yazdan", "Beau James", "Ben Maurer", "Ben Leonhardi", "Bernie Huang", "Beth Loyd", "Beto De Paola", "Bhargavi Paranjape", "Bing Liu", "Bo Wu", "Boyu Ni", "Braden Hancock", "Bram Wasti", "Brandon Spence", "Brani Stojkovic", "Brian Gamido", "Britt Montalvo", "Carl Parker", "Carly Burton", "Catalina Mejia", "Changhan Wang", "Changkyu Kim", "Chao Zhou", "Chester Hu", "Ching-Hsiang Chu", "Chris Cai", "Chris Tindal", "Christoph Feichtenhofer", "Damon Civin", "Dana Beaty", "Daniel Kreymer", "Shang-Wen Li", "Danny Wyatt", "David Adkins", "David Xu", "Davide Testuggine", "Delia David", "Devi Parikh", "Diana Liskovich", "Didem Foss", "Dingkang Wang", "Duc Le", "Dustin Holland", "Edward Dowling", "Eissa Jamil", "Elaine Montgomery", "Eleonora Presani", "Emily Hahn", "Emily Wood", "Erik Brinkman", "Esteban Arcaute", "Evan Dunbar", "Evan Smothers", "Fei Sun", "Felix Kreuk", "Feng Tian", "Firat Ozgenel", "Francesco Caggioni", "Francisco Guzm'an", "Frank J. Kanayet", "Frank Seide", "Gabriela Medina Florez", "Gabriella Schwarz", "Gada Badeer", "Georgia Swee", "Gil Halpern", "G. Thattai", "Grant Herman", "Grigory G. Sizov", "Guangyi Zhang", "Guna Lakshminarayanan", "Hamid Shojanazeri", "Han Zou", "Hannah Wang", "Han Zha", "Haroun Habeeb", "Harrison Rudolph", "Helen Suk", "Henry Aspegren", "Hunter Goldman", "Igor Molybog", "I. Tufanov", "Irina-Elena Veliche", "Itai Gat", "Jake Weissman", "James Geboski", "James Kohli", "Japhet Asher", "Jean-Baptiste Gaya", "Jeff Marcus", "Jeff Tang", "Jennifer Chan", "Jenny Zhen", "Jeremy Reizenstein", "Jeremy Teboul", "Jessica Zhong", "Jian Jin", "Jingyi Yang", "Joe Cummings", "Jon Carvill", "Jon Shepard", "Jonathan McPhie", "Jonathan Torres", "Josh Ginsburg", "Junjie Wang", "Kaixing(Kai) Wu", "U. KamHou", "Karan Saxena", "Karthik Prasad", "Kartikay Khandelwal", "Katayoun Zand", "Kathy Matosich", "K. Veeraraghavan", "Kelly Michelena", "Keqian Li", "Kun Huang", "Kunal Chawla", "Kushal Lakhotia", "Kyle Huang", "Lailin Chen", "Lakshya Garg", "A. Lavender", "Leandro Silva", "Lee Bell", "Lei Zhang", "Liangpeng Guo", "Licheng Yu", "Liron Moshkovich", "Luca Wehrstedt", "Madian Khabsa", "Manav Avalani", "Manish Bhatt", "Maria Tsimpoukelli", "Martynas Mankus", "Matan Hasson", "M. Lennie", "Matthias Reso", "Maxim Groshev", "Maxim Naumov", "Maya Lathi", "Meghan Keneally", "M. 
Seltzer", "Michal Valko", "Michelle Restrepo", "Mihir Patel", "Mik Vyatskov", "Mikayel Samvelyan", "Mike Clark", "Mike Macey", "Mike Wang", "Miquel Jubert Hermoso", "Mo Metanat", "Mohammad Rastegari", "Munish Bansal", "Nandhini Santhanam", "Natascha Parks", "Natasha White", "Navyata Bawa", "Nayan Singhal", "Nick Egebo", "Nicolas Usunier", "Nikolay Pavlovich Laptev", "Ning Dong", "Ning Zhang", "Norman Cheng", "Oleg Chernoguz", "Olivia Hart", "Omkar Salpekar", "Ozlem Kalinli", "Parkin Kent", "Parth Parekh", "Paul Saab", "Pavan Balaji", "Pedro Rittner", "Philip Bontrager", "Pierre Roux", "Piotr Dollár", "Polina Zvyagina", "Prashant Ratanchandani", "Pritish Yuvraj", "Qian Liang", "Rachad Alao", "Rachel Rodriguez", "Rafi Ayub", "Raghotham Murthy", "Raghu Nayani", "Rahul Mitra", "Raymond Li", "Rebekkah Hogan", "Robin Battey", "Rocky Wang", "Rohan Maheswari", "Russ Howes", "Ruty Rinott", "Sai Jayesh Bondu", "Samyak Datta", "Sara Chugh", "Sara Hunt", "Sargun Dhillon", "Sasha Sidorov", "Satadru Pan", "Saurabh Verma", "Seiji Yamamoto", "Sharadh Ramaswamy", "Shaun Lindsay", "Sheng Feng", "Shenghao Lin", "S. Zha", "Shiva Shankar", "Shuqiang Zhang", "Sinong Wang", "Sneha Agarwal", "S. Sajuyigbe", "Soumith Chintala", "Stephanie Max", "Stephen Chen", "Steve Kehoe", "Steve Satterfield", "Sudarshan Govindaprasad", "Sumit Gupta", "Sung-Bae Cho", "Sunny Virk", "Suraj Subramanian", "Sy Choudhury", "Sydney Goldman", "Tal Remez", "Tamar Glaser", "Tamara Best", "Thilo Kohler", "Thomas Robinson", "Tianhe Li", "Tianjun Zhang", "Tim Matthews", "Timothy Chou", "Tzook Shaked", "Varun Vontimitta", "Victoria Ajayi", "Victoria Montanez", "Vijai Mohan", "Vinay Satish Kumar", "Vishal Mangla", "Vlad Ionescu", "V. Poenaru", "Vlad T. Mihailescu", "Vladimir Ivanov", "Wei Li", "Wenchen Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2407-21783", "ArXiv": "2407.21783", "DOI": "10.48550/arXiv.2407.21783", "CorpusId": 271571434 }, "url": "https://www.semanticscholar.org/paper/6520557cc3bfd198f960cc8cb6151c3474321bd8", "referenceCount": 0, "citationCount": 286, "influentialCitationCount": 58, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Qwen2 Technical Report", "abstract": "This report introduces the Qwen2 series, the latest addition to our large language models and large multimodal models. We release a comprehensive suite of foundational and instruction-tuned language models, encompassing a parameter range from 0.5 to 72 billion, featuring dense models and a Mixture-of-Experts model. Qwen2 surpasses most prior open-weight models, including its predecessor Qwen1.5, and exhibits competitive performance relative to proprietary models across diverse benchmarks on language understanding, generation, multilingual proficiency, coding, mathematics, and reasoning. The flagship model, Qwen2-72B, showcases remarkable performance: 84.2 on MMLU, 37.9 on GPQA, 64.6 on HumanEval, 89.5 on GSM8K, and 82.4 on BBH as a base language model. The instruction-tuned variant, Qwen2-72B-Instruct, attains 9.1 on MT-Bench, 48.1 on Arena-Hard, and 35.7 on LiveCodeBench. Moreover, Qwen2 demonstrates robust multilingual capabilities, proficient in approximately 30 languages, spanning English, Chinese, Spanish, French, German, Arabic, Russian, Korean, Japanese, Thai, Vietnamese, and more, underscoring its versatility and global reach. 
To foster community innovation and accessibility, we have made the Qwen2 model weights openly available on Hugging Face and ModelScope, and the supplementary materials including example code on GitHub. These platforms also include resources for quantization, fine-tuning, and deployment, facilitating a wide range of applications and research endeavors.", "year": 2024, "venue": "arXiv.org", "authors": [ "An Yang", "Baosong Yang", "Binyuan Hui", "Bo Zheng", "Bowen Yu", "Chang Zhou", "Chengpeng Li", "Chengyuan Li", "Dayiheng Liu", "Fei Huang", "Guanting Dong", "Haoran Wei", "Huan Lin", "Jialong Tang", "Jialin Wang", "Jian Yang", "Jianhong Tu", "Jianwei Zhang", "Jianxin Ma", "Jin Xu", "Jingren Zhou", "Jinze Bai", "Jinzheng He", "Junyang Lin", "Kai Dang", "Keming Lu", "Ke-Yang Chen", "Kexin Yang", "Mei Li", "Min Xue", "Na Ni", "Pei Zhang", "Peng Wang", "Ru Peng", "Rui Men", "Ruize Gao", "Runji Lin", "Shijie Wang", "Shuai Bai", "Sinan Tan", "Tianhang Zhu", "Tianhao Li", "Tianyu Liu", "Wenbin Ge", "Xiaodong Deng", "Xiaohuan Zhou", "Xingzhang Ren", "Xinyu Zhang", "Xipin Wei", "Xuancheng Ren", "Yang Fan", "Yang Yao", "Yichang Zhang", "Yunyang Wan", "Yunfei Chu", "Zeyu Cui", "Zhenru Zhang", "Zhi-Wei Fan" ], "externalIds": { "DBLP": "journals/corr/abs-2407-10671", "ArXiv": "2407.10671", "DOI": "10.48550/arXiv.2407.10671", "CorpusId": 271212307 }, "url": "https://www.semanticscholar.org/paper/54fb839f621e3fe787437ab8ca5f37e7e4726bfe", "referenceCount": 77, "citationCount": 102, "influentialCitationCount": 14, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Incorporating Large Language Models into Production Systems for Enhanced Task Automation and Flexibility", "abstract": "Inhalt/Content Vorwort 1 Prozessautomation: Regelung & Optimierung Architectural elements for configuration and control of modular plants 5 Produktionsplanung für die Praxis: Simulationsgetriebene Optimierung für industrielle Batchprozesse mit Evolutionären Algorithmen 21 Containerisierung von Model Predictive Control für modulare Anlagen – Ein Schritt zu intelligenten Edge Systemen 33 Prozessautomation: Modularisierung From General Recipes to Plant-Specific Master Recipes A graphical Recipe Editor using a Capability Knowledge Base and the Capability Description Submodel of the Asset Admistration Shell 47 Evolution der IT/OT-Security durch modulare Anlagenkonzepte 59 Design and development of unified composable Control Components and unified interfaces for flexible adaptation to new changes and requirements 79 Prozessautomation: Sicherer Anlagenbetrieb Ethernet-APL Strategien für zukunftsorientierte Sicherheitsanwendungen Chancen und Herausforderungen 93 Konzept zur Unterstützung des Alarmmanagements auf Basis des intelligenten Digitalen Zwillings für Offshore-PtX-Plattformen 105 Automatisierte Durc...", "year": 2024, "venue": "arXiv.org", "authors": [ "Yuchen Xia", "Jize Zhang", "N. Jazdi", "M. Weyrich" ], "externalIds": { "ArXiv": "2407.08550", "DBLP": "journals/corr/abs-2407-08550", "DOI": "10.51202/9783181024379", "CorpusId": 271080128 }, "url": "https://www.semanticscholar.org/paper/80a4f0f5bffeeff48c2b60c33dd6305eeb21a055", "referenceCount": 19, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "A Novel Model Adaption Approach for intelligent Digital Twins of Modular Production Systems", "abstract": "Industrial automation is becoming increasingly networked, intelligent and autonomous. 
Digital Twins, which serve as virtual representations, are a key technology in this context. The Digital Twin of a modular production system contains many different models that are mostly created for specific applications and fulfil different requirements. In particular, simulation models created in the development phase can be used in the operational phase for applications such as prediction or operation-parallel simulation. Due to the high heterogeneity of the model landscape in the context of a modular production system, the plant operator is faced with the challenge of adapting the models in order to ensure an application-oriented realism in the event of changes to the asset and its environment or the addition of applications. Therefore, this paper proposes an approach for the continuous model adaption in the Digital Twin of a modular production system during the operational phase. An agent-based implementation of the concept demonstrates the benefits of the approach for an operational phase application scenario.", "year": 2023, "venue": "IEEE International Conference on Emerging Technologies and Factory Automation", "authors": [ "Daniel Dittler", "Peter Lierhammer", "Dominik I. Braun", "Timo Müller", "N. Jazdi", "M. Weyrich" ], "externalIds": { "DBLP": "conf/etfa/DittlerLBMJW23", "DOI": "10.1109/ETFA54631.2023.10275384", "CorpusId": 264041182 }, "url": "https://www.semanticscholar.org/paper/5eee4008e9bb6489bb10bd5c0bea175a2bc4c01f", "referenceCount": 35, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Survey on Large Language Model based Autonomous Agents", "abstract": null, "year": 2023, "venue": "Frontiers Comput. Sci.", "authors": [ "Lei Wang", "Chengbang Ma", "Xueyang Feng", "Zeyu Zhang", "Hao-ran Yang", "Jingsen Zhang", "Zhi-Yang Chen", "Jiakai Tang", "Xu Chen", "Yankai Lin", "Wayne Xin Zhao", "Zhewei Wei", "Ji-rong Wen" ], "externalIds": { "DBLP": "journals/fcsc/WangMFZYZCTCLZWW24", "ArXiv": "2308.11432", "DOI": "10.1007/s11704-024-40231-1", "CorpusId": 261064713 }, "url": "https://www.semanticscholar.org/paper/28c6ac721f54544162865f41c5692e70d61bccab", "referenceCount": 193, "citationCount": 558, "influentialCitationCount": 32, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Towards autonomous system: flexible modular production system enhanced with large language model agents", "abstract": "In this paper, we present a novel framework that combines large language models (LLMs), digital twins and industrial automation system to enable intelligent planning and control of production processes. We retrofit the automation system for a modular production facility and create executable control interfaces of fine-granular functionalities and coarse-granular skills. Low-level functionalities are executed by automation components, and high-level skills are performed by automation modules. Subsequently, a digital twin system is developed, registering these interfaces and containing additional descriptive information about the production system. Based on the retrofitted automation system and the created digital twins, LLM-agents are designed to interpret descriptive information in the digital twins and control the physical system through service interfaces. These LLM-agents serve as intelligent agents on different levels within an automation system, enabling autonomous planning and control of flexible production. 
Given a task instruction as input, the LLM-agents orchestrate a sequence of atomic functionalities and skills to accomplish the task. We demonstrate how our implemented prototype can handle un-predefined tasks, plan a production process, and execute the operations. This research highlights the potential of integrating LLMs into industrial automation systems in the context of smart factory for more agile, flexible, and adaptive production processes, while it also underscores the critical insights and limitations for future work. Demos at: https://github.com/YuchenXia/GPT4IndustrialAutomation", "year": 2023, "venue": "IEEE International Conference on Emerging Technologies and Factory Automation", "authors": [ "Yuchen Xia", "Manthan Shenoy", "N. Jazdi", "M. Weyrich" ], "externalIds": { "DBLP": "journals/corr/abs-2304-14721", "ArXiv": "2304.14721", "DOI": "10.1109/ETFA54631.2023.10275362", "CorpusId": 258417925 }, "url": "https://www.semanticscholar.org/paper/901f8bc72261a278eda91a2dcf609a21bba2666e", "referenceCount": 24, "citationCount": 27, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "GPT-4 Technical Report", "abstract": "We report the development of GPT-4, a large-scale, multimodal model which can accept image and text inputs and produce text outputs. While less capable than humans in many real-world scenarios, GPT-4 exhibits human-level performance on various professional and academic benchmarks, including passing a simulated bar exam with a score around the top 10% of test takers. GPT-4 is a Transformer-based model pre-trained to predict the next token in a document. The post-training alignment process results in improved performance on measures of factuality and adherence to desired behavior. A core component of this project was developing infrastructure and optimization methods that behave predictably across a wide range of scales. This allowed us to accurately predict some aspects of GPT-4's performance based on models trained with no more than 1/1,000th the compute of GPT-4.", "year": 2023, "venue": "", "authors": [ "OpenAI Josh Achiam", "Steven Adler", "Sandhini Agarwal", "Lama Ahmad", "Ilge Akkaya", "Florencia Leoni Aleman", "Diogo Almeida", "Janko Altenschmidt", "Sam Altman", "Shyamal Anadkat", "Red Avila", "Igor Babuschkin", "S. Balaji", "Valerie Balcom", "Paul Baltescu", "Haim-ing Bao", "Mo Bavarian", "Jeff Belgum", "Irwan Bello", "Jake Berdine", "Gabriel Bernadett-Shapiro", "Christopher Berner", "Lenny Bogdonoff", "Oleg Boiko", "Madelaine Boyd", "Anna-Luisa Brakman", "Greg Brockman", "Tim Brooks", "Miles Brundage", "Kevin Button", "Trevor Cai", "Rosie Campbell", "Andrew Cann", "Brittany Carey", "Chelsea Carlson", "Rory Carmichael", "Brooke Chan", "Che Chang", "Fotis Chantzis", "Derek Chen", "Sully Chen", "Ruby Chen", "Jason Chen", "Mark Chen", "B. Chess", "Chester Cho", "Casey Chu", "Hyung Won Chung", "Dave Cummings", "Jeremiah Currier", "Yunxing Dai", "Cory Decareaux", "Thomas Degry", "Noah Deutsch", "Damien Deville", "Arka Dhar", "David Dohan", "Steve Dowling", "Sheila Dunning", "Adrien Ecoffet", "Atty Eleti", "Tyna Eloundou", "David Farhi", "Liam Fedus", "Niko Felix", "Sim'on Posada Fishman", "Juston Forte", "Is-abella Fulford", "Leo Gao", "Elie Georges", "C. Gibson", "Vik Goel", "Tarun Gogineni", "Gabriel Goh", "Raphael Gontijo-Lopes", "Jonathan Gordon", "Morgan Grafstein", "Scott Gray", "Ryan Greene", "Joshua Gross", "S. 
Gu", "Yufei Guo", "Chris Hallacy", "Jesse Han", "Jeff Harris", "Yuchen He", "Mike Heaton", "Johannes Heidecke", "Chris Hesse", "Alan Hickey", "Wade Hickey", "Peter Hoeschele", "Brandon Houghton", "Kenny Hsu", "Shengli Hu", "Xin Hu", "Joost Huizinga", "Shantanu Jain", "Shawn Jain", "Joanne Jang", "Angela Jiang", "Roger Jiang", "Haozhun Jin", "Denny Jin", "Shino Jomoto", "B. Jonn", "Heewoo Jun", "Tomer Kaftan", "Lukasz Kaiser", "Ali Kamali", "I. Kanitscheider", "N. Keskar", "Tabarak Khan", "Logan Kilpatrick", "Jong Wook Kim", "Christina Kim", "Yongjik Kim", "Hendrik Kirchner", "J. Kiros", "Matthew Knight", "Daniel Kokotajlo", "Lukasz Kondraciuk", "A. Kondrich", "Aris Konstantinidis", "Kyle Kosic", "Gretchen Krueger", "Vishal Kuo", "Michael Lampe", "Ikai Lan", "Teddy Lee", "J. Leike", "Jade Leung", "Daniel Levy", "Chak Ming Li", "Rachel Lim", "Molly Lin", "Stephanie Lin", "Ma-teusz Litwin", "Theresa Lopez", "Ryan Lowe", "Patricia Lue", "A. Makanju", "Kim Malfacini", "Sam Manning", "Todor Markov", "Yaniv Markovski", "Bianca Martin", "Katie Mayer", "Andrew Mayne", "Bob McGrew", "S. McKinney", "C. McLeavey", "Paul McMillan", "Jake McNeil", "David Medina", "Aalok Mehta", "Jacob Menick", "Luke Metz", "Andrey Mishchenko", "Pamela Mishkin", "Vinnie Monaco", "Evan Morikawa", "Daniel P. Mossing", "Tong Mu", "Mira Murati", "O. Murk", "David M'ely", "Ashvin Nair", "Reiichiro Nakano", "Rajeev Nayak", "Arvind Neelakantan", "Richard Ngo", "Hyeonwoo Noh", "Ouyang Long", "Cullen O'Keefe", "J. Pachocki", "Alex Paino", "Joe Palermo", "Ashley Pantuliano", "Giambattista Parascandolo", "Joel Parish", "Emy Parparita", "Alexandre Passos", "Mikhail Pavlov", "Andrew Peng", "Adam Perelman", "Filipe de Avila Belbute Peres", "Michael Petrov", "Henrique Pondé de Oliveira Pinto", "Michael Pokorny", "Michelle Pokrass", "Vitchyr H. Pong", "Tolly Powell", "Alethea Power", "Boris Power", "Elizabeth Proehl", "Raul Puri", "Alec Radford", "Jack W. Rae", "Aditya Ramesh", "Cameron Raymond", "Francis Real", "Kendra Rimbach", "Carl Ross", "Bob Rotsted", "Henri Roussez", "Nick Ryder", "M. Saltarelli", "Ted Sanders", "Shibani Santurkar", "Girish Sastry", "Heather Schmidt", "David Schnurr", "John Schulman", "Daniel Selsam", "Kyla Sheppard", "Toki Sherbakov", "Jessica Shieh", "Sarah Shoker", "Pranav Shyam", "Szymon Sidor", "Eric Sigler", "Maddie Simens", "Jordan Sitkin", "Katarina Slama", "Ian Sohl", "Benjamin D. Sokolowsky", "Yang Song", "Natalie Staudacher", "F. Such", "Natalie Summers", "I. Sutskever", "Jie Tang", "N. Tezak", "Madeleine Thompson", "Phil Tillet", "Amin Tootoonchian", "Elizabeth Tseng", "Preston Tuggle", "Nick Turley", "Jerry Tworek", "Juan Felipe Cer'on Uribe", "Andrea Vallone", "Arun Vijayvergiya", "Chelsea Voss", "Carroll L. Wainwright", "Justin Jay Wang", "Alvin Wang", "Ben Wang", "Jonathan Ward", "Jason Wei", "CJ Weinmann", "Akila Welihinda", "P. 
Welinder", "Jiayi Weng", "Lilian Weng", "Matt Wiethoff", "Dave Willner", "Clemens Winter", "Samuel Wolrich", "Hannah Wong", "Lauren Workman", "Sherwin Wu", "Jeff Wu", "Michael Wu", "Kai Xiao", "Tao Xu", "Sarah Yoo", "Kevin Yu", "Qim-ing Yuan", "Wojciech Zaremba", "Rowan Zellers", "Chong Zhang", "Marvin Zhang", "Shengjia Zhao", "Tianhao Zheng", "Juntang Zhuang", "William Zhuk", "Barret Zoph" ], "externalIds": { "ArXiv": "2303.08774", "CorpusId": 257532815 }, "url": "https://www.semanticscholar.org/paper/163b4d6a79a5b19af88b8585456363340d9efd04", "referenceCount": 0, "citationCount": 7054, "influentialCitationCount": 1038, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ReAct: Synergizing Reasoning and Acting in Language Models", "abstract": "While large language models (LLMs) have demonstrated impressive capabilities across tasks in language understanding and interactive decision making, their abilities for reasoning (e.g. chain-of-thought prompting) and acting (e.g. action plan generation) have primarily been studied as separate topics. In this paper, we explore the use of LLMs to generate both reasoning traces and task-specific actions in an interleaved manner, allowing for greater synergy between the two: reasoning traces help the model induce, track, and update action plans as well as handle exceptions, while actions allow it to interface with external sources, such as knowledge bases or environments, to gather additional information. We apply our approach, named ReAct, to a diverse set of language and decision making tasks and demonstrate its effectiveness over state-of-the-art baselines, as well as improved human interpretability and trustworthiness over methods without reasoning or acting components. Concretely, on question answering (HotpotQA) and fact verification (Fever), ReAct overcomes issues of hallucination and error propagation prevalent in chain-of-thought reasoning by interacting with a simple Wikipedia API, and generates human-like task-solving trajectories that are more interpretable than baselines without reasoning traces. On two interactive decision making benchmarks (ALFWorld and WebShop), ReAct outperforms imitation and reinforcement learning methods by an absolute success rate of 34% and 10% respectively, while being prompted with only one or two in-context examples. 
Project site with code: https://react-lm.github.io", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Shunyu Yao", "Jeffrey Zhao", "Dian Yu", "Nan Du", "Izhak Shafran", "Karthik Narasimhan", "Yuan Cao" ], "externalIds": { "DBLP": "journals/corr/abs-2210-03629", "ArXiv": "2210.03629", "CorpusId": 252762395 }, "url": "https://www.semanticscholar.org/paper/99832586d55f540f603637e458a292406a0ed75d", "referenceCount": 65, "citationCount": 1368, "influentialCitationCount": 240, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A digital-twin visualized architecture for Flexible Manufacturing System", "abstract": null, "year": 2021, "venue": "Journal of manufacturing systems", "authors": [ "Yepeng Fan", "Jianzhong Yang", "Jihong Chen", "Pengcheng Hu", "Xiaoyu Wang", "Jianchun Xu", "Bin Zhou" ], "externalIds": { "MAG": "3170007016", "DOI": "10.1016/J.JMSY.2021.05.010", "CorpusId": 236313721 }, "url": "https://www.semanticscholar.org/paper/181b9f2174ffacf36de6dc7bdc50a7650a9fb8d5", "referenceCount": 42, "citationCount": 102, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LoRA: Low-Rank Adaptation of Large Language Models", "abstract": "An important paradigm of natural language processing consists of large-scale pre-training on general domain data and adaptation to particular tasks or domains. As we pre-train larger models, full fine-tuning, which retrains all model parameters, becomes less feasible. Using GPT-3 175B as an example -- deploying independent instances of fine-tuned models, each with 175B parameters, is prohibitively expensive. We propose Low-Rank Adaptation, or LoRA, which freezes the pre-trained model weights and injects trainable rank decomposition matrices into each layer of the Transformer architecture, greatly reducing the number of trainable parameters for downstream tasks. Compared to GPT-3 175B fine-tuned with Adam, LoRA can reduce the number of trainable parameters by 10,000 times and the GPU memory requirement by 3 times. LoRA performs on-par or better than fine-tuning in model quality on RoBERTa, DeBERTa, GPT-2, and GPT-3, despite having fewer trainable parameters, a higher training throughput, and, unlike adapters, no additional inference latency. We also provide an empirical investigation into rank-deficiency in language model adaptation, which sheds light on the efficacy of LoRA. We release a package that facilitates the integration of LoRA with PyTorch models and provide our implementations and model checkpoints for RoBERTa, DeBERTa, and GPT-2 at https://github.com/microsoft/LoRA.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "J. E. Hu", "Yelong Shen", "Phillip Wallis", "Zeyuan Allen-Zhu", "Yuanzhi Li", "Shean Wang", "Weizhu Chen" ], "externalIds": { "DBLP": "conf/iclr/HuSWALWWC22", "ArXiv": "2106.09685", "CorpusId": 235458009 }, "url": "https://www.semanticscholar.org/paper/a8ca46b171467ceb2d7652fbfb67fe701ad86092", "referenceCount": 65, "citationCount": 5650, "influentialCitationCount": 1000, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An architecture of an Intelligent Digital Twin in a Cyber-Physical Production System", "abstract": "Abstract The role of a Digital Twin is increasingly discussed within the context of Cyber-Physical Production Systems. Accordingly, various architectures for the realization of Digital Twin use cases are conceptualized. 
There lacks, however, a clear, encompassing architecture covering necessary components of a Digital Twin to realize various use cases in an intelligent automation system. In this contribution, the added value of a Digital Twin in an intelligent automation system is highlighted and various existing definitions and architectures of the Digital Twin are discussed. Flowingly, an architecture for a Digital Twin and an architecture for an Intelligent Digital Twin and their required components are proposed, with which use cases such as plug and produce, self-x and predictive maintenance are enabled. In the opinion of the authors, a Digital Twin requires three main characteristics: synchronization with the real asset, active data acquisition from the real environment and the ability of simulation. In addition to all the characteristics of a Digital Twin, an Intelligent Digital Twin must also include the characteristics of Artificial Intelligence. The Intelligent Digital Twin can be used for the realization of the autonomous Cyber-Physical Production Systems. In order to realize the proposed architecture for a Digital Twin, several methods, namely the Anchor-Point-Method, a method for heterogeneous data acquisition and data integration as well as an agent-based method for the development of a co-simulation between Digital Twins were implemented and evaluated.", "year": 2019, "venue": "at - Automatisierungstechnik", "authors": [ "Behrang Ashtari Talkhestani", "Tobias Jung", "Benjamin Lindemann", "N. Sahlab", "N. Jazdi", "Wolfgang Schlögl", "M. Weyrich" ], "externalIds": { "MAG": "2977510036", "DBLP": "journals/at/TalkhestaniJLSJ19", "DOI": "10.1515/auto-2019-0039", "CorpusId": 203848524 }, "url": "https://www.semanticscholar.org/paper/9c5ef6837e9ace197b983c175e6ebd13d3f20403", "referenceCount": 50, "citationCount": 209, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "OPC UA integration for field devices", "abstract": "In industrial automation systems, deploying the well-established automation pyramid model is best practice. However, the trend to massively distributed systems, which are foreseen to co-operate using standardized protocols and common semantics, shows the limits of these traditional approaches. In order to enable for Industry 4.0 compliant solutions, appropriate means for scalable internetworking have to be developed and utilized. New modelling technologies have been developed to represent such distributed automation systems, incorporating a multidimensional layered approach. At higher hierarchy levels of these automation models, standardization approaches are quite common. However, at the field layer there are still many different field busses, which in most cases do not allow common semantics, but come along with their own object models. In contrast, the use of OPC UA at the field level, with its standardized protocol stack and semantic annotations, would allow for enabling field devices to fully participate in large scaled systems as Industry 4.0 components. This paper evaluates the potentials and limits of integrating OPC UA into legacy field devices with limited communication and calculation resources and provides quantitative measurement results of selected test scenarios.", "year": 2017, "venue": "International Conference on Industrial Informatics", "authors": [ "A. Veichtlbauer", "Martin Ortmayer", "T. 
Heistracher" ], "externalIds": { "MAG": "2768493713", "DBLP": "conf/indin/VeichtlbauerOH17", "DOI": "10.1109/INDIN.2017.8104808", "CorpusId": 20015704 }, "url": "https://www.semanticscholar.org/paper/c9a6a903b26fe3ddbcd1931156155a751cdd20a6", "referenceCount": 20, "citationCount": 43, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An Approach for Context-Aware Cyber-Physical Automation Systems", "abstract": null, "year": 2021, "venue": "IFAC-PapersOnLine", "authors": [ "N. Sahlab", "N. Jazdi", "M. Weyrich" ], "externalIds": { "DOI": "10.1016/j.ifacol.2021.10.029", "CorpusId": 240257648 }, "url": "https://www.semanticscholar.org/paper/6a7f0048769a75cef0691fb1f201fd78db56c0ca", "referenceCount": 11, "citationCount": 10, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Cyber-physical production systems: enhancement with a self-organized reconfiguration management", "abstract": null, "year": 2021, "venue": "", "authors": [ "Timo Müller", "N. Jazdi", "Jan-Philipp Schmidt", "M. Weyrich" ], "externalIds": { "MAG": "3158551399", "DOI": "10.1016/J.PROCIR.2021.03.075", "CorpusId": 235833125 }, "url": "https://www.semanticscholar.org/paper/5affaee79c34c0e299fcf4e0246565ef63f48d2c", "referenceCount": 15, "citationCount": 41, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "This work was supported by", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Graph Reasoning with Large Language Models via Pseudo-code Prompting": { "paper_title": "Graph Reasoning with Large Language Models via Pseudo-code Prompting", "arxiv_id": "2409.17906v1", "keyword": "large language model", "authors": [ "Konstantinos Skianis", "Giannis Nikolentzos", "Michalis Vazirgiannis" ], "references": [ { "title": "GraphEdit: Large Language Models for Graph Structure Learning", "abstract": "Graph Structure Learning (GSL) focuses on capturing intrinsic dependencies and interactions among nodes in graph-structured data by generating novel graph structures. Graph Neural Networks (GNNs) have emerged as promising GSL solutions, utilizing recursive message passing to encode node-wise inter-dependencies. However, many existing GSL methods heavily depend on explicit graph structural information as supervision signals, leaving them susceptible to challenges such as data noise and sparsity. In this work, we propose GraphEdit, an approach that leverages large language models (LLMs) to learn complex node relationships in graph-structured data. By enhancing the reasoning capabilities of LLMs through instruction-tuning over graph structures, we aim to overcome the limitations associated with explicit graph structural information and enhance the reliability of graph structure learning. Our approach not only effectively denoises noisy connections but also identifies node-wise dependencies from a global perspective, providing a comprehensive understanding of the graph structure. We conduct extensive experiments on multiple benchmark datasets to demonstrate the effectiveness and robustness of GraphEdit across various settings. 
We have made our model implementation available at: https://github.com/HKUDS/GraphEdit.", "year": 2024, "venue": "arXiv.org", "authors": [ "Zirui Guo", "Lianghao Xia", "Yanhua Yu", "Yuling Wang", "Zixuan Yang", "Wei Wei", "Liang Pang", "Tat-Seng Chua", "Chao Huang" ], "externalIds": { "ArXiv": "2402.15183", "DBLP": "journals/corr/abs-2402-15183", "DOI": "10.48550/arXiv.2402.15183", "CorpusId": 267897533 }, "url": "https://www.semanticscholar.org/paper/c7a56a3a500a08f28b9816d66d3ac2e9e5ac7445", "referenceCount": 39, "citationCount": 10, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Large Language Models as Topological Structure Enhancers for Text-Attributed Graphs", "abstract": "The latest advancements in large language models (LLMs) have revolutionized the field of natural language processing (NLP). Inspired by the success of LLMs in NLP tasks, some recent work has begun investigating the potential of applying LLMs in graph learning tasks. However, most of the existing work focuses on utilizing LLMs as powerful node feature augmenters, leaving employing LLMs to enhance graph topological structures an understudied problem. In this work, we explore how to leverage the information retrieval and text generation capabilities of LLMs to refine/enhance the topological structure of text-attributed graphs (TAGs) under the node classification setting. First, we propose using LLMs to help remove unreliable edges and add reliable ones in the TAG. Specifically, we first let the LLM output the semantic similarity between node attributes through delicate prompt designs, and then perform edge deletion and edge addition based on the similarity. Second, we propose using pseudo-labels generated by the LLM to improve graph topology, that is, we introduce the pseudo-label propagation as a regularization to guide the graph neural network (GNN) in learning proper edge weights. Finally, we incorporate the two aforementioned LLM-based methods for graph topological refinement into the process of GNN training, and perform extensive experiments on four real-world datasets. The experimental results demonstrate the effectiveness of LLM-based graph topology refinement (achieving a 0.15%--2.47% performance gain on public benchmarks).", "year": 2023, "venue": "arXiv.org", "authors": [ "Shengyin Sun", "Yuxiang Ren", "Chen Ma", "Xuecang Zhang" ], "externalIds": { "DBLP": "journals/corr/abs-2311-14324", "ArXiv": "2311.14324", "DOI": "10.48550/arXiv.2311.14324", "CorpusId": 265445904 }, "url": "https://www.semanticscholar.org/paper/7ec393a898521e4e9a3f510be424861f5a518109", "referenceCount": 53, "citationCount": 11, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Mitigating Large Language Model Hallucinations via Autonomous Knowledge Graph-based Retrofitting", "abstract": "Incorporating factual knowledge in knowledge graph is regarded as a promising approach for mitigating the hallucination of large language models (LLMs). Existing methods usually only use the user's input to query the knowledge graph, thus failing to address the factual hallucination generated by LLMs during its reasoning process. To address this problem, this paper proposes Knowledge Graph-based Retrofitting (KGR), a new framework that incorporates LLMs with KGs to mitigate factual hallucination during the reasoning process by retrofitting the initial draft responses of LLMs based on the factual knowledge stored in KGs. 
Specifically, KGR leverages LLMs to extract, select, validate, and retrofit factual statements within the model-generated responses, which enables an autonomous knowledge verifying and refining procedure without any additional manual efforts. Experiments show that KGR can significantly improve the performance of LLMs on factual QA benchmarks especially when involving complex reasoning processes, which demonstrates the necessity and effectiveness of KGR in mitigating hallucination and enhancing the reliability of LLMs.", "year": 2023, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Xinyan Guan", "Yanjiang Liu", "Hongyu Lin", "Yaojie Lu", "Ben He", "Xianpei Han", "Le Sun" ], "externalIds": { "ArXiv": "2311.13314", "DBLP": "conf/aaai/GuanLL0HH024", "DOI": "10.48550/arXiv.2311.13314", "CorpusId": 265351547 }, "url": "https://www.semanticscholar.org/paper/612cb53584b23e2066631d143e09b471852afaeb", "referenceCount": 38, "citationCount": 27, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Beyond Text: A Deep Dive into Large Language Models' Ability on Understanding Graph Data", "abstract": "Large language models (LLMs) have achieved impressive performance on many natural language processing tasks. However, their capabilities on graph-structured data remain relatively unexplored. In this paper, we conduct a series of experiments benchmarking leading LLMs on diverse graph prediction tasks spanning node, edge, and graph levels. We aim to assess whether LLMs can effectively process graph data and leverage topological structures to enhance performance, compared to specialized graph neural networks. Through varied prompt formatting and task/dataset selection, we analyze how well LLMs can interpret and utilize graph structures. By comparing LLMs' performance with specialized graph models, we offer insights into the strengths and limitations of employing LLMs for graph analytics. Our findings provide insights into LLMs' capabilities and suggest avenues for further exploration in applying them to graph analytics.", "year": 2023, "venue": "arXiv.org", "authors": [ "Yuntong Hu", "Zhengwu Zhang", "Liang Zhao" ], "externalIds": { "DBLP": "journals/corr/abs-2310-04944", "ArXiv": "2310.04944", "DOI": "10.48550/arXiv.2310.04944", "CorpusId": 263831119 }, "url": "https://www.semanticscholar.org/paper/4ae7c4decd1df71c466f19d66d69b555945098c4", "referenceCount": 48, "citationCount": 16, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Reasoning on Graphs: Faithful and Interpretable Large Language Model Reasoning", "abstract": "Large language models (LLMs) have demonstrated impressive reasoning abilities in complex tasks. However, they lack up-to-date knowledge and experience hallucinations during reasoning, which can lead to incorrect reasoning processes and diminish their performance and trustworthiness. Knowledge graphs (KGs), which capture vast amounts of facts in a structured format, offer a reliable source of knowledge for reasoning. Nevertheless, existing KG-based LLM reasoning methods only treat KGs as factual knowledge bases and overlook the importance of their structural information for reasoning. In this paper, we propose a novel method called reasoning on graphs (RoG) that synergizes LLMs with KGs to enable faithful and interpretable reasoning. 
Specifically, we present a planning-retrieval-reasoning framework, where RoG first generates relation paths grounded by KGs as faithful plans. These plans are then used to retrieve valid reasoning paths from the KGs for LLMs to conduct faithful reasoning. Furthermore, RoG not only distills knowledge from KGs to improve the reasoning ability of LLMs through training but also allows seamless integration with any arbitrary LLMs during inference. Extensive experiments on two benchmark KGQA datasets demonstrate that RoG achieves state-of-the-art performance on KG reasoning tasks and generates faithful and interpretable reasoning results.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Linhao Luo", "Yuan-Fang Li", "Gholamreza Haffari", "Shirui Pan" ], "externalIds": { "DBLP": "journals/corr/abs-2310-01061", "ArXiv": "2310.01061", "DOI": "10.48550/arXiv.2310.01061", "CorpusId": 263605944 }, "url": "https://www.semanticscholar.org/paper/b47e96762351b2dbf7e863ece4640df6194bcc0c", "referenceCount": 76, "citationCount": 91, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GraphText: Graph Reasoning in Text Space", "abstract": "Large Language Models (LLMs) have gained the ability to assimilate human knowledge and facilitate natural language interactions with both humans and other LLMs. However, despite their impressive achievements, LLMs have not made significant advancements in the realm of graph machine learning. This limitation arises because graphs encapsulate distinct relational data, making it challenging to transform them into natural language that LLMs understand. In this paper, we bridge this gap with a novel framework, GraphText, that translates graphs into natural language. GraphText derives a graph-syntax tree for each graph that encapsulates both the node attributes and inter-node relationships. Traversal of the tree yields a graph text sequence, which is then processed by an LLM to treat graph tasks as text generation tasks. Notably, GraphText offers multiple advantages. It introduces training-free graph reasoning: even without training on graph data, GraphText with ChatGPT can achieve on par with, or even surpassing, the performance of supervised-trained graph neural networks through in-context learning (ICL). Furthermore, GraphText paves the way for interactive graph reasoning, allowing both humans and LLMs to communicate with the model seamlessly using natural language. These capabilities underscore the vast, yet-to-be-explored potential of LLMs in the domain of graph machine learning.", "year": 2023, "venue": "arXiv.org", "authors": [ "Jianan Zhao", "Le Zhuo", "Yikang Shen", "Meng Qu", "Kai Liu", "Michael Bronstein", "Zhaocheng Zhu", "Jian Tang" ], "externalIds": { "DBLP": "journals/corr/abs-2310-01089", "ArXiv": "2310.01089", "DOI": "10.48550/arXiv.2310.01089", "CorpusId": 263605738 }, "url": "https://www.semanticscholar.org/paper/55367fbade73f96181ffcf52169d0471d4c014a2", "referenceCount": 55, "citationCount": 47, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SimTeG: A Frustratingly Simple Approach Improves Textual Graph Learning", "abstract": "Textual graphs (TGs) are graphs whose nodes correspond to text (sentences or documents), which are widely prevalent. The representation learning of TGs involves two stages: (i) unsupervised feature extraction and (ii) supervised graph representation learning. 
In recent years, extensive efforts have been devoted to the latter stage, where Graph Neural Networks (GNNs) have dominated. However, the former stage for most existing graph benchmarks still relies on traditional feature engineering techniques. More recently, with the rapid development of language models (LMs), researchers have focused on leveraging LMs to facilitate the learning of TGs, either by jointly training them in a computationally intensive framework (merging the two stages), or designing complex self-supervised training tasks for feature extraction (enhancing the first stage). In this work, we present SimTeG, a frustratingly Simple approach for Textual Graph learning that does not innovate in frameworks, models, and tasks. Instead, we first perform supervised parameter-efficient fine-tuning (PEFT) on a pre-trained LM on the downstream task, such as node classification. We then generate node embeddings using the last hidden states of finetuned LM. These derived features can be further utilized by any GNN for training on the same task. We evaluate our approach on two fundamental graph representation learning tasks: node classification and link prediction. Through extensive experiments, we show that our approach significantly improves the performance of various GNNs on multiple graph benchmarks.", "year": 2023, "venue": "arXiv.org", "authors": [ "Keyu Duan", "Qian Liu", "Tat-seng Chua", "Shuicheng Yan", "Wei Tsang Ooi", "Qizhe Xie", "Junxian He" ], "externalIds": { "DBLP": "journals/corr/abs-2308-02565", "ArXiv": "2308.02565", "DOI": "10.48550/arXiv.2308.02565", "CorpusId": 260681726 }, "url": "https://www.semanticscholar.org/paper/303b7d0a81395562e3a46578a89d6821ce564a8b", "referenceCount": 44, "citationCount": 42, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Unifying Large Language Models and Knowledge Graphs: A Roadmap", "abstract": "Large language models (LLMs), such as ChatGPT and GPT4, are making new waves in the field of natural language processing and artificial intelligence, due to their emergent ability and generalizability. However, LLMs are black-box models, which often fall short of capturing and accessing factual knowledge. In contrast, Knowledge Graphs (KGs), Wikipedia, and Huapu for example, are structured knowledge models that explicitly store rich factual knowledge. KGs can enhance LLMs by providing external knowledge for inference and interpretability. Meanwhile, KGs are difficult to construct and evolve by nature, which challenges the existing methods in KGs to generate new facts and represent unseen knowledge. Therefore, it is complementary to unify LLMs and KGs together and, simultaneously, leverage their advantages. In this article, we present a forward-looking roadmap for the unification of LLMs and KGs. Our roadmap consists of three general frameworks, namely: 1) KG-enhanced LLMs, which incorporate KGs during the pre-training and inference phases of LLMs, or for the purpose of enhancing understanding of the knowledge learned by LLMs; 2) LLM-augmented KGs, that leverage LLMs for different KG tasks such as embedding, completion, construction, graph-to-text generation, and question answering; and 3) Synergized LLMs + KGs, in which LLMs and KGs play equal roles and work in a mutually beneficial way to enhance both LLMs and KGs for bidirectional reasoning driven by both data and knowledge. 
We review and summarize existing efforts within these three frameworks in our roadmap and pinpoint their future research directions.", "year": 2023, "venue": "IEEE Transactions on Knowledge and Data Engineering", "authors": [ "Shirui Pan", "Linhao Luo", "Yufei Wang", "Chen Chen", "Jiapu Wang", "Xindong Wu" ], "externalIds": { "DBLP": "journals/corr/abs-2306-08302", "ArXiv": "2306.08302", "DOI": "10.1109/TKDE.2024.3352100", "CorpusId": 259165563 }, "url": "https://www.semanticscholar.org/paper/9e8b7b0d4c628c12b6a65ab56ac5f33a35eff2e6", "referenceCount": 301, "citationCount": 404, "influentialCitationCount": 21, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GPT4Graph: Can Large Language Models Understand Graph Structured Data ? An Empirical Evaluation and Benchmarking", "abstract": "Large language models~(LLM) like ChatGPT have become indispensable to artificial general intelligence~(AGI), demonstrating excellent performance in various natural language processing tasks. In the real world, graph data is ubiquitous and an essential part of AGI and prevails in domains like social network analysis, bioinformatics and recommender systems. The training corpus of large language models often includes some algorithmic components, which allows them to achieve certain effects on some graph data-related problems. However, there is still little research on their performance on a broader range of graph-structured data. In this study, we conduct an extensive investigation to assess the proficiency of LLMs in comprehending graph data, employing a diverse range of structural and semantic-related tasks. Our analysis encompasses 10 distinct tasks that evaluate the LLMs' capabilities in graph understanding. Through our study, we not only uncover the current limitations of language models in comprehending graph structures and performing associated reasoning tasks but also emphasize the necessity for further advancements and novel approaches to enhance their graph processing capabilities. Our findings contribute valuable insights towards bridging the gap between language models and graph understanding, paving the way for more effective graph mining and knowledge extraction.", "year": 2023, "venue": "arXiv.org", "authors": [ "Jiayan Guo", "Lun Du", "Hengyu Liu" ], "externalIds": { "DBLP": "journals/corr/abs-2305-15066", "ArXiv": "2305.15066", "DOI": "10.48550/arXiv.2305.15066", "CorpusId": 258865990 }, "url": "https://www.semanticscholar.org/paper/2b967d82b25088566980aaaf5a7062d90b2fb14f", "referenceCount": 44, "citationCount": 94, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Prompting with Pseudo-Code Instructions", "abstract": "Prompting with natural language instructions has recently emerged as a popular method of harnessing the capabilities of large language models. Given the inherent ambiguity present in natural language, it is intuitive to consider the possible advantages of prompting with less ambiguous prompt styles, such as the use of pseudo-code. In this paper we explore if prompting via pseudo-code instructions helps improve the performance of pre-trained language models. We manually create a dataset of pseudo-code prompts for 132 different tasks spanning classification, QA and generative language tasks, sourced from the Super-NaturalInstructions dataset. Using these prompts along with their counterparts in natural language, we study their performance on two LLM families - BLOOM and CodeGen. 
Our experiments show that using pseudo-code instructions leads to better results, with an average increase (absolute) of 7-16 points in F1 scores for classification tasks and an improvement (relative) of 12-38% in aggregate ROUGE-L scores across all tasks. We include detailed ablation studies which indicate that code comments, docstrings, and the structural clues encoded in pseudo-code all contribute towards the improvement in performance. To the best of our knowledge our work is the first to demonstrate how pseudo-code prompts can be helpful in improving the performance of pre-trained LMs.", "year": 2023, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Mayank Mishra", "Prince Kumar", "Riyaz Ahmad Bhat", "V. Rudramurthy", "Danish Contractor", "Srikanth G. Tamilselvam" ], "externalIds": { "ArXiv": "2305.11790", "DBLP": "conf/emnlp/MishraKBVCT23", "DOI": "10.48550/arXiv.2305.11790", "CorpusId": 258823202 }, "url": "https://www.semanticscholar.org/paper/05526b42336c74298ca4ccbcb792107900574062", "referenceCount": 107, "citationCount": 11, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Can Language Models Solve Graph Problems in Natural Language?", "abstract": "Large language models (LLMs) are increasingly adopted for a variety of tasks with implicit graphical structures, such as planning in robotics, multi-hop question answering or knowledge probing, structured commonsense reasoning, and more. While LLMs have advanced the state-of-the-art on these tasks with structure implications, whether LLMs could explicitly process textual descriptions of graphs and structures, map them to grounded conceptual spaces, and perform structured operations remains underexplored. To this end, we propose NLGraph (Natural Language Graph), a comprehensive benchmark of graph-based problem solving designed in natural language. NLGraph contains 29,370 problems, covering eight graph reasoning tasks with varying complexity from simple tasks such as connectivity and shortest path up to complex problems such as maximum flow and simulating graph neural networks. We evaluate LLMs (GPT-3/4) with various prompting approaches on the NLGraph benchmark and find that 1) language models do demonstrate preliminary graph reasoning abilities, 2) the benefit of advanced prompting and in-context learning diminishes on more complex graph problems, while 3) LLMs are also (un)surprisingly brittle in the face of spurious correlations in graph and problem settings. We then propose Build-a-Graph Prompting and Algorithmic Prompting, two instruction-based approaches to enhance LLMs in solving natural language graph problems. Build-a-Graph and Algorithmic prompting improve the performance of LLMs on NLGraph by 3.07% to 16.85% across multiple tasks and settings, while how to solve the most complicated graph reasoning tasks in our setup with language models remains an open research question. 
The NLGraph benchmark and evaluation code are available at https://github.com/Arthur-Heng/NLGraph.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Heng Wang", "Shangbin Feng", "Tianxing He", "Zhaoxuan Tan", "Xiaochuang Han", "Yulia Tsvetkov" ], "externalIds": { "DBLP": "journals/corr/abs-2305-10037", "ArXiv": "2305.10037", "DOI": "10.48550/arXiv.2305.10037", "CorpusId": 258740923 }, "url": "https://www.semanticscholar.org/paper/df2beaae63e4d68ef8e762bcd4704c9f11f856d9", "referenceCount": 40, "citationCount": 106, "influentialCitationCount": 20, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Sparks of Artificial General Intelligence: Early experiments with GPT-4", "abstract": "Artificial intelligence (AI) researchers have been developing and refining large language models (LLMs) that exhibit remarkable capabilities across a variety of domains and tasks, challenging our understanding of learning and cognition. The latest model developed by OpenAI, GPT-4, was trained using an unprecedented scale of compute and data. In this paper, we report on our investigation of an early version of GPT-4, when it was still in active development by OpenAI. We contend that (this early version of) GPT-4 is part of a new cohort of LLMs (along with ChatGPT and Google's PaLM for example) that exhibit more general intelligence than previous AI models. We discuss the rising capabilities and implications of these models. We demonstrate that, beyond its mastery of language, GPT-4 can solve novel and difficult tasks that span mathematics, coding, vision, medicine, law, psychology and more, without needing any special prompting. Moreover, in all of these tasks, GPT-4's performance is strikingly close to human-level performance, and often vastly surpasses prior models such as ChatGPT. Given the breadth and depth of GPT-4's capabilities, we believe that it could reasonably be viewed as an early (yet still incomplete) version of an artificial general intelligence (AGI) system. In our exploration of GPT-4, we put special emphasis on discovering its limitations, and we discuss the challenges ahead for advancing towards deeper and more comprehensive versions of AGI, including the possible need for pursuing a new paradigm that moves beyond next-word prediction. We conclude with reflections on societal influences of the recent technological leap and future research directions.", "year": 2023, "venue": "arXiv.org", "authors": [ "Sébastien Bubeck", "Varun Chandrasekaran", "Ronen Eldan", "J. Gehrke", "Eric Horvitz", "Ece Kamar", "Peter Lee", "Y. Lee", "Yuan-Fang Li", "Scott M. Lundberg", "Harsha Nori", "Hamid Palangi", "Marco Tulio Ribeiro", "Yi Zhang" ], "externalIds": { "ArXiv": "2303.12712", "DBLP": "journals/corr/abs-2303-12712", "CorpusId": 257663729 }, "url": "https://www.semanticscholar.org/paper/8dbd57469bb32e6d57f23f5e765bf1c9ac8e080c", "referenceCount": 0, "citationCount": 2273, "influentialCitationCount": 163, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Program of Thoughts Prompting: Disentangling Computation from Reasoning for Numerical Reasoning Tasks", "abstract": "Recently, there has been significant progress in teaching language models to perform step-by-step reasoning to solve complex numerical reasoning tasks. Chain-of-thoughts prompting (CoT) is by far the state-of-art method for these tasks. CoT uses language models to perform both reasoning and computation in the multi-step `thought' process. 
To disentangle computation from reasoning, we propose `Program of Thoughts' (PoT), which uses language models (mainly Codex) to express the reasoning process as a program. The computation is relegated to an external computer, which executes the generated programs to derive the answer. We evaluate PoT on five math word problem datasets (GSM, AQuA, SVAMP, TabMWP, MultiArith) and three financial-QA datasets (FinQA, ConvFinQA, TATQA) for both few-shot and zero-shot setups. Under both few-shot and zero-shot settings, PoT can show an average performance gain over CoT by around 12\\% across all the evaluated datasets. By combining PoT with self-consistency decoding, we can achieve SoTA performance on all math problem datasets and near-SoTA performance on financial datasets. All of our data and code are released in Github https://github.com/wenhuchen/Program-of-Thoughts", "year": 2022, "venue": "Trans. Mach. Learn. Res.", "authors": [ "Wenhu Chen", "Xueguang Ma", "Xinyi Wang", "William W. Cohen" ], "externalIds": { "ArXiv": "2211.12588", "DBLP": "journals/tmlr/ChenM0C23", "DOI": "10.48550/arXiv.2211.12588", "CorpusId": 253801709 }, "url": "https://www.semanticscholar.org/paper/6c943670dca38bfc7c8b477ae7c2d1fba1ad3691", "referenceCount": 49, "citationCount": 504, "influentialCitationCount": 64, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Pure Transformers are Powerful Graph Learners", "abstract": "We show that standard Transformers without graph-specific modifications can lead to promising results in graph learning both in theory and practice. Given a graph, we simply treat all nodes and edges as independent tokens, augment them with token embeddings, and feed them to a Transformer. With an appropriate choice of token embeddings, we prove that this approach is theoretically at least as expressive as an invariant graph network (2-IGN) composed of equivariant linear layers, which is already more expressive than all message-passing Graph Neural Networks (GNN). When trained on a large-scale graph dataset (PCQM4Mv2), our method coined Tokenized Graph Transformer (TokenGT) achieves significantly better results compared to GNN baselines and competitive results compared to Transformer variants with sophisticated graph-specific inductive bias. Our implementation is available at https://github.com/jw9730/tokengt.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Jinwoo Kim", "Tien Dat Nguyen", "Seonwoo Min", "Sungjun Cho", "Moontae Lee", "Honglak Lee", "Seunghoon Hong" ], "externalIds": { "DBLP": "journals/corr/abs-2207-02505", "ArXiv": "2207.02505", "DOI": "10.48550/arXiv.2207.02505", "CorpusId": 250311113 }, "url": "https://www.semanticscholar.org/paper/5eda60d4940d4185df45c5703e103458171d465d", "referenceCount": 96, "citationCount": 147, "influentialCitationCount": 18, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Maieutic Prompting: Logically Consistent Reasoning with Recursive Explanations", "abstract": "Pre-trained language models (LMs) struggle with consistent reasoning; recently, prompting LMs to generate explanations that self-guide the inference has emerged as a promising direction to amend this. However, these approaches are fundamentally bounded by the correctness of explanations, which themselves are often noisy and inconsistent. In this work, we develop Maieutic Prompting, which aims to infer a correct answer to a question even from the unreliable generations of LM. 
Maieutic Prompting induces a tree of explanations abductively (e.g. X is true, because ...) and recursively, then frames the inference as a satisfiability problem over these explanations and their logical relations. We test Maieutic Prompting for true/false QA on three challenging benchmarks that require complex commonsense reasoning. Maieutic Prompting achieves up to 20% better accuracy than state-of-the-art prompting methods, and as a fully unsupervised approach, performs competitively with supervised models. We also show that Maieutic Prompting improves robustness in inference while providing interpretable rationales.", "year": 2022, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Jaehun Jung", "Lianhui Qin", "S. Welleck", "Faeze Brahman", "Chandra Bhagavatula", "Ronan Le Bras", "Yejin Choi" ], "externalIds": { "ACL": "2022.emnlp-main.82", "DBLP": "conf/emnlp/JungQWBB0C22", "ArXiv": "2205.11822", "DOI": "10.48550/arXiv.2205.11822", "CorpusId": 249017524 }, "url": "https://www.semanticscholar.org/paper/50b0c6ee2b3d53ba5af69d6c00b5d60888a9026f", "referenceCount": 54, "citationCount": 160, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Least-to-Most Prompting Enables Complex Reasoning in Large Language Models", "abstract": "Chain-of-thought prompting has demonstrated remarkable performance on various natural language reasoning tasks. However, it tends to perform poorly on tasks which requires solving problems harder than the exemplars shown in the prompts. To overcome this challenge of easy-to-hard generalization, we propose a novel prompting strategy, least-to-most prompting. The key idea in this strategy is to break down a complex problem into a series of simpler subproblems and then solve them in sequence. Solving each subproblem is facilitated by the answers to previously solved subproblems. Our experimental results on tasks related to symbolic manipulation, compositional generalization, and math reasoning reveal that least-to-most prompting is capable of generalizing to more difficult problems than those seen in the prompts. A notable finding is that when the GPT-3 code-davinci-002 model is used with least-to-most prompting, it can solve the compositional generalization benchmark SCAN in any split (including length split) with an accuracy of at least 99% using just 14 exemplars, compared to only 16% accuracy with chain-of-thought prompting. This is particularly noteworthy because neural-symbolic models in the literature that specialize in solving SCAN are trained on the entire training set containing over 15,000 examples. We have included prompts for all the tasks in the Appendix.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Denny Zhou", "Nathanael Scharli", "Le Hou", "Jason Wei", "Nathan Scales", "Xuezhi Wang", "D. Schuurmans", "O. Bousquet", "Quoc Le", "E. 
Chi" ], "externalIds": { "ArXiv": "2205.10625", "DBLP": "conf/iclr/ZhouSHWS0SCBLC23", "DOI": "10.48550/arXiv.2205.10625", "CorpusId": 248986239 }, "url": "https://www.semanticscholar.org/paper/5437e8adab596d7294124c0e798708e050e25321", "referenceCount": 74, "citationCount": 764, "influentialCitationCount": 57, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Self-Consistency Improves Chain of Thought Reasoning in Language Models", "abstract": "Chain-of-thought prompting combined with pre-trained large language models has achieved encouraging results on complex reasoning tasks. In this paper, we propose a new decoding strategy, self-consistency, to replace the naive greedy decoding used in chain-of-thought prompting. It first samples a diverse set of reasoning paths instead of only taking the greedy one, and then selects the most consistent answer by marginalizing out the sampled reasoning paths. Self-consistency leverages the intuition that a complex reasoning problem typically admits multiple different ways of thinking leading to its unique correct answer. Our extensive empirical evaluation shows that self-consistency boosts the performance of chain-of-thought prompting with a striking margin on a range of popular arithmetic and commonsense reasoning benchmarks, including GSM8K (+17.9%), SVAMP (+11.0%), AQuA (+12.2%), StrategyQA (+6.4%) and ARC-challenge (+3.9%).", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Xuezhi Wang", "Jason Wei", "D. Schuurmans", "Quoc Le", "E. Chi", "Denny Zhou" ], "externalIds": { "DBLP": "journals/corr/abs-2203-11171", "ArXiv": "2203.11171", "DOI": "10.48550/arXiv.2203.11171", "CorpusId": 247595263 }, "url": "https://www.semanticscholar.org/paper/5f19ae1135a9500940978104ec15a5b8751bc7d2", "referenceCount": 80, "citationCount": 2045, "influentialCitationCount": 335, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Training language models to follow instructions with human feedback", "abstract": "Making language models bigger does not inherently make them better at following a user's intent. For example, large language models can generate outputs that are untruthful, toxic, or simply not helpful to the user. In other words, these models are not aligned with their users. In this paper, we show an avenue for aligning language models with user intent on a wide range of tasks by fine-tuning with human feedback. Starting with a set of labeler-written prompts and prompts submitted through the OpenAI API, we collect a dataset of labeler demonstrations of the desired model behavior, which we use to fine-tune GPT-3 using supervised learning. We then collect a dataset of rankings of model outputs, which we use to further fine-tune this supervised model using reinforcement learning from human feedback. We call the resulting models InstructGPT. In human evaluations on our prompt distribution, outputs from the 1.3B parameter InstructGPT model are preferred to outputs from the 175B GPT-3, despite having 100x fewer parameters. Moreover, InstructGPT models show improvements in truthfulness and reductions in toxic output generation while having minimal performance regressions on public NLP datasets. 
Even though InstructGPT still makes simple mistakes, our results show that fine-tuning with human feedback is a promising direction for aligning language models with human intent.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Long Ouyang", "Jeff Wu", "Xu Jiang", "Diogo Almeida", "Carroll L. Wainwright", "Pamela Mishkin", "Chong Zhang", "Sandhini Agarwal", "Katarina Slama", "Alex Ray", "John Schulman", "Jacob Hilton", "Fraser Kelton", "Luke E. Miller", "Maddie Simens", "Amanda Askell", "P. Welinder", "P. Christiano", "J. Leike", "Ryan J. Lowe" ], "externalIds": { "DBLP": "conf/nips/Ouyang0JAWMZASR22", "ArXiv": "2203.02155", "CorpusId": 246426909 }, "url": "https://www.semanticscholar.org/paper/d766bffc357127e0dc86dd69561d5aeb520d6f4c", "referenceCount": 83, "citationCount": 8493, "influentialCitationCount": 1115, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Knowledge Graph-based World Models of Textual Environments", "abstract": "World models improve a learning agent's ability to efficiently operate in interactive and situated environments. This work focuses on the task of building world models of text-based game environments. Text-based games, or interactive narratives, are reinforcement learning environments in which agents perceive and interact with the world using textual natural language. These environments contain long, multi-step puzzles or quests woven through a world that is filled with hundreds of characters, locations, and objects. Our world model learns to simultaneously: (1) predict changes in the world caused by an agent's actions when representing the world as a knowledge graph; and (2) generate the set of contextually relevant natural language actions required to operate in the world. We frame this task as a Set of Sequences generation problem by exploiting the inherent structure of knowledge graphs and actions and introduce both a transformer-based multi-task architecture and a loss function to train it. A zero-shot ablation study on never-before-seen textual worlds shows that our methodology significantly outperforms existing textual world modeling techniques as well as the importance of each of our contributions.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Prithviraj Ammanabrolu", "Mark O. Riedl" ], "externalIds": { "DBLP": "journals/corr/abs-2106-09608", "ArXiv": "2106.09608", "CorpusId": 235458286 }, "url": "https://www.semanticscholar.org/paper/c8fc50bc2cd673fa4e5c5ac581f6fe3fcdaf6b8d", "referenceCount": 49, "citationCount": 26, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Language Models are Few-Shot Learners", "abstract": "Recent work has demonstrated substantial gains on many NLP tasks and benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples. By contrast, humans can generally perform a new language task from only a few examples or from simple instructions - something which current NLP systems still largely struggle to do. Here we show that scaling up language models greatly improves task-agnostic, few-shot performance, sometimes even reaching competitiveness with prior state-of-the-art fine-tuning approaches. 
Specifically, we train GPT-3, an autoregressive language model with 175 billion parameters, 10x more than any previous non-sparse language model, and test its performance in the few-shot setting. For all tasks, GPT-3 is applied without any gradient updates or fine-tuning, with tasks and few-shot demonstrations specified purely via text interaction with the model. GPT-3 achieves strong performance on many NLP datasets, including translation, question-answering, and cloze tasks, as well as several tasks that require on-the-fly reasoning or domain adaptation, such as unscrambling words, using a novel word in a sentence, or performing 3-digit arithmetic. At the same time, we also identify some datasets where GPT-3's few-shot learning still struggles, as well as some datasets where GPT-3 faces methodological issues related to training on large web corpora. Finally, we find that GPT-3 can generate samples of news articles which human evaluators have difficulty distinguishing from articles written by humans. We discuss broader societal impacts of this finding and of GPT-3 in general.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Tom B. Brown", "Benjamin Mann", "Nick Ryder", "Melanie Subbiah", "J. Kaplan", "Prafulla Dhariwal", "Arvind Neelakantan", "Pranav Shyam", "Girish Sastry", "Amanda Askell", "Sandhini Agarwal", "Ariel Herbert-Voss", "Gretchen Krueger", "T. Henighan", "R. Child", "A. Ramesh", "Daniel M. Ziegler", "Jeff Wu", "Clemens Winter", "Christopher Hesse", "Mark Chen", "Eric Sigler", "Ma-teusz Litwin", "Scott Gray", "B. Chess", "Jack Clark", "Christopher Berner", "Sam McCandlish", "Alec Radford", "I. Sutskever", "Dario Amodei" ], "externalIds": { "ArXiv": "2005.14165", "DBLP": "conf/nips/BrownMRSKDNSSAA20", "MAG": "3030163527", "CorpusId": 218971783 }, "url": "https://www.semanticscholar.org/paper/90abbc2cf38462b954ae1b772fac9532e2ccd8b0", "referenceCount": 146, "citationCount": 30854, "influentialCitationCount": 3529, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Graph Neural Networks: A Review of Methods and Applications", "abstract": null, "year": 2018, "venue": "AI Open", "authors": [ "Jie Zhou", "Ganqu Cui", "Zhengyan Zhang", "Cheng Yang", "Zhiyuan Liu", "Maosong Sun" ], "externalIds": { "MAG": "3152893301", "DBLP": "journals/aiopen/ZhouCHZYLWLS20", "ArXiv": "1812.08434", "DOI": "10.1016/J.AIOPEN.2021.01.001", "CorpusId": 56517517 }, "url": "https://www.semanticscholar.org/paper/ea5dd6a3d8f210d05e53a7b6fa5e16f1b115f693", "referenceCount": 301, "citationCount": 4537, "influentialCitationCount": 183, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Attention is All you Need", "abstract": "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. 
On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Ashish Vaswani", "Noam M. Shazeer", "Niki Parmar", "Jakob Uszkoreit", "Llion Jones", "Aidan N. Gomez", "Lukasz Kaiser", "Illia Polosukhin" ], "externalIds": { "MAG": "2963403868", "DBLP": "conf/nips/VaswaniSPUJGKP17", "ArXiv": "1706.03762", "CorpusId": 13756489 }, "url": "https://www.semanticscholar.org/paper/204e3073870fae3d05bcbc2f6a8e263d9b72e776", "referenceCount": 41, "citationCount": 104988, "influentialCitationCount": 15363, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", "abstract": "We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models (Peters et al., 2018a; Radford et al., 2018), BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5 (7.7 point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).", "year": 2019, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Jacob Devlin", "Ming-Wei Chang", "Kenton Lee", "Kristina Toutanova" ], "externalIds": { "MAG": "2951055169", "ACL": "N19-1423", "DBLP": "journals/corr/abs-1810-04805", "ArXiv": "1810.04805", "DOI": "10.18653/v1/N19-1423", "CorpusId": 52967399 }, "url": "https://www.semanticscholar.org/paper/df2b0e26d0599ce3e70df8a9da02e51594e0e992", "referenceCount": 63, "citationCount": 81678, "influentialCitationCount": 19056, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "2023. Pre-train, Prompt, and Predict: A Systematic Survey of Prompting Methods in Natural Language Processing", "abstract": null, "year": null, "venue": "ACM Computing Surveys", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. Talk like a Graph: Encoding Graphs for Large Language Models", "abstract": null, "year": null, "venue": "The 12th International Conference on Learning Representations", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. 
Let Your Graph Do the Talking: Encoding Structured Data for LLMs", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "on Graphs", "abstract": null, "year": null, "venue": "ACM SIGKDD Explorations Newsletter", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Large language models in medicine", "abstract": null, "year": null, "venue": "Nature medicine", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. Exploring the Potential of Large Language Models (LLMs) in Learning", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. Harnessing Explanations: LLM-to-LM Interpreter for Enhanced Text-Attributed Graph Representation Learning", "abstract": null, "year": null, "venue": "The 12th International Conference on Learning Representations", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Tree of Thoughts: Deliberate Problem Solving with Large Language Models", "abstract": null, "year": null, "venue": "Advances in Neural Information Processing Systems", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2022. Synchromesh: Reliable Code Generation from Pre-trained Language Models", "abstract": null, "year": null, "venue": "The 10th International Conference on Learning Representations", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Learning to Love Edge Cases in Formative Math Assessment: Using the AMMORE Dataset and Chain-of-Thought Prompting to Improve Grading Accuracy": { "paper_title": "Learning to Love Edge Cases in Formative Math Assessment: Using the AMMORE Dataset and Chain-of-Thought Prompting to Improve Grading Accuracy", "arxiv_id": "2409.17904v1", "keyword": "large language model", "authors": [ "Owen Henkel", "Hannah Horne-Robinson", "Maria Dyshel", "Nabil Ch", "Baptiste Moreau-Pernet", "Ralph Abood" ], "references": [ { "title": "A Chain-of-Thought Prompting Approach with LLMs for Evaluating Students' Formative Assessment Responses in Science", "abstract": "This paper explores the use of large language models (LLMs) to score and explain short-answer assessments in K-12 science. While existing methods can score more structured math and computer science assessments, they often do not provide explanations for the scores. Our study focuses on employing GPT-4 for automated assessment in middle school Earth Science, combining few-shot and active learning with chain-of-thought reasoning. Using a human-in-the-loop approach, we successfully score and provide meaningful explanations for formative assessment responses.
A systematic analysis of our method's pros and cons sheds light on the potential for human-in-the-loop techniques to enhance automated grading for open-ended science assessments.", "year": 2024, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Clayton Cohn", "Nicole M. Hutchins", "Tuan Le", "Gautam Biswas" ], "externalIds": { "DBLP": "journals/corr/abs-2403-14565", "ArXiv": "2403.14565", "DOI": "10.1609/aaai.v38i21.30364", "CorpusId": 268553761 }, "url": "https://www.semanticscholar.org/paper/2b118069a3344ef0678993dbe6b2c4d9abe75acc", "referenceCount": 39, "citationCount": 9, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multiple Choice vs. Fill-In Problems: The Trade-off Between Scalability and Learning", "abstract": "Learning experience designers consistently balance the trade-off between open and close-ended activities. The growth and scalability of Computer Based Learning Platforms (CBLPs) have only magnified the importance of these design trade-offs. CBLPs often utilize close-ended activities (i.e. Multiple-Choice Questions [MCQs]) due to feasibility constraints associated with the use of open-ended activities. MCQs offer certain affordances, such as immediate grading and the use of distractors, setting them apart from open-ended activities. Our current study examines the effectiveness of Fill-In problems as an alternative to MCQs for middle school mathematics. We report on a randomized study conducted from 2017 to 2022, with a total of 6,768 students from middle schools across the US. We observe that, on average, Fill-In problems lead to better post-test performance than MCQs; albeit deeper explorations indicate differences between the two design paradigms to be more nuanced. We find evidence that students with higher math knowledge benefit more from Fill-In problems than those with lower math knowledge.", "year": 2024, "venue": "International Conference on Learning Analytics and Knowledge", "authors": [ "Ashish Gurung", "Kirk P. Vanacore", "Andrew A. Mcreynolds", "Korinn S. Ostrow", "Eamon Worden", "Adam C. Sales", "Neil T. Heffernan" ], "externalIds": { "DBLP": "conf/lak/GurungVMOWSH24", "DOI": "10.1145/3636555.3636908", "CorpusId": 268315313 }, "url": "https://www.semanticscholar.org/paper/678ef7d1c6c28dc2716a0d531c2098628fc0dcd9", "referenceCount": 49, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Examining the Efficacy of ChatGPT in Marking Short-Answer Assessments in an Undergraduate Medical Program", "abstract": "Traditional approaches to marking short-answer questions face limitations in timeliness, scalability, inter-rater reliability, and faculty time costs. Harnessing generative artificial intelligence (AI) to address some of these shortcomings is attractive. This study aims to validate the use of ChatGPT for evaluating short-answer assessments in an undergraduate medical program. Ten questions from the pre-clerkship medical curriculum were randomly chosen, and for each, six previously marked student answers were collected. These sixty answers were evaluated by ChatGPT in July 2023 under four conditions: with both a rubric and standard, with only a standard, with only a rubric, and with neither. ChatGPT displayed good Spearman correlations with a single human assessor (r = 0.6–0.7, p < 0.001) across all conditions, with the absence of a standard or rubric yielding the best correlation. 
Scoring differences were common (65–80%), but score adjustments of more than one point were less frequent (20–38%). Notably, the absence of a rubric resulted in systematically higher scores (p < 0.001, partial η2 = 0.33). Our findings demonstrate that ChatGPT is a viable, though imperfect, assistant to human assessment, performing comparably to a single expert assessor. This study serves as a foundation for future research on AI-based assessment techniques with potential for further optimization and increased reliability.", "year": 2024, "venue": "International Medical Education", "authors": [ "Leo Morjaria", "Levi Burns", "Keyna Bracken", "Anthony J Levinson", "Quang N. Ngo", "Mark Lee", "Matthew Sibbald" ], "externalIds": { "DOI": "10.3390/ime3010004", "CorpusId": 267137509 }, "url": "https://www.semanticscholar.org/paper/ce630c477b8af157a8b671736d29007d212d9574", "referenceCount": 35, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Performance of the Pre-Trained Large Language Model GPT-4 on Automated Short Answer Grading", "abstract": "Automated Short Answer Grading (ASAG) has been an active area of machine-learning research for over a decade. It promises to let educators grade and give feedback on free-form responses in large-enrollment courses in spite of limited availability of human graders. Over the years, carefully trained models have achieved increasingly higher levels of performance. More recently, pre-trained Large Language Models (LLMs) emerged as a commodity, and an intriguing question is how a general-purpose tool without additional training compares to specialized models. We studied the performance of GPT-4 on the standard benchmark 2-way and 3-way datasets SciEntsBank and Beetle, where in addition to the standard task of grading the alignment of the student answer with a reference answer, we also investigated withholding the reference answer. We found that overall, the performance of the pre-trained general-purpose GPT-4 LLM is comparable to hand-engineered models, but worse than pre-trained LLMs that had specialized training.", "year": 2023, "venue": "Discover Artificial Intelligence", "authors": [ "Gerd Kortemeyer" ], "externalIds": { "DBLP": "journals/dai/Kortemeyer24", "ArXiv": "2309.09338", "DOI": "10.48550/arXiv.2309.09338", "CorpusId": 262045158 }, "url": "https://www.semanticscholar.org/paper/6c16ffcdd208f0dbbbb30bfb518d9214befe6804", "referenceCount": 33, "citationCount": 8, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Towards LLM-based Autograding for Short Textual Answers", "abstract": "Grading exams is an important, labor-intensive, subjective, repetitive, and frequently challenging task. The feasibility of autograding textual responses has greatly increased thanks to the availability of large language models (LLMs) such as ChatGPT and the substantial influx of data brought about by digitalization. However, entrusting AI models with decision-making roles raises ethical considerations, mainly stemming from potential biases and issues related to generating false information. Thus, in this manuscript, we provide an evaluation of a large language model for the purpose of autograding, while also highlighting how LLMs can support educators in validating their grading procedures. Our evaluation is targeted towards automatic short textual answers grading (ASAG), spanning various languages and examinations from two distinct courses. 
Our findings suggest that while \"out-of-the-box\" LLMs provide a valuable tool to provide a complementary perspective, their readiness for independent automated grading remains a work in progress, necessitating human oversight.", "year": 2023, "venue": "International Conference on Computer Supported Education", "authors": [ "Johannes Schneider", "Bernd Schenk", "Christina Niklaus", "Michalis Vlachos" ], "externalIds": { "DBLP": "conf/csedu/0002SN24", "ArXiv": "2309.11508", "DOI": "10.48550/arXiv.2309.11508", "CorpusId": 262083885 }, "url": "https://www.semanticscholar.org/paper/386c22dc924d0090b82b713ae9f553f1be91bf87", "referenceCount": 38, "citationCount": 12, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A large language model-assisted education tool to provide feedback on open-ended responses", "abstract": "Open-ended questions are a favored tool among instructors for assessing student understanding and encouraging critical exploration of course material. Providing feedback for such responses is a time-consuming task that can lead to overwhelmed instructors and decreased feedback quality. Many instructors resort to simpler question formats, like multiple-choice questions, which provide immediate feedback but at the expense of personalized and insightful comments. Here, we present a tool that uses large language models (LLMs), guided by instructor-defined criteria, to automate responses to open-ended questions. Our tool delivers rapid personalized feedback, enabling students to quickly test their knowledge and identify areas for improvement. We provide open-source reference implementations both as a web application and as a Jupyter Notebook widget that can be used with instructional coding or math notebooks. With instructor guidance, LLMs hold promise to enhance student learning outcomes and elevate instructional methodologies.", "year": 2023, "venue": "arXiv.org", "authors": [ "Jordan K Matelsky", "Felipe Parodi", "Tony Liu", "Richard D. Lange", "K. Kording" ], "externalIds": { "ArXiv": "2308.02439", "DBLP": "journals/corr/abs-2308-02439", "DOI": "10.48550/arXiv.2308.02439", "CorpusId": 260611570 }, "url": "https://www.semanticscholar.org/paper/4423887a6a1cdb8ef16e05200abb26b76c6fe475", "referenceCount": 28, "citationCount": 11, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ChatGPT outperforms crowd workers for text-annotation tasks", "abstract": "Many NLP applications require manual text annotations for a variety of tasks, notably to train classifiers or evaluate the performance of unsupervised models. Depending on the size and degree of complexity, the tasks may be conducted by crowd workers on platforms such as MTurk as well as trained annotators, such as research assistants. Using four samples of tweets and news articles (n = 6,183), we show that ChatGPT outperforms crowd workers for several annotation tasks, including relevance, stance, topics, and frame detection. Across the four datasets, the zero-shot accuracy of ChatGPT exceeds that of crowd workers by about 25 percentage points on average, while ChatGPT’s intercoder agreement exceeds that of both crowd workers and trained annotators for all tasks. Moreover, the per-annotation cost of ChatGPT is less than $0.003—about thirty times cheaper than MTurk.
These results demonstrate the potential of large language models to drastically increase the efficiency of text classification.", "year": 2023, "venue": "Proceedings of the National Academy of Sciences of the United States of America", "authors": [ "F. Gilardi", "Meysam Alizadeh", "M. Kubli" ], "externalIds": { "DBLP": "journals/corr/abs-2303-15056", "ArXiv": "2303.15056", "PubMedCentral": "10372638", "DOI": "10.1073/pnas.2305016120", "CorpusId": 257766307, "PubMed": "37463210" }, "url": "https://www.semanticscholar.org/paper/a9e155fda1d97baa2b8712f580cc61887cc64e9b", "referenceCount": 25, "citationCount": 601, "influentialCitationCount": 27, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "ChatGPT: Beginning of an End of Manual Linguistic Data Annotation? Use Case of Automatic Genre Identification", "abstract": "ChatGPT has shown strong capabilities in natural language generation tasks, which naturally leads researchers to explore where its abilities end. In this paper, we examine whether ChatGPT can be used for zero-shot text classification, more specifically, automatic genre identification. We compare ChatGPT with a multilingual XLM-RoBERTa language model that was fine-tuned on datasets, manually annotated with genres. The models are compared on test sets in two languages: English and Slovenian. Results show that ChatGPT outperforms the fine-tuned model when applied to the dataset which was not seen before by either of the models. Even when applied on Slovenian language as an under-resourced language, ChatGPT's performance is no worse than when applied to English. However, if the model is fully prompted in Slovenian, the performance drops significantly, showing the current limitations of ChatGPT usage on smaller languages. The presented results lead us to questioning whether this is the beginning of an end of laborious manual annotation campaigns even for smaller languages, such as Slovenian.", "year": 2023, "venue": "arXiv.org", "authors": [ "Taja Kuzman", "I. Mozetič", "Nikola Ljubesic" ], "externalIds": { "ArXiv": "2303.03953", "DBLP": "journals/corr/abs-2303-03953", "DOI": "10.48550/arXiv.2303.03953", "CorpusId": 257405186 }, "url": "https://www.semanticscholar.org/paper/31f44f0f2124c54e47f4df54dec63118232c25da", "referenceCount": 47, "citationCount": 74, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Leveraging natural language processing to support automated assessment and feedback for student open responses in mathematics", "abstract": null, "year": 2023, "venue": "Journal of Computer Assisted Learning", "authors": [ "Anthony F. Botelho", "Sami Baral", "John A. Erickson", "Priyanka Benachamardi", "N. Heffernan" ], "externalIds": { "DBLP": "journals/jcal/BotelhoBEBH23", "DOI": "10.1111/jcal.12793", "CorpusId": 256887716 }, "url": "https://www.semanticscholar.org/paper/12035260003bda5b2c6846bfa8f3a7d212f61799", "referenceCount": 24, "citationCount": 23, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Large Language Models are Zero-Shot Reasoners", "abstract": "Pretrained large language models (LLMs) are widely used in many sub-fields of natural language processing (NLP) and generally known as excellent few-shot learners with task-specific exemplars. 
Notably, chain of thought (CoT) prompting, a recent technique for eliciting complex multi-step reasoning through step-by-step answer examples, achieved the state-of-the-art performances in arithmetics and symbolic reasoning, difficult system-2 tasks that do not follow the standard scaling laws for LLMs. While these successes are often attributed to LLMs' ability for few-shot learning, we show that LLMs are decent zero-shot reasoners by simply adding \"Let's think step by step\" before each answer. Experimental results demonstrate that our Zero-shot-CoT, using the same single prompt template, significantly outperforms zero-shot LLM performances on diverse benchmark reasoning tasks including arithmetics (MultiArith, GSM8K, AQUA-RAT, SVAMP), symbolic reasoning (Last Letter, Coin Flip), and other logical reasoning tasks (Date Understanding, Tracking Shuffled Objects), without any hand-crafted few-shot examples, e.g. increasing the accuracy on MultiArith from 17.7% to 78.7% and GSM8K from 10.4% to 40.7% with large InstructGPT model (text-davinci-002), as well as similar magnitudes of improvements with another off-the-shelf large model, 540B parameter PaLM. The versatility of this single prompt across very diverse reasoning tasks hints at untapped and understudied fundamental zero-shot capabilities of LLMs, suggesting high-level, multi-task broad cognitive capabilities may be extracted by simple prompting. We hope our work not only serves as the minimal strongest zero-shot baseline for the challenging reasoning benchmarks, but also highlights the importance of carefully exploring and analyzing the enormous zero-shot knowledge hidden inside LLMs before crafting finetuning datasets or few-shot exemplars.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Takeshi Kojima", "S. Gu", "Machel Reid", "Yutaka Matsuo", "Yusuke Iwasawa" ], "externalIds": { "DBLP": "journals/corr/abs-2205-11916", "ArXiv": "2205.11916", "CorpusId": 249017743 }, "url": "https://www.semanticscholar.org/paper/e7ad08848d5d7c5c47673ffe0da06af443643bda", "referenceCount": 61, "citationCount": 2722, "influentialCitationCount": 259, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Automated Scoring for Reading Comprehension via In-context BERT Tuning", "abstract": "Automated scoring of open-ended student responses has the potential to significantly reduce human grader effort. Recent advances in automated scoring often leverage textual representations based on pre-trained language models such as BERT and GPT as input to scoring models. Most existing approaches train a separate model for each item/question, which is suitable for scenarios such as essay scoring where items can be quite different from one another. However, these approaches have two limitations: 1) they fail to leverage item linkage for scenarios such as reading comprehension where multiple items may share a reading passage; 2) they are not scalable since storing one model per item becomes difficult when models have a large number of parameters. In this paper, we report our (grand prize-winning) solution to the National Assessment of Education Progress (NAEP) automated scoring challenge for reading comprehension. Our approach, in-context BERT fine-tuning, produces a single shared scoring model for all items with a carefully-designed input structure to provide contextual information on each item.
We demonstrate the effectiveness of our approach via local evaluations using the training dataset provided by the challenge. We also discuss the biases, common error types, and limitations of our approach.", "year": 2022, "venue": "International Conference on Artificial Intelligence in Education", "authors": [ "Nigel Fernandez", "Aritra Ghosh", "Naiming Liu", "Zichao Wang", "Benoît Choffin", "Richard Baraniuk", "Andrew S. Lan" ], "externalIds": { "ArXiv": "2205.09864", "DBLP": "journals/corr/abs-2205-09864", "DOI": "10.48550/arXiv.2205.09864", "CorpusId": 248964958 }, "url": "https://www.semanticscholar.org/paper/ef5195b8b50cb6c4b1af1a457386ae10829b797b", "referenceCount": 32, "citationCount": 16, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PaLM: Scaling Language Modeling with Pathways", "abstract": "Large language models have been shown to achieve remarkable performance across a variety of natural language tasks using few-shot learning, which drastically reduces the number of task-specific training examples needed to adapt the model to a particular application. To further our understanding of the impact of scale on few-shot learning, we trained a 540-billion parameter, densely activated, Transformer language model, which we call Pathways Language Model PaLM. We trained PaLM on 6144 TPU v4 chips using Pathways, a new ML system which enables highly efficient training across multiple TPU Pods. We demonstrate continued benefits of scaling by achieving state-of-the-art few-shot learning results on hundreds of language understanding and generation benchmarks. On a number of these tasks, PaLM 540B achieves breakthrough performance, outperforming the finetuned state-of-the-art on a suite of multi-step reasoning tasks, and outperforming average human performance on the recently released BIG-bench benchmark. A significant number of BIG-bench tasks showed discontinuous improvements from model scale, meaning that performance steeply increased as we scaled to our largest model. PaLM also has strong capabilities in multilingual tasks and source code generation, which we demonstrate on a wide array of benchmarks. We additionally provide a comprehensive analysis on bias and toxicity, and study the extent of training data memorization with respect to model scale. Finally, we discuss the ethical considerations related to large language models and discuss potential mitigation strategies.", "year": 2022, "venue": "Journal of machine learning research", "authors": [ "Aakanksha Chowdhery", "Sharan Narang", "Jacob Devlin", "Maarten Bosma", "Gaurav Mishra", "Adam Roberts", "P. Barham", "Hyung Won Chung", "Charles Sutton", "Sebastian Gehrmann", "Parker Schuh", "Kensen Shi", "Sasha Tsvyashchenko", "Joshua Maynez", "Abhishek Rao", "Parker Barnes", "Yi Tay", "Noam M. Shazeer", "Vinodkumar Prabhakaran", "Emily Reif", "Nan Du", "Ben Hutchinson", "Reiner Pope", "James Bradbury", "Jacob Austin", "M. Isard", "Guy Gur-Ari", "Pengcheng Yin", "Toju Duke", "Anselm Levskaya", "Sanjay Ghemawat", "Sunipa Dev", "H. Michalewski", "Xavier García", "Vedant Misra", "Kevin Robinson", "Liam Fedus", "Denny Zhou", "Daphne Ippolito", "D. Luan", "Hyeontaek Lim", "Barret Zoph", "A. Spiridonov", "Ryan Sepassi", "David Dohan", "Shivani Agrawal", "Mark Omernick", "Andrew M. Dai", "Thanumalayan Sankaranarayana Pillai", "Marie Pellat", "Aitor Lewkowycz", "Erica Moreira", "R. 
Child", "Oleksandr Polozov", "Katherine Lee", "Zongwei Zhou", "Xuezhi Wang", "Brennan Saeta", "Mark Díaz", "Orhan Firat", "Michele Catasta", "Jason Wei", "K. Meier-Hellstern", "D. Eck", "J. Dean", "Slav Petrov", "Noah Fiedel" ], "externalIds": { "ArXiv": "2204.02311", "DBLP": "journals/corr/abs-2204-02311", "CorpusId": 247951931 }, "url": "https://www.semanticscholar.org/paper/094ff971d6a8b8ff870946c9b3ce5aa173617bfb", "referenceCount": 173, "citationCount": 4789, "influentialCitationCount": 335, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Survey on Automated Short Answer Grading with Deep Learning: from Word Embeddings to Transformers", "abstract": "Automated short answer grading (ASAG) has gained attention in education as a means to scale educational tasks to the growing number of students. Recent progress in Natural Language Processing and Machine Learning has largely influenced the field of ASAG, of which we survey the recent research advancements. We complement previous surveys by providing a comprehensive analysis of recently published methods that deploy deep learning approaches. In particular, we focus our analysis on the transition from hand-engineered features to representation learning approaches, which learn representative features for the task at hand automatically from large corpora of data. We structure our analysis of deep learning methods along three categories: word embeddings, sequential models, and attention-based methods. Deep learning impacted ASAG differently than other fields of NLP, as we noticed that the learned representations alone do not contribute to achieve the best results, but they rather show to work in a complementary way with hand-engineered features. The best performance are indeed achieved by methods that combine the carefully hand-engineered features with the power of the semantic descriptions provided by the latest models, like transformers architectures. We identify challenges and provide an outlook on research direction that can be addressed in the future. a reference answer. The final score was predicted by a logistic regression classifier. In addition, domain adaption was used, where different features have individual weights for each domain.", "year": 2022, "venue": "arXiv.org", "authors": [ "Stefan Haller", "Adina Aldea", "C. Seifert", "N. Strisciuglio" ], "externalIds": { "ArXiv": "2204.03503", "DBLP": "journals/corr/abs-2204-03503", "DOI": "10.48550/arXiv.2204.03503", "CorpusId": 248006110 }, "url": "https://www.semanticscholar.org/paper/3e0b093db102aaf4e491342194d5942211081748", "referenceCount": 77, "citationCount": 31, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Training language models to follow instructions with human feedback", "abstract": "Making language models bigger does not inherently make them better at following a user's intent. For example, large language models can generate outputs that are untruthful, toxic, or simply not helpful to the user. In other words, these models are not aligned with their users. In this paper, we show an avenue for aligning language models with user intent on a wide range of tasks by fine-tuning with human feedback. Starting with a set of labeler-written prompts and prompts submitted through the OpenAI API, we collect a dataset of labeler demonstrations of the desired model behavior, which we use to fine-tune GPT-3 using supervised learning. 
We then collect a dataset of rankings of model outputs, which we use to further fine-tune this supervised model using reinforcement learning from human feedback. We call the resulting models InstructGPT. In human evaluations on our prompt distribution, outputs from the 1.3B parameter InstructGPT model are preferred to outputs from the 175B GPT-3, despite having 100x fewer parameters. Moreover, InstructGPT models show improvements in truthfulness and reductions in toxic output generation while having minimal performance regressions on public NLP datasets. Even though InstructGPT still makes simple mistakes, our results show that fine-tuning with human feedback is a promising direction for aligning language models with human intent.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Long Ouyang", "Jeff Wu", "Xu Jiang", "Diogo Almeida", "Carroll L. Wainwright", "Pamela Mishkin", "Chong Zhang", "Sandhini Agarwal", "Katarina Slama", "Alex Ray", "John Schulman", "Jacob Hilton", "Fraser Kelton", "Luke E. Miller", "Maddie Simens", "Amanda Askell", "P. Welinder", "P. Christiano", "J. Leike", "Ryan J. Lowe" ], "externalIds": { "DBLP": "conf/nips/Ouyang0JAWMZASR22", "ArXiv": "2203.02155", "CorpusId": 246426909 }, "url": "https://www.semanticscholar.org/paper/d766bffc357127e0dc86dd69561d5aeb520d6f4c", "referenceCount": 83, "citationCount": 8493, "influentialCitationCount": 1115, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning to summarize from human feedback", "abstract": "As language models become more powerful, training and evaluation are increasingly bottlenecked by the data and metrics used for a particular task. For example, summarization models are often trained to predict human reference summaries and evaluated using ROUGE, but both of these metrics are rough proxies for what we really care about---summary quality. In this work, we show that it is possible to significantly improve summary quality by training a model to optimize for human preferences. We collect a large, high-quality dataset of human comparisons between summaries, train a model to predict the human-preferred summary, and use that model as a reward function to fine-tune a summarization policy using reinforcement learning. We apply our method to a version of the TL;DR dataset of Reddit posts and find that our models significantly outperform both human reference summaries and much larger models fine-tuned with supervised learning alone. Our models also transfer to CNN/DM news articles, producing summaries nearly as good as the human reference without any news-specific fine-tuning. We conduct extensive analyses to understand our human feedback dataset and fine-tuned models We establish that our reward model generalizes to new datasets, and that optimizing our reward model results in better summaries than optimizing ROUGE according to humans. We hope the evidence from our paper motivates machine learning researchers to pay closer attention to how their training loss affects the model behavior they actually want.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Nisan Stiennon", "Long Ouyang", "Jeff Wu", "Daniel M. Ziegler", "Ryan J. 
Lowe", "Chelsea Voss", "Alec Radford", "Dario Amodei", "Paul Christiano" ], "externalIds": { "MAG": "3082115681", "DBLP": "journals/corr/abs-2009-01325", "ArXiv": "2009.01325", "CorpusId": 221665105 }, "url": "https://www.semanticscholar.org/paper/053b1d7b97eb2c91fc3921d589c160b0923c70b1", "referenceCount": 84, "citationCount": 1418, "influentialCitationCount": 176, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Moving beyond Test Scores: Analyzing the Effectiveness of a Digital Learning Game through Learning Analytics", "abstract": "A challenge in digital learning games is assessing students’ learning behaviors, which are often intertwined with game behaviors. How do we know whether students have learned enough or needed more practice at the end of their game play? To answer this question, we performed post hoc analyses on a prior study of the game Decimal Point , which teaches decimal numbers and decimal operations to middle school students. Using Bayesian Knowledge Tracing, we found that students had the most difficulty with mastering the number line and sorting skills, but also tended to over-practice the skills they had previously mastered. In addition, using students’ survey responses and in-game measurements, we identified the best feature sets to predict test scores and self-reported enjoyment. Analyzing these features and their connections with learning outcomes and enjoyment yielded useful insights into areas of improvement for the game. We conclude by highlighting the need for combining traditional test measures with rigorous learning analytics to critically evaluate the effectiveness of learning games.", "year": 2020, "venue": "Educational Data Mining", "authors": [ "H. Nguyen", "Xinying Hou", "John C. Stamper", "B. McLaren" ], "externalIds": { "MAG": "3100078491", "DBLP": "conf/edm/NguyenHSM20", "CorpusId": 220933772 }, "url": "https://www.semanticscholar.org/paper/7a848f3cfd47aeba8a5e5020c29a86f5a7f18c57", "referenceCount": 62, "citationCount": 12, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Should You Fine-Tune BERT for Automated Essay Scoring?", "abstract": "Most natural language processing research now recommends large Transformer-based models with fine-tuning for supervised classification tasks; older strategies like bag-of-words features and linear models have fallen out of favor. Here we investigate whether, in automated essay scoring (AES) research, deep neural models are an appropriate technological choice. We find that fine-tuning BERT produces similar performance to classical models at significant additional cost. We argue that while state-of-the-art strategies do match existing best results, they come with opportunity costs in computational resources. We conclude with a review of promising areas for research on student essays where the unique characteristics of Transformers may provide benefits over classical methods to justify the costs.", "year": 2020, "venue": "Workshop on Innovative Use of NLP for Building Educational Applications", "authors": [ "Elijah Mayfield", "A. 
Black" ], "externalIds": { "MAG": "3037207300", "ACL": "2020.bea-1.15", "DBLP": "conf/bea/MayfieldB20", "DOI": "10.18653/v1/2020.bea-1.15", "CorpusId": 220059617 }, "url": "https://www.semanticscholar.org/paper/54e267587f4493a6dc600724af03a70d35d3067d", "referenceCount": 81, "citationCount": 88, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Language Models are Few-Shot Learners", "abstract": "Recent work has demonstrated substantial gains on many NLP tasks and benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples. By contrast, humans can generally perform a new language task from only a few examples or from simple instructions - something which current NLP systems still largely struggle to do. Here we show that scaling up language models greatly improves task-agnostic, few-shot performance, sometimes even reaching competitiveness with prior state-of-the-art fine-tuning approaches. Specifically, we train GPT-3, an autoregressive language model with 175 billion parameters, 10x more than any previous non-sparse language model, and test its performance in the few-shot setting. For all tasks, GPT-3 is applied without any gradient updates or fine-tuning, with tasks and few-shot demonstrations specified purely via text interaction with the model. GPT-3 achieves strong performance on many NLP datasets, including translation, question-answering, and cloze tasks, as well as several tasks that require on-the-fly reasoning or domain adaptation, such as unscrambling words, using a novel word in a sentence, or performing 3-digit arithmetic. At the same time, we also identify some datasets where GPT-3's few-shot learning still struggles, as well as some datasets where GPT-3 faces methodological issues related to training on large web corpora. Finally, we find that GPT-3 can generate samples of news articles which human evaluators have difficulty distinguishing from articles written by humans. We discuss broader societal impacts of this finding and of GPT-3 in general.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Tom B. Brown", "Benjamin Mann", "Nick Ryder", "Melanie Subbiah", "J. Kaplan", "Prafulla Dhariwal", "Arvind Neelakantan", "Pranav Shyam", "Girish Sastry", "Amanda Askell", "Sandhini Agarwal", "Ariel Herbert-Voss", "Gretchen Krueger", "T. Henighan", "R. Child", "A. Ramesh", "Daniel M. Ziegler", "Jeff Wu", "Clemens Winter", "Christopher Hesse", "Mark Chen", "Eric Sigler", "Ma-teusz Litwin", "Scott Gray", "B. Chess", "Jack Clark", "Christopher Berner", "Sam McCandlish", "Alec Radford", "I. Sutskever", "Dario Amodei" ], "externalIds": { "ArXiv": "2005.14165", "DBLP": "conf/nips/BrownMRSKDNSSAA20", "MAG": "3030163527", "CorpusId": 218971783 }, "url": "https://www.semanticscholar.org/paper/90abbc2cf38462b954ae1b772fac9532e2ccd8b0", "referenceCount": 146, "citationCount": 30854, "influentialCitationCount": 3529, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Pre-Training BERT on Domain Resources for Short Answer Grading", "abstract": "Pre-trained BERT contextualized representations have achieved state-of-the-art results on multiple downstream NLP tasks by fine-tuning with task-specific data. 
While there has been a lot of focus on task-specific fine-tuning, there has been limited work on improving the pre-trained representations. In this paper, we explore ways of improving the pre-trained contextual representations for the task of automatic short answer grading, a critical component of intelligent tutoring systems. We show that the pre-trained BERT model can be improved by augmenting data from the domain-specific resources like textbooks. We also present a new approach to use labeled short answering grading data for further enhancement of the language model. Empirical evaluation on multi-domain datasets shows that task-specific fine-tuning on the enhanced pre-trained language model achieves superior performance for short answer grading.", "year": 2019, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Chul Sung", "Tejas I. Dhamecha", "Swarnadeep Saha", "Tengfei Ma", "V. Reddy", "R. Arora" ], "externalIds": { "DBLP": "conf/emnlp/SungDSMRA19", "ACL": "D19-1628", "MAG": "2970812170", "DOI": "10.18653/v1/D19-1628", "CorpusId": 202763579 }, "url": "https://www.semanticscholar.org/paper/b483694693f087827f966c390fe3878b39712334", "referenceCount": 20, "citationCount": 62, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Automated Summarization Evaluation (ASE) Using Natural Language Processing Tools", "abstract": null, "year": 2019, "venue": "International Conference on Artificial Intelligence in Education", "authors": [ "S. Crossley", "Minkyun Kim", "L. Allen", "D. McNamara" ], "externalIds": { "DBLP": "conf/aied/CrossleyKAM19", "MAG": "3092257139", "DOI": "10.1007/978-3-030-23204-7_8", "CorpusId": 195353287 }, "url": "https://www.semanticscholar.org/paper/c5a3dc6708dd14afe66d3ef20643eb27f3b9cf7d", "referenceCount": 45, "citationCount": 25, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fast and Easy Short Answer Grading with High Accuracy", "abstract": "We present a fast, simple, and high-accuracy short answer grading system. Given a short-answer question and its correct answer, key measures of the correctness of a student response can be derived from its semantic similarity with the correct answer. Our supervised model (1) utilizes recent advances in the identification of short-text similarity, and (2) augments text similarity features with key grading-specific constructs. We present experimental results where our model demonstrates top performance on multiple benchmarks.", "year": 2016, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Md Arafat Sultan", "Cristobal Salazar", "T. 
Sumner" ], "externalIds": { "MAG": "2474948357", "DBLP": "conf/naacl/SultanSS16", "ACL": "N16-1123", "DOI": "10.18653/v1/N16-1123", "CorpusId": 5975447 }, "url": "https://www.semanticscholar.org/paper/985443d2ab56323269551a6f895657aa5ff1a7d7", "referenceCount": 29, "citationCount": 111, "influentialCitationCount": 12, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Eras and Trends of Automatic Short Answer Grading", "abstract": null, "year": 2015, "venue": "International Journal of Artificial Intelligence in Education", "authors": [ "Steven Burrows", "Iryna Gurevych", "Benno Stein" ], "externalIds": { "MAG": "1967082761", "DBLP": "journals/aiedu/BurrowsGS15", "DOI": "10.1007/s40593-014-0026-8", "CorpusId": 5917679 }, "url": "https://www.semanticscholar.org/paper/6404b29ac83a69670f1dd4b887e026bfbd844d83", "referenceCount": 181, "citationCount": 323, "influentialCitationCount": 25, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Reading comprehension components and their relation to writing", "abstract": "Within the educational community, research on student literacy often combines reading and writing measures, as they are presumed to draw on similar skills and background knowledge. However, relatively few studies have investigated the underlying cognitive skills required for both activities, and to what extent the required background knowledge and cognitive processes overlap. The current study investigates how individual differences commonly related to reading comprehension ability overlap and contribute to students' writing proficiency. University students ranging from 19 to 37 years old (n = 108) completed assessments to examine their reading comprehension and writing skills, as well as their vocabulary knowledge, lower-level cognitive skills (working memory), and higher-level cognitive skills (text memory, text inferencing, knowledge access, knowledge integration). Results indicated that reading comprehension was strongly related to both vocabulary knowledge and the higher-level cognitive skills. Further, writing ability was moderately associated with a subset of the measured variables, namely vocabulary knowledge and the ability to access prior knowledge. These results support the hypothesis that reading comprehension and writing share common knowledge sources and higher-level cognitive skills, although the writing process is much less reliant on these measured variables than reading comprehension.", "year": 2014, "venue": "L’Année psychologique", "authors": [ "L. Allen", "Erica L. Snow", "S. Crossley", "G. T. Jackson", "D. McNamara" ], "externalIds": { "MAG": "2014220607", "DOI": "10.4074/S0003503314004047", "CorpusId": 143474635 }, "url": "https://www.semanticscholar.org/paper/0344f86a750205315b8e091afa5cfb8c8bb4bbf6", "referenceCount": 68, "citationCount": 47, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Psychology" ] }, { "title": "Student modeling approaches: A literature review for the last decade", "abstract": null, "year": 2013, "venue": "Expert systems with applications", "authors": [ "K. Chrysafiadi", "M. 
Virvou" ], "externalIds": { "DBLP": "journals/eswa/ChrysafiadiV13", "MAG": "1981119341", "DOI": "10.1016/j.eswa.2013.02.007", "CorpusId": 11803429 }, "url": "https://www.semanticscholar.org/paper/95863f91b560179326bec48e3ad06804cae33514", "referenceCount": 164, "citationCount": 292, "influentialCitationCount": 26, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Computer-based assessment of student-constructed responses", "abstract": null, "year": 2012, "venue": "Behavior Research Methods", "authors": [ "Joseph P. Magliano", "A. Graesser" ], "externalIds": { "MAG": "2052929314", "DOI": "10.3758/s13428-012-0211-3", "CorpusId": 21080149, "PubMed": "22581494" }, "url": "https://www.semanticscholar.org/paper/8f0c980ab513a356148d90a48eca06fed3429775", "referenceCount": 101, "citationCount": 44, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Children’s comprehension problems in oral and written language: a cognitive perspective", "abstract": "Taylor and Francis CEJI_A_521402.sgm 10.1080/14675986.2010.521402 Intercultural Education 467-5986 (pri t)/1469-8439 (online) Original Article 2 10 & Francis 1 50 0 00October 20 0 Miss Rosse laBeltra e ros ell .beltr m @univr it Children’s comprehension problems in oral and written language: a cognitive perspective, edited by Kate Cain and Jane Oakhill, New York, The Guilford Press, 2008, 295 pp., $27.00 (paperback), ISBN 978-1-59385-832-2", "year": 2010, "venue": "", "authors": [ "Rossella Beltrame" ], "externalIds": { "MAG": "2024589969", "DOI": "10.1080/14675986.2010.521402", "CorpusId": 144243507 }, "url": "https://www.semanticscholar.org/paper/1b368ff4edeb6d9c6039d45a562da3d62e073dc1", "referenceCount": 0, "citationCount": 182, "influentialCitationCount": 12, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology" ] }, { "title": "Visible learning: a synthesis of over 800 meta‐analyses relating to achievement", "abstract": null, "year": 2009, "venue": "", "authors": [ "M. Carter" ], "externalIds": { "MAG": "2153022270", "DOI": "10.1080/01443410903415150", "CorpusId": 145697895 }, "url": "https://www.semanticscholar.org/paper/89c622f711c10e1bdc0c8e50b9ca4bd6936c73b7", "referenceCount": 7, "citationCount": 5891, "influentialCitationCount": 699, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology" ] }, { "title": "Developing the theory of formative assessment", "abstract": null, "year": 2009, "venue": "", "authors": [ "P. Black", "D. Wiliam" ], "externalIds": { "MAG": "1984748091", "DOI": "10.1007/S11092-008-9068-5", "CorpusId": 55955635 }, "url": "https://www.semanticscholar.org/paper/587e20871789d959ceb1f7a971695e73670c0463", "referenceCount": 80, "citationCount": 2699, "influentialCitationCount": 247, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology" ] }, { "title": "Focus on Formative Feedback", "abstract": "This article reviews the corpus of research on feedback, with a focus on formative feedback—defined as information communicated to the learner that is intended to modify his or her thinking or behavior to improve learning. According to researchers, formative feedback should be nonevaluative, supportive, timely, and specific. Formative feedback is usually presented as information to a learner in response to some action on the learner’s part. 
It comes in a variety of types (e.g., verification of response accuracy, explanation of the correct answer, hints, worked examples) and can be administered at various times during the learning process (e.g., immediately following an answer, after some time has elapsed). Finally, several variables have been shown to interact with formative feedback’s success at promoting learning (e.g., individual characteristics of the learner and aspects of the task). All of these issues are discussed. This review concludes with guidelines for generating formative feedback.", "year": 2007, "venue": "", "authors": [ "V. Shute" ], "externalIds": { "MAG": "2085864841", "DOI": "10.3102/0034654307313795", "CorpusId": 3703021 }, "url": "https://www.semanticscholar.org/paper/0cc18ca8bad7c6c8ead9d9c793ca52c6928bd0d4", "referenceCount": 133, "citationCount": 2390, "influentialCitationCount": 267, "isOpenAccess": true, "fieldsOfStudy": [ "Psychology" ] }, { "title": "Automatic Short Answer Marking", "abstract": "A connector as disclosed for connecting fluid passages through a flexible sheet material. The connector is appropriate for use in connecting the coolant lines of a personal cooling garment through a waterproof immersion suit or a chemical proof outer garment. The connector includes inside and outside connector blocks with matching faces that sandwich the sheet material between them. Fluid passages through the two blocks meet at respective ports in the matching faces and are joined by coupling sleeves extending through holes in the sheet material. O-rings around the ports seal the holes through the sheet material. A cap screw or the like secures the two blocks to one another. The connector is equipped with valve male fittings and male lock components of a standard connector system that is now used for personal cooling garments. This allows the use of the connector in conjunction with an existing and satisfactory \"zero leak\" connect and disconnect system.", "year": 2005, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Stephen G. Pulman", "J. Sukkarieh" ], "externalIds": { "ACL": "W05-0202", "MAG": "2056874695", "DOI": "10.3115/1609829.1609831", "CorpusId": 267898439 }, "url": "https://www.semanticscholar.org/paper/91aa2c5ebd310c8c490b21d8c4e997c4bc1ccccf", "referenceCount": 13, "citationCount": 40, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Assessment of Reading Comprehension: A Review of Practices—Past, Present, and Future", "abstract": "The purpose of this chapter is to build an argument for a fresh line of inquiry into the assessment of reading comprehension. We intend to accomplish that goal by providing a rich and detailed historical account of reading comprehension, both as a theoretical phenomenon and an operational construct that lives and breathes in classrooms throughout America. We review both basic research, which deals with reading comprehension largely in its theoretical aspect, and applied research, which is much more concerned about how comprehension gets operationalized in classrooms, reading materials, and tests. With a renewed professional interest in reading comprehension (e.g., Rand Study Group, 2001), it is an optimal time to undertake a new initiative in the area of reading comprehension assessment. For a host of reasons, many having to do with curricular politics, reading comprehension has been placed on a back burner for well over 15 years. 
It is time it returned to a central role in discussions of reading. To do so, it needs our rapt and collective attention at this particular", "year": 2005, "venue": "", "authors": [ "P. David Pearson", "Diane Hamm" ], "externalIds": { "MAG": "2892211760", "DOI": "10.4324/9781410612762-9", "CorpusId": 149477638 }, "url": "https://www.semanticscholar.org/paper/7b30b0087537d68fe0da2312368ba76780a0bb40", "referenceCount": 92, "citationCount": 153, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology" ] }, { "title": "Assessing Reading Skill With a Think-Aloud Procedure and Latent Semantic Analysis", "abstract": "The viability of assessing reading strategies is studied based on think-aloud protocols combined with Latent Semantic Analysis (LSA). Readers in two studies thought aloud after reading specific focal sentences embedded in two stories. LSA was used to estimate the semantic similarity between readers' think-aloud protocols to the focal sentences and sentences in the stories that provided direct causal antecedents to the focal sentences. Study 1 demonstrated that according to human- and LSA-based assessments of the protocols, the responses of less-skilled readers semantically overlapped more with the focal sentences than with the causal antecedent sentences, whereas the responses of skilled readers overlapped with these sentences equally. In addition, the extent that the semantic overlap with causal antecedents was greater than the overlap with the focal sentences predicted performance on comprehension test questions and the Nelson-Denny test of reading skill. Study 2 replicated these findings and also demonstrated that the semantic overlap scores (based on the protocols) predicted recall for stories that were read silently. Together, the findings supported the viability of developing a computerized assessment tool using verbal protocols and LSA.", "year": 2003, "venue": "", "authors": [ "Joseph P. Magliano", "Keith K. Millis" ], "externalIds": { "MAG": "2076929833", "DOI": "10.1207/S1532690XCI2103_02", "CorpusId": 145692192 }, "url": "https://www.semanticscholar.org/paper/76d9ae1d2a3590bb737352edc95588f9cfe4140d", "referenceCount": 99, "citationCount": 176, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology" ] }, { "title": "On the Construct Validity of Multiple- Choice Items for Reading Comprehension", "abstract": "In this study 590 third-grade students took one of four reading comprehension tests with either multiple- choice items or open-ended items. Each also took 32 tests indicating 16 semantic Structure-of-Intellect (si) abilities. Four conditions or groups were distinguished on the basis of the reading comprehension tests. The four 33 x 33 correlation matrices were analyzed si multaneously with a four-group LISREL model. The 16 intellectual abilities explained approximately 62% of the variance in true reading comprehension scores. None of the SI abilities proved to be differentially re lated to item type. Therefore, it was concluded that item type for reading comprehension is congeneric with respect to the SI abilities measured. Index terms: construct validity, item format, free response, reading comprehension, Structure-of-Intellect model.", "year": 1990, "venue": "", "authors": [ "H. 
Bergh" ], "externalIds": { "MAG": "2147514634", "DOI": "10.1177/014662169001400101", "CorpusId": 40756676 }, "url": "https://www.semanticscholar.org/paper/7d812b336be0aec9682b9b8911879af0fa0c2c9e", "referenceCount": 49, "citationCount": 53, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Psychology" ] }, { "title": "Exploring the Potential of Using an Ai Language Model for Automated Essay Scoring", "abstract": null, "year": 2023, "venue": "Social Science Research Network", "authors": [ "Atsushi Mizumoto", "Masaki Eguchi" ], "externalIds": { "DOI": "10.2139/ssrn.4373111", "CorpusId": 257277710 }, "url": "https://www.semanticscholar.org/paper/a6495e4fcf6e0092e9429c955bb4782e3184dee9", "referenceCount": 58, "citationCount": 147, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Knowledge tracing: Modeling the acquisition of procedural knowledge", "abstract": null, "year": 2005, "venue": "User modeling and user-adapted interaction", "authors": [ "Albert T. Corbett", "John R. Anderson" ], "externalIds": { "DBLP": "journals/umuai/CorbettA95", "MAG": "2015040676", "DOI": "10.1007/BF01099821", "CorpusId": 19228797 }, "url": "https://www.semanticscholar.org/paper/645b2c28c28bd28eaa187a2faafa5ec12bc12e3a", "referenceCount": 34, "citationCount": 1942, "influentialCitationCount": 279, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "2022. Emergent Abilities of Large Language Models", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Knowledge Tracing: A Survey", "abstract": null, "year": null, "venue": "ACM Computing Surveys", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Supporting Foundational Literacy Assessment in LMICs: Can LLMs Grade Short-answer Reading Comprehension Questions? (2023)", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Few-shot Pairwise Rank Prompting: An Effective Non-Parametric Retrieval Model": { "paper_title": "Few-shot Pairwise Rank Prompting: An Effective Non-Parametric Retrieval Model", "arxiv_id": "2409.17745v2", "keyword": "large language model", "authors": [ "Nilanjan Sinhababu", "Andrew Parry", "Debasis Ganguly", "Debasis Samanta", "Pabitra Mitra" ], "references": [ { "title": "\"In-Context Learning\" or: How I learned to stop worrying and love \"Applied Information Retrieval\"", "abstract": "With the increasing ability of large language models (LLMs), in-context learning (ICL) has evolved as a new paradigm for natural language processing (NLP), where instead of fine-tuning the parameters of an LLM specific to a downstream task with labeled examples, a small number of such examples is appended to a prompt instruction for controlling the decoder's generation process. ICL, thus, is conceptually similar to a non-parametric approach, such as $k$-NN, where the prediction for each instance essentially depends on the local topology, i.e., on a localised set of similar instances and their labels (called few-shot examples). 
This suggests that a test instance in ICL is analogous to a query in IR, and similar examples in ICL retrieved from a training set relate to a set of documents retrieved from a collection in IR. While standard unsupervised ranking models can be used to retrieve these few-shot examples from a training set, the effectiveness of the examples can potentially be improved by re-defining the notion of relevance specific to its utility for the downstream task, i.e., considering an example to be relevant if including it in the prompt instruction leads to a correct prediction. With this task-specific notion of relevance, it is possible to train a supervised ranking model (e.g., a bi-encoder or cross-encoder), which potentially learns to optimally select the few-shot examples. We believe that the recent advances in neural rankers can potentially find a use case for this task of optimally choosing examples for more effective downstream ICL predictions.", "year": 2024, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "Andrew Parry", "Debasis Ganguly", "Manish Chandra" ], "externalIds": { "DBLP": "conf/sigir/ParryGC24", "ArXiv": "2405.01116", "DOI": "10.1145/3626772.3657842", "CorpusId": 269502338 }, "url": "https://www.semanticscholar.org/paper/fd2217cf58b141a4dbb07dc5f6374d38807dbf1c", "referenceCount": 87, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Two-Step SPLADE: Simple, Efficient and Effective Approximation of SPLADE", "abstract": null, "year": 2024, "venue": "European Conference on Information Retrieval", "authors": [ "Carlos Lassance", "Hervé Déjean", "S. Clinchant", "Nicola Tonellotto" ], "externalIds": { "DBLP": "journals/corr/abs-2404-13357", "ArXiv": "2404.13357", "DOI": "10.1007/978-3-031-56060-6_23", "CorpusId": 268692418 }, "url": "https://www.semanticscholar.org/paper/aceceff742e06ddd5c2828c83cb8cce227221533", "referenceCount": 32, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Who Determines What Is Relevant? Humans or AI? Why Not Both?", "abstract": "A spectrum of human-artificial intelligence collaboration in assessing relevance.", "year": 2024, "venue": "Communications of the ACM", "authors": [ "G. Faggioli", "Laura Dietz", "Charles Clarke", "Gianluca Demartini", "Matthias Hagen", "Claudia Hauff", "Noriko Kando", "E. Kanoulas", "Martin Potthast", "Benno Stein", "Henning Wachsmuth" ], "externalIds": { "DBLP": "journals/cacm/FaggioliDCDHHKKPSW24", "DOI": "10.1145/3624730", "CorpusId": 268432140 }, "url": "https://www.semanticscholar.org/paper/4debaffe1b311093c738fed65b956ecff34d199d", "referenceCount": 14, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RankZephyr: Effective and Robust Zero-Shot Listwise Reranking is a Breeze!", "abstract": "In information retrieval, proprietary large language models (LLMs) such as GPT-4 and open-source counterparts such as LLaMA and Vicuna have played a vital role in reranking. However, the gap between open-source and closed models persists, with reliance on proprietary, non-transparent models constraining reproducibility. Addressing this gap, we introduce RankZephyr, a state-of-the-art, open-source LLM for listwise zero-shot reranking. 
RankZephyr not only bridges the effectiveness gap with GPT-4 but in some cases surpasses the proprietary model. Our comprehensive evaluations across several datasets (TREC Deep Learning Tracks; NEWS and COVID from BEIR) showcase this ability. RankZephyr benefits from strategic training choices and is resilient against variations in initial document ordering and the number of documents reranked. Additionally, our model outperforms GPT-4 on the NovelEval test set, comprising queries and passages past its training period, which addresses concerns about data contamination. To foster further research in this rapidly evolving field, we provide all code necessary to reproduce our results at https://github.com/castorini/rank_llm.", "year": 2023, "venue": "arXiv.org", "authors": [ "Ronak Pradeep", "Sahel Sharifymoghaddam", "Jimmy J. Lin" ], "externalIds": { "DBLP": "journals/corr/abs-2312-02724", "ArXiv": "2312.02724", "DOI": "10.48550/arXiv.2312.02724", "CorpusId": 265659387 }, "url": "https://www.semanticscholar.org/paper/5605e5bbe71a6d52be0930865f1635ed8644dea8", "referenceCount": 40, "citationCount": 31, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Adapt in Contexts: Retrieval-Augmented Domain Adaptation via In-Context Learning", "abstract": "Large language models (LLMs) have showcased their capability with few-shot inference known as in-context learning. However, in-domain demonstrations are not always readily available in real scenarios, leading to cross-domain in-context learning. Besides, LLMs are still facing challenges in long-tail knowledge in unseen and unfamiliar domains. The above limitations demonstrate the necessity of Unsupervised Domain Adaptation (UDA). In this paper, we study the UDA problem under an in-context learning setting to adapt language models from the source domain to the target domain without any target labels. The core idea is to retrieve a subset of cross-domain elements that are the most similar to the query, and elicit language model to adapt in an in-context manner by learning both target domain distribution and the discriminative task signal simultaneously with the augmented cross-domain in-context examples. We devise different prompting and training strategies, accounting for different LM architectures to learn the target distribution via language modeling. With extensive experiments on Sentiment Analysis (SA) and Named Entity Recognition (NER) tasks, we thoroughly study the effectiveness of ICL for domain transfer and demonstrate significant improvements over baseline models.", "year": 2023, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Quanyu Long", "Wenya Wang", "Sinno Jialin Pan" ], "externalIds": { "DBLP": "conf/emnlp/LongWP23", "ArXiv": "2311.11551", "DOI": "10.48550/arXiv.2311.11551", "CorpusId": 265294654 }, "url": "https://www.semanticscholar.org/paper/4a6c1ee10c448840f598a281374496f6ebe11b5c", "referenceCount": 46, "citationCount": 7, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Zephyr: Direct Distillation of LM Alignment", "abstract": "We aim to produce a smaller language model that is aligned to user intent. Previous research has shown that applying distilled supervised fine-tuning (dSFT) on larger models significantly improves task accuracy; however, these models are unaligned, i.e. they do not respond well to natural prompts. 
To distill this property, we experiment with the use of preference data from AI Feedback (AIF). Starting from a dataset of outputs ranked by a teacher model, we apply distilled direct preference optimization (dDPO) to learn a chat model with significantly improved intent alignment. The approach requires only a few hours of training without any additional sampling during fine-tuning. The final result, Zephyr-7B, sets the state-of-the-art on chat benchmarks for 7B parameter models, and requires no human annotation. In particular, results on MT-Bench show that Zephyr-7B surpasses Llama2-Chat-70B, the best open-access RLHF-based model. Code, models, data, and tutorials for the system are available at https://github.com/huggingface/alignment-handbook.", "year": 2023, "venue": "arXiv.org", "authors": [ "Lewis Tunstall", "E. Beeching", "Nathan Lambert", "Nazneen Rajani", "Kashif Rasul", "Younes Belkada", "Shengyi Huang", "Leandro von Werra", "Clémentine Fourrier", "Nathan Habib", "Nathan Sarrazin", "Omar Sanseviero", "Alexander M. Rush", "Thomas Wolf" ], "externalIds": { "DBLP": "journals/corr/abs-2310-16944", "ArXiv": "2310.16944", "DOI": "10.48550/arXiv.2310.16944", "CorpusId": 264490502 }, "url": "https://www.semanticscholar.org/paper/cdcf3f36866ef1e16eba26d57c2324362247ba84", "referenceCount": 28, "citationCount": 226, "influentialCitationCount": 38, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fine-Tuning LLaMA for Multi-Stage Text Retrieval", "abstract": "The effectiveness of multi-stage text retrieval has been solidly demonstrated since before the era of pre-trained language models. However, most existing studies utilize models that predate recent advances in large language models (LLMs). This study seeks to explore potential improvements that state-of-the-art LLMs can bring. We conduct a comprehensive study, fine-tuning the latest LLaMA model both as a dense retriever (RepLLaMA) and as a pointwise reranker (RankLLaMA) for both passage retrieval and document retrieval using the MS MARCO datasets. Our findings demonstrate that the effectiveness of large language models indeed surpasses that of smaller models. Additionally, since LLMs can inherently handle longer contexts, they can represent entire documents holistically, obviating the need for traditional segmenting and pooling strategies. Furthermore, evaluations on BEIR demonstrate that our RepLLaMA-RankLLaMA pipeline exhibits strong zero-shot effectiveness. Model checkpoints from this study are available on HuggingFace.", "year": 2023, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "Xueguang Ma", "Liang Wang", "Nan Yang", "Furu Wei", "Jimmy Lin" ], "externalIds": { "DBLP": "journals/corr/abs-2310-08319", "ArXiv": "2310.08319", "DOI": "10.48550/arXiv.2310.08319", "CorpusId": 263908865 }, "url": "https://www.semanticscholar.org/paper/a531e9a328ad1488567fa68c15d5bf30bfb90c78", "referenceCount": 63, "citationCount": 80, "influentialCitationCount": 23, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "In-Context Learning for Text Classification with Many Labels", "abstract": "In-context learning (ICL) using large language models for tasks with many labels is challenging due to the limited context window, which makes it difficult to fit a sufficient number of examples in the prompt. 
In this paper, we use a pre-trained dense retrieval model to bypass this limitation, giving the model only a partial view of the full label space for each inference call. Testing with recent open-source LLMs (OPT, LLaMA), we set new state of the art performance in few-shot settings for three common intent classification datasets, with no fine-tuning. We also surpass fine-tuned performance on fine-grained sentiment classification in certain cases. We analyze the performance across number of in-context examples and different model scales, showing that larger models are necessary to effectively make use of larger context lengths for ICL. By running several ablations, we analyze the model’s use of: a) the similarity of the in-context examples to the current input, b) the semantic content of the class names, and c) the correct correspondence between examples and labels. We demonstrate that all three are needed to varying degrees depending on the domain, contrary to certain recent works.", "year": 2023, "venue": "GENBENCH", "authors": [ "Aristides Milios", "Siva Reddy", "Dzmitry Bahdanau" ], "externalIds": { "DBLP": "journals/corr/abs-2309-10954", "ACL": "2023.genbench-1.14", "ArXiv": "2309.10954", "DOI": "10.48550/arXiv.2309.10954", "CorpusId": 262063582 }, "url": "https://www.semanticscholar.org/paper/b5baedd5b7c270903e6861bebbfda81b10d59419", "referenceCount": 38, "citationCount": 17, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Lost in the Middle: How Language Models Use Long Contexts", "abstract": "While recent language models have the ability to take long contexts as input, relatively little is known about how well they use longer context. We analyze the performance of language models on two tasks that require identifying relevant information in their input contexts: multi-document question answering and key-value retrieval. We find that performance can degrade significantly when changing the position of relevant information, indicating that current language models do not robustly make use of information in long input contexts. In particular, we observe that performance is often highest when relevant information occurs at the beginning or end of the input context, and significantly degrades when models must access relevant information in the middle of long contexts, even for explicitly long-context models. Our analysis provides a better understanding of how language models use their input context and provides new evaluation protocols for future long-context language models.", "year": 2023, "venue": "Transactions of the Association for Computational Linguistics", "authors": [ "Nelson F. Liu", "Kevin Lin", "John Hewitt", "Ashwin Paranjape", "Michele Bevilacqua", "F. Petroni", "Percy Liang" ], "externalIds": { "ArXiv": "2307.03172", "DBLP": "journals/tacl/LiuLHPBPL24", "ACL": "2024.tacl-1.9", "DOI": "10.1162/tacl_a_00638", "CorpusId": 259360665 }, "url": "https://www.semanticscholar.org/paper/1733eb7792f7a43dd21f51f4d1017a1bffd217b5", "referenceCount": 58, "citationCount": 767, "influentialCitationCount": 61, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Large Language Models are Effective Text Rankers with Pairwise Ranking Prompting", "abstract": "Ranking documents using Large Language Models (LLMs) by directly feeding the query and candidate documents into the prompt is an interesting and practical problem. 
However, researchers have found it difficult to outperform fine-tuned baseline rankers on benchmark datasets. We analyze pointwise and listwise ranking prompts used by existing methods and argue that off-the-shelf LLMs do not fully understand these challenging ranking formulations. In this paper, we propose to significantly reduce the burden on LLMs by using a new technique called Pairwise Ranking Prompting (PRP). Our results are the first in the literature to achieve state-of-the-art ranking performance on standard benchmarks using moderate-sized open-sourced LLMs. On TREC-DL 2019&2020, PRP based on the Flan-UL2 model with 20B parameters performs favorably with the previous best approach in the literature, which is based on the blackbox commercial GPT-4 that has 50x (estimated) model size, while outperforming other LLM-based solutions, such as InstructGPT which has 175B parameters, by over 10% for all ranking metrics. By using the same prompt template on seven BEIR tasks, PRP outperforms supervised baselines and outperforms the blackbox commercial ChatGPT solution by 4.2% and pointwise LLM-based solutions by more than 10% on average NDCG@10. Furthermore, we propose several variants of PRP to improve efficiency and show that it is possible to achieve competitive results even with linear complexity.", "year": 2023, "venue": "NAACL-HLT", "authors": [ "Zhen Qin", "R. Jagerman", "Kai Hui", "Honglei Zhuang", "Junru Wu", "Jiaming Shen", "Tianqi Liu", "Jialu Liu", "Donald Metzler", "Xuanhui Wang", "Michael Bendersky" ], "externalIds": { "ArXiv": "2306.17563", "DBLP": "journals/corr/abs-2306-17563", "DOI": "10.48550/arXiv.2306.17563", "CorpusId": 259309299 }, "url": "https://www.semanticscholar.org/paper/2d3bc530d8f1ed36932a70bc362ea94d988adec9", "referenceCount": 56, "citationCount": 134, "influentialCitationCount": 17, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Few-shot In-context Learning on Knowledge Base Question Answering", "abstract": "Question answering over knowledge bases is considered a difficult problem due to the challenge of generalizing to a wide variety of possible natural language questions. Additionally, the heterogeneity of knowledge base schema items between different knowledge bases often necessitates specialized training for different knowledge base question-answering (KBQA) datasets. To handle questions over diverse KBQA datasets with a unified training-free framework, we propose KB-BINDER, which for the first time enables few-shot in-context learning over KBQA tasks. Firstly, KB-BINDER leverages large language models like Codex to generate logical forms as the draft for a specific question by imitating a few demonstrations. Secondly, KB-BINDER grounds on the knowledge base to bind the generated draft to an executable one with BM25 score matching. The experimental results on four public heterogeneous KBQA datasets show that KB-BINDER can achieve a strong performance with only a few in-context demonstrations. Especially on GraphQA and 3-hop MetaQA, KB-BINDER can even outperform the state-of-the-art trained models. On GrailQA and WebQSP, our model is also on par with other fully-trained models. We believe KB-BINDER can serve as an important baseline for future research. We plan to release all the code and data. 
Our code is available at https://github.com/ltl3A87/KB-BINDER.", "year": 2023, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Tianle Li", "Xueguang Ma", "Alex Zhuang", "Yu Gu", "Yu Su", "Wenhu Chen" ], "externalIds": { "DBLP": "journals/corr/abs-2305-01750", "ArXiv": "2305.01750", "ACL": "2023.acl-long.385", "DOI": "10.48550/arXiv.2305.01750", "CorpusId": 258461017 }, "url": "https://www.semanticscholar.org/paper/0139e689add40a61c9454674edac4e93702aa5fc", "referenceCount": 59, "citationCount": 45, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Is ChatGPT Good at Search? Investigating Large Language Models as Re-Ranking Agent", "abstract": "Large Language Models (LLMs) have demonstrated remarkable zero-shot generalization across various language-related tasks, including search engines. However, existing work utilizes the generative ability of LLMs for Information Retrieval (IR) rather than direct passage ranking. The discrepancy between the pre-training objectives of LLMs and the ranking objective poses another challenge. In this paper, we first investigate generative LLMs such as ChatGPT and GPT-4 for relevance ranking in IR. Surprisingly, our experiments reveal that properly instructed LLMs can deliver competitive, even superior results to state-of-the-art supervised methods on popular IR benchmarks. Furthermore, to address concerns about data contamination of LLMs, we collect a new test set called NovelEval, based on the latest knowledge and aiming to verify the model's ability to rank unknown knowledge. Finally, to improve efficiency in real-world applications, we delve into the potential for distilling the ranking capabilities of ChatGPT into small specialized models using a permutation distillation scheme. Our evaluation results turn out that a distilled 440M model outperforms a 3B supervised model on the BEIR benchmark. The code to reproduce our results is available at www.github.com/sunnweiwei/RankGPT.", "year": 2023, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Weiwei Sun", "Lingyong Yan", "Xinyu Ma", "Pengjie Ren", "Dawei Yin", "Z. Ren" ], "externalIds": { "ArXiv": "2304.09542", "DBLP": "journals/corr/abs-2304-09542", "DOI": "10.48550/arXiv.2304.09542", "CorpusId": 258212638 }, "url": "https://www.semanticscholar.org/paper/459c82205d2a27a8542bba7a4d478a8a23be2f5d", "referenceCount": 46, "citationCount": 181, "influentialCitationCount": 29, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Curriculum Sampling for Dense Retrieval with Document Expansion", "abstract": "The dual-encoder has become the de facto architecture for dense retrieval. Typically, it computes the latent representations of the query and document independently, thus failing to fully capture the interactions between the query and document. To alleviate this, recent research has focused on obtaining query-informed document representations. During training, it expands the document with a real query, but during inference, it replaces the real query with a generated one. This inconsistency between training and inference causes the dense retrieval model to prioritize query information while disregarding the document when computing the document representation. 
Consequently, it performs even worse than the vanilla dense retrieval model because its performance heavily relies on the relevance between the generated queries and the real query.In this paper, we propose a curriculum sampling strategy that utilizes pseudo queries during training and progressively enhances the relevance between the generated query and the real query. By doing so, the retrieval model learns to extend its attention from the document alone to both the document and query, resulting in high-quality query-informed document representations. Experimental results on both in-domain and out-of-domain datasets demonstrate that our approach outperforms previous dense retrieval models.", "year": 2022, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Xingwei He", "Yeyun Gong", "Alex Jin", "Hang Zhang", "Anlei Dong", "Jian Jiao", "S. Yiu", "Nan Duan" ], "externalIds": { "DBLP": "conf/emnlp/0003GJ0D0YD23", "ArXiv": "2212.09114", "DOI": "10.48550/arXiv.2212.09114", "CorpusId": 254853896 }, "url": "https://www.semanticscholar.org/paper/7a5c061f373d91c7865f227053ed3cb1cd99bbfa", "referenceCount": 41, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Improving Few-Shot Performance of Language Models via Nearest Neighbor Calibration", "abstract": "Pre-trained language models (PLMs) have exhibited remarkable few-shot learning capabilities when provided a few examples in a natural language prompt as demonstrations of test instances, i.e., in-context learning. However, the performance of in-context learning is susceptible to the choice of prompt format, training examples and the ordering of the training examples. In this paper, we propose a novel nearest-neighbor calibration framework for in-context learning to ease this issue. It is inspired by a phenomenon that the in-context learning paradigm produces incorrect labels when inferring training instances, which provides a useful supervised signal to calibrate predictions. Thus, our method directly augments the predictions with a $k$-nearest-neighbor ($k$NN) classifier over a datastore of cached few-shot instance representations obtained by PLMs and their corresponding labels. Then adaptive neighbor selection and feature regularization modules are introduced to make full use of a few support instances to reduce the $k$NN retrieval noise. Experiments on various few-shot text classification tasks demonstrate that our method significantly improves in-context learning, while even achieving comparable performance with state-of-the-art tuning-based approaches in some sentiment analysis tasks.", "year": 2022, "venue": "arXiv.org", "authors": [ "Feng Nie", "Meixi Chen", "Zhirui Zhang", "Xuan Cheng" ], "externalIds": { "ArXiv": "2212.02216", "DBLP": "journals/corr/abs-2212-02216", "DOI": "10.48550/arXiv.2212.02216", "CorpusId": 254246441 }, "url": "https://www.semanticscholar.org/paper/6d951d939d3f27054215f2606a0cf89ed21550e9", "referenceCount": 45, "citationCount": 26, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RankT5: Fine-Tuning T5 for Text Ranking with Ranking Losses", "abstract": "Pretrained language models such as BERT have been shown to be exceptionally effective for text ranking. However, there are limited studies on how to leverage more powerful sequence-to-sequence models such as T5. 
Existing attempts usually formulate text ranking as a classification problem and rely on postprocessing to obtain a ranked list. In this paper, we propose RankT5 and study two T5-based ranking model structures, an encoder-decoder and an encoder-only one, so that they not only can directly output ranking scores for each query-document pair, but also can be fine-tuned with pairwise or listwise ranking losses to optimize ranking performance. Our experiments show that the proposed models with ranking losses can achieve substantial ranking performance gains on different public text ranking data sets. Moreover, ranking models fine-tuned with listwise ranking losses have better zero-shot ranking performance on out-of-domain data than models fine-tuned with classification losses.", "year": 2022, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "Honglei Zhuang", "Zhen Qin", "R. Jagerman", "Kai Hui", "Ji Ma", "Jing Lu", "Jianmo Ni", "Xuanhui Wang", "Michael Bendersky" ], "externalIds": { "DBLP": "journals/corr/abs-2210-10634", "ArXiv": "2210.10634", "DOI": "10.1145/3539618.3592047", "CorpusId": 252993059 }, "url": "https://www.semanticscholar.org/paper/4f26132fe5a982f47bba8941dba84cc0d6aa4cbc", "referenceCount": 60, "citationCount": 83, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Promptagator: Few-shot Dense Retrieval From 8 Examples", "abstract": "Much recent research on information retrieval has focused on how to transfer from one task (typically with abundant supervised data) to various other tasks where supervision is limited, with the implicit assumption that it is possible to generalize from one task to all the rest. However, this overlooks the fact that there are many diverse and unique retrieval tasks, each targeting different search intents, queries, and search domains. In this paper, we suggest to work on Few-shot Dense Retrieval, a setting where each task comes with a short description and a few examples. To amplify the power of a few examples, we propose Prompt-base Query Generation for Retriever (Promptagator), which leverages large language models (LLM) as a few-shot query generator, and creates task-specific retrievers based on the generated data. Powered by LLM's generalization ability, Promptagator makes it possible to create task-specific end-to-end retrievers solely based on a few examples {without} using Natural Questions or MS MARCO to train %question generators or dual encoders. Surprisingly, LLM prompting with no more than 8 examples allows dual encoders to outperform heavily engineered models trained on MS MARCO like ColBERT v2 by more than 1.2 nDCG on average on 11 retrieval sets. Further training standard-size re-rankers using the same generated data yields another 5.0 point nDCG improvement. Our studies determine that query generation can be far more effective than previously observed, especially when a small amount of task-specific knowledge is given.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Zhuyun Dai", "Vincent Zhao", "Ji Ma", "Yi Luan", "Jianmo Ni", "Jing Lu", "A. Bakalov", "Kelvin Guu", "Keith B. 
Hall", "Ming-Wei Chang" ], "externalIds": { "ArXiv": "2209.11755", "DBLP": "conf/iclr/DaiZMLNLBGHC23", "DOI": "10.48550/arXiv.2209.11755", "CorpusId": 252519173 }, "url": "https://www.semanticscholar.org/paper/e86009d9f9b1cdf083a48d087552bc4153784451", "referenceCount": 76, "citationCount": 160, "influentialCitationCount": 22, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SimLM: Pre-training with Representation Bottleneck for Dense Passage Retrieval", "abstract": "In this paper, we propose SimLM (Similarity matching with Language Model pre-training), a simple yet effective pre-training method for dense passage retrieval. It employs a simple bottleneck architecture that learns to compress the passage information into a dense vector through self-supervised pre-training. We use a replaced language modeling objective, which is inspired by ELECTRA (Clark et al., 2020), to improve the sample efficiency and reduce the mismatch of the input distribution between pre-training and fine-tuning. SimLM only requires access to an unlabeled corpus and is more broadly applicable when there are no labeled data or queries. We conduct experiments on several large-scale passage retrieval datasets and show substantial improvements over strong baselines under various settings. Remarkably, SimLM even outperforms multi-vector approaches such as ColBERTv2 (Santhanam et al., 2021) which incurs significantly more storage cost. Our code and model checkpoints are available at https://github.com/microsoft/unilm/tree/master/simlm .", "year": 2022, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Liang Wang", "Nan Yang", "Xiaolong Huang", "Binxing Jiao", "Linjun Yang", "Daxin Jiang", "Rangan Majumder", "Furu Wei" ], "externalIds": { "ACL": "2023.acl-long.125", "DBLP": "journals/corr/abs-2207-02578", "ArXiv": "2207.02578", "DOI": "10.48550/arXiv.2207.02578", "CorpusId": 250311114 }, "url": "https://www.semanticscholar.org/paper/4dd9836b65c5694f6796159177fda6c7f594ab5f", "referenceCount": 53, "citationCount": 79, "influentialCitationCount": 18, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Long Document Re-ranking with Modular Re-ranker", "abstract": "Long document re-ranking has been a challenging problem for neural re-rankers based on deep language models like BERT. Early work breaks the documents into short passage-like chunks. These chunks are independently mapped to scalar scores or latent vectors, which are then pooled into a final relevance score. These encode-and-pool methods however inevitably introduce an information bottleneck: the low dimension representations. In this paper, we propose instead to model full query-to-document interaction, leveraging the attention operation and modular Transformer re-ranker framework. First, document chunks are encoded independently with an encoder module. An interaction module then encodes the query and performs joint attention from the query to all document chunk representations. We demonstrate that the model can use this new degree of freedom to aggregate important information from the entire document. 
Our experiments show that this design produces effective re-ranking on two classical IR collections Robust04 and ClueWeb09, and a large-scale supervised collection MS-MARCO document ranking.", "year": 2022, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "Luyu Gao", "Jamie Callan" ], "externalIds": { "DBLP": "conf/sigir/GaoC22", "ArXiv": "2205.04275", "DOI": "10.1145/3477495.3531860", "CorpusId": 248571516 }, "url": "https://www.semanticscholar.org/paper/a40fff53696bd2b1c90dcea6ada26cf6cf115c78", "referenceCount": 26, "citationCount": 9, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Curriculum Learning for Dense Retrieval Distillation", "abstract": "Recent work has shown that more effective dense retrieval models can be obtained by distilling ranking knowledge from an existing base re-ranking model. In this paper, we propose a generic curriculum learning based optimization framework called CL-DRD that controls the difficulty level of training data produced by the re-ranking (teacher) model. CL-DRD iteratively optimizes the dense retrieval (student) model by increasing the difficulty of the knowledge distillation data made available to it. In more detail, we initially provide the student model coarse-grained preference pairs between documents in the teacher's ranking, and progressively move towards finer-grained pairwise document ordering requirements. In our experiments, we apply a simple implementation of the CL-DRD framework to enhance two state-of-the-art dense retrieval models. Experiments on three public passage retrieval datasets demonstrate the effectiveness of our proposed framework.", "year": 2022, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "Hansi Zeng", "Hamed Zamani", "Vishwa Vinay" ], "externalIds": { "DBLP": "journals/corr/abs-2204-13679", "ArXiv": "2204.13679", "DOI": "10.1145/3477495.3531791", "CorpusId": 248426770 }, "url": "https://www.semanticscholar.org/paper/58aa3467505e900f76fb2295f2f94d60c70a65e2", "referenceCount": 32, "citationCount": 44, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Distill-VQ: Learning Retrieval Oriented Vector Quantization By Distilling Knowledge from Dense Embeddings", "abstract": "Vector quantization (VQ) based ANN indexes, such as Inverted File System (IVF) and Product Quantization (PQ), have been widely applied to embedding based document retrieval thanks to the competitive time and memory efficiency. Originally, VQ is learned to minimize the reconstruction loss, i.e., the distortions between the original dense embeddings and the reconstructed embeddings after quantization. Unfortunately, such an objective is inconsistent with the goal of selecting ground-truth documents for the input query, which may cause severe loss of retrieval quality. Recent works identify such a defect, and propose to minimize the retrieval loss through contrastive learning. However, these methods intensively rely on queries with ground-truth documents, whose performance is limited by the insufficiency of labeled data. In this paper, we propose Distill-VQ, which unifies the learning of IVF and PQ within a knowledge distillation framework. In Distill-VQ, the dense embeddings are leveraged as \"teachers'', which predict the query's relevance to the sampled documents. 
The VQ modules are treated as the \"students'', which are learned to reproduce the predicted relevance, such that the reconstructed embeddings may fully preserve the retrieval result of the dense embeddings. By doing so, Distill-VQ is able to derive substantial training signals from the massive unlabeled data, which significantly contributes to the retrieval quality. We perform comprehensive explorations for the optimal conduct of knowledge distillation, which may provide useful insights for the learning of VQ based ANN index. We also experimentally show that the labeled data is no longer a necessity for high-quality vector quantization, which indicates Distill-VQ's strong applicability in practice. The evaluations are performed on MS MARCO and Natural Questions benchmarks, where Distill-VQ notably outperforms the SOTA VQ methods in Recall and MRR. Our code is avaliable at https://github.com/staoxiao/LibVQ.", "year": 2022, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "Shitao Xiao", "Zheng Liu", "Weihao Han", "Jianjin Zhang", "Defu Lian", "Yeyun Gong", "Qi Chen", "Fan Yang", "Hao Sun", "Yingxia Shao", "Denvy Deng", "Qi Zhang", "Xing Xie" ], "externalIds": { "DBLP": "conf/sigir/XiaoLHZLGCYSSX22", "ArXiv": "2204.00185", "DOI": "10.1145/3477495.3531799", "CorpusId": 247922621 }, "url": "https://www.semanticscholar.org/paper/0eb19e852013f83e2001e3191ccd3b46b280d417", "referenceCount": 56, "citationCount": 27, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Training language models to follow instructions with human feedback", "abstract": "Making language models bigger does not inherently make them better at following a user's intent. For example, large language models can generate outputs that are untruthful, toxic, or simply not helpful to the user. In other words, these models are not aligned with their users. In this paper, we show an avenue for aligning language models with user intent on a wide range of tasks by fine-tuning with human feedback. Starting with a set of labeler-written prompts and prompts submitted through the OpenAI API, we collect a dataset of labeler demonstrations of the desired model behavior, which we use to fine-tune GPT-3 using supervised learning. We then collect a dataset of rankings of model outputs, which we use to further fine-tune this supervised model using reinforcement learning from human feedback. We call the resulting models InstructGPT. In human evaluations on our prompt distribution, outputs from the 1.3B parameter InstructGPT model are preferred to outputs from the 175B GPT-3, despite having 100x fewer parameters. Moreover, InstructGPT models show improvements in truthfulness and reductions in toxic output generation while having minimal performance regressions on public NLP datasets. Even though InstructGPT still makes simple mistakes, our results show that fine-tuning with human feedback is a promising direction for aligning language models with human intent.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Long Ouyang", "Jeff Wu", "Xu Jiang", "Diogo Almeida", "Carroll L. Wainwright", "Pamela Mishkin", "Chong Zhang", "Sandhini Agarwal", "Katarina Slama", "Alex Ray", "John Schulman", "Jacob Hilton", "Fraser Kelton", "Luke E. Miller", "Maddie Simens", "Amanda Askell", "P. Welinder", "P. Christiano", "J. Leike", "Ryan J. 
Lowe" ], "externalIds": { "DBLP": "conf/nips/Ouyang0JAWMZASR22", "ArXiv": "2203.02155", "CorpusId": 246426909 }, "url": "https://www.semanticscholar.org/paper/d766bffc357127e0dc86dd69561d5aeb520d6f4c", "referenceCount": 83, "citationCount": 8493, "influentialCitationCount": 1115, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Unsupervised Dense Information Retrieval with Contrastive Learning", "abstract": "Recently, information retrieval has seen the emergence of dense retrievers, using neural networks, as an alternative to classical sparse methods based on term-frequency. These models have obtained state-of-the-art results on datasets and tasks where large training sets are available. However, they do not transfer well to new applications with no training data, and are outperformed by unsupervised term-frequency methods such as BM25. In this work, we explore the limits of contrastive learning as a way to train unsupervised dense retrievers and show that it leads to strong performance in various retrieval settings. On the BEIR benchmark our unsupervised model outperforms BM25 on 11 out of 15 datasets for the Recall@100. When used as pre-training before fine-tuning, either on a few thousands in-domain examples or on the large MS~MARCO dataset, our contrastive model leads to improvements on the BEIR benchmark. Finally, we evaluate our approach for multi-lingual retrieval, where training data is even scarcer than for English, and show that our approach leads to strong unsupervised performance. Our model also exhibits strong cross-lingual transfer when fine-tuned on supervised English data only and evaluated on low resources language such as Swahili. We show that our unsupervised models can perform cross-lingual retrieval between different scripts, such as retrieving English documents from Arabic queries, which would not be possible with term matching methods.", "year": 2021, "venue": "Trans. Mach. Learn. Res.", "authors": [ "Gautier Izacard", "Mathilde Caron", "Lucas Hosseini", "Sebastian Riedel", "Piotr Bojanowski", "Armand Joulin", "Edouard Grave" ], "externalIds": { "ArXiv": "2112.09118", "DBLP": "journals/tmlr/IzacardCHRBJG22", "CorpusId": 249097975 }, "url": "https://www.semanticscholar.org/paper/4f4a409f701f7552d45c46a5b0fea69dca6f8e84", "referenceCount": 68, "citationCount": 531, "influentialCitationCount": 116, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Large Dual Encoders Are Generalizable Retrievers", "abstract": "It has been shown that dual encoders trained on one domain often fail to generalize to other domains for retrieval tasks. One widespread belief is that the bottleneck layer of a dual encoder, where the final score is simply a dot-product between a query vector and a passage vector, is too limited compared to models with fine-grained interactions between the query and the passage. In this paper, we challenge this belief by scaling up the size of the dual encoder model while keeping the bottleneck layer as a single dot-product with a fixed size. With multi-stage training, scaling up the model size brings significant improvement on a variety of retrieval tasks, especially for out-of-domain generalization. We further analyze the impact of the bottleneck layer and demonstrate diminishing improvement when scaling up the embedding size. Experimental results show that our dual encoders, Generalizable T5-based dense Retrievers (GTR), outperform previous sparse and dense retrievers on the BEIR dataset significantly. 
Most surprisingly, our ablation study finds that GTR is very data efficient, as it only needs 10% of MS Marco supervised data to match the out-of-domain performance of using all supervised data.", "year": 2021, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Jianmo Ni", "Chen Qu", "Jing Lu", "Zhuyun Dai", "Gustavo Hernández Abrego", "Ji Ma", "Vincent Zhao", "Yi Luan", "Keith B. Hall", "Ming-Wei Chang", "Yinfei Yang" ], "externalIds": { "DBLP": "conf/emnlp/Ni0LDAMZLHCY22", "ACL": "2022.emnlp-main.669", "ArXiv": "2112.07899", "DOI": "10.18653/v1/2022.emnlp-main.669", "CorpusId": 245144556 }, "url": "https://www.semanticscholar.org/paper/9f2cf7b35224aad3a8d261e4456fe2d65a5f5d3e", "referenceCount": 41, "citationCount": 320, "influentialCitationCount": 55, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fantastically Ordered Prompts and Where to Find Them: Overcoming Few-Shot Prompt Order Sensitivity", "abstract": "When primed with only a handful of training samples, very large, pretrained language models such as GPT-3 have shown competitive results when compared to fully-supervised, fine-tuned, large, pretrained language models. We demonstrate that the order in which the samples are provided can make the difference between near state-of-the-art and random guess performance: essentially some permutations are “fantastic” and some not. We analyse this phenomenon in detail, establishing that: it is present across model sizes (even for the largest current models), it is not related to a specific subset of samples, and that a given good permutation for one model is not transferable to another. While one could use a development set to determine which permutations are performant, this would deviate from the true few-shot setting as it requires additional annotated data. Instead, we use the generative nature of language models to construct an artificial development set and based on entropy statistics of the candidate permutations on this set, we identify performant prompts. Our method yields a 13% relative improvement for GPT-family models across eleven different established text classification tasks.", "year": 2021, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Yao Lu", "Max Bartolo", "Alastair Moore", "Sebastian Riedel", "Pontus Stenetorp" ], "externalIds": { "DBLP": "journals/corr/abs-2104-08786", "ArXiv": "2104.08786", "ACL": "2022.acl-long.556", "DOI": "10.18653/v1/2022.acl-long.556", "CorpusId": 233296494 }, "url": "https://www.semanticscholar.org/paper/0adec918885dff698acf359988ed79a543157f80", "referenceCount": 32, "citationCount": 886, "influentialCitationCount": 63, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "BEIR: A Heterogenous Benchmark for Zero-shot Evaluation of Information Retrieval Models", "abstract": "Existing neural information retrieval (IR) models have often been studied in homogeneous and narrow settings, which has considerably limited insights into their out-of-distribution (OOD) generalization capabilities. To address this, and to facilitate researchers to broadly evaluate the effectiveness of their models, we introduce Benchmarking-IR (BEIR), a robust and heterogeneous evaluation benchmark for information retrieval. 
We leverage a careful selection of 18 publicly available datasets from diverse text retrieval tasks and domains and evaluate 10 state-of-the-art retrieval systems including lexical, sparse, dense, late-interaction and re-ranking architectures on the BEIR benchmark. Our results show BM25 is a robust baseline and re-ranking and late-interaction-based models on average achieve the best zero-shot performances, however, at high computational costs. In contrast, dense and sparse-retrieval models are computationally more efficient but often underperform other approaches, highlighting the considerable room for improvement in their generalization capabilities. We hope this framework allows us to better evaluate and understand existing retrieval systems, and contributes to accelerating progress towards better robust and generalizable systems in the future. BEIR is publicly available at https://github.com/UKPLab/beir.", "year": 2021, "venue": "NeurIPS Datasets and Benchmarks", "authors": [ "Nandan Thakur", "Nils Reimers", "Andreas Rücklé", "Abhishek Srivastava", "Iryna Gurevych" ], "externalIds": { "DBLP": "journals/corr/abs-2104-08663", "ArXiv": "2104.08663", "CorpusId": 233296016 }, "url": "https://www.semanticscholar.org/paper/807600ef43073cd9c59d4208ee710e90cf14efa8", "referenceCount": 103, "citationCount": 720, "influentialCitationCount": 189, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Condenser: a Pre-training Architecture for Dense Retrieval", "abstract": "Pre-trained Transformer language models (LM) have become go-to text representation encoders. Prior research fine-tunes deep LMs to encode text sequences such as sentences and passages into single dense vector representations for efficient text comparison and retrieval. However, dense encoders require a lot of data and sophisticated techniques to effectively train and suffer in low data situations. This paper finds a key reason is that standard LMs’ internal attention structure is not ready-to-use for dense encoders, which needs to aggregate text information into the dense representation. We propose to pre-train towards dense encoder with a novel Transformer architecture, Condenser, where LM prediction CONditions on DENSE Representation. Our experiments show Condenser improves over standard LM by large margins on various text retrieval and similarity tasks.", "year": 2021, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Luyu Gao", "Jamie Callan" ], "externalIds": { "ACL": "2021.emnlp-main.75", "ArXiv": "2104.08253", "DBLP": "conf/emnlp/GaoC21", "DOI": "10.18653/v1/2021.emnlp-main.75", "CorpusId": 237581068 }, "url": "https://www.semanticscholar.org/paper/9bbdcc03d872987eef9165f4a63c3878a5b05189", "referenceCount": 53, "citationCount": 219, "influentialCitationCount": 36, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "COIL: Revisit Exact Lexical Match in Information Retrieval with Contextualized Inverted List", "abstract": "Classical information retrieval systems such as BM25 rely on exact lexical match and can carry out search efficiently with inverted list index. Recent neural IR models shifts towards soft matching all query document terms, but they lose the computation efficiency of exact match systems. This paper presents COIL, a contextualized exact match retrieval architecture, where scoring is based on overlapping query document tokens’ contextualized representations. 
The new architecture stores contextualized token representations in inverted lists, bringing together the efficiency of exact match and the representation power of deep language models. Our experimental results show COIL outperforms classical lexical retrievers and state-of-the-art deep LM retrievers with similar or smaller latency.", "year": 2021, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Luyu Gao", "Zhuyun Dai", "Jamie Callan" ], "externalIds": { "ACL": "2021.naacl-main.241", "MAG": "3172119680", "DBLP": "conf/naacl/GaoDC21", "ArXiv": "2104.07186", "DOI": "10.18653/V1/2021.NAACL-MAIN.241", "CorpusId": 233241070 }, "url": "https://www.semanticscholar.org/paper/2d7a784a093615d00d4ac0a7b5763a15d86d4996", "referenceCount": 39, "citationCount": 191, "influentialCitationCount": 33, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Efficiently Teaching an Effective Dense Retriever with Balanced Topic Aware Sampling", "abstract": "A vital step towards the widespread adoption of neural retrieval models is their resource efficiency throughout the training, indexing and query workflows. The neural IR community made great advancements in training effective dual-encoder dense retrieval (DR) models recently. A dense text retrieval model uses a single vector representation per query and passage to score a match, which enables low-latency first-stage retrieval with a nearest neighbor search. Increasingly common, training approaches require enormous compute power, as they either conduct negative passage sampling out of a continuously updating refreshing index or require very large batch sizes. Instead of relying on more compute capability, we introduce an efficient topic-aware query and balanced margin sampling technique, called TAS-Balanced. We cluster queries once before training and sample queries out of a cluster per batch. We train our lightweight 6-layer DR model with a novel dual-teacher supervision that combines pairwise and in-batch negative teachers. Our method is trainable on a single consumer-grade GPU in under 48 hours. We show that our TAS-Balanced training method achieves state-of-the-art low-latency (64ms per query) results on two TREC Deep Learning Track query sets. Evaluated on NDCG@10, we outperform BM25 by 44%, a plainly trained DR by 19%, docT5query by 11%, and the previous best DR model by 5%. Additionally, TAS-Balanced produces the first dense retriever that outperforms every other method on recall at any cutoff on TREC-DL and allows more resource intensive re-ranking models to operate on fewer passages to improve results further.", "year": 2021, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "Sebastian Hofstätter", "Sheng-Chieh Lin", "Jheng-Hong Yang", "Jimmy J. Lin", "A. Hanbury" ], "externalIds": { "DBLP": "conf/sigir/HofstatterLYLH21", "ArXiv": "2104.06967", "MAG": "3154670582", "DOI": "10.1145/3404835.3462891", "CorpusId": 233231706 }, "url": "https://www.semanticscholar.org/paper/4deed74a3eee7e629dce2b8ef1e437ca74b2e64a", "referenceCount": 49, "citationCount": 326, "influentialCitationCount": 54, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Overview of the TREC 2020 Deep Learning Track", "abstract": "This is the second year of the TREC Deep Learning Track, with the goal of studying ad hoc ranking in the large training data regime. 
We again have a document retrieval task and a passage retrieval task, each with hundreds of thousands of human-labeled training queries. We evaluate using single-shot TREC-style evaluation, to give us a picture of which ranking methods work best when large data is available, with much more comprehensive relevance labeling on the small number of test queries. This year we have further evidence that rankers with BERT-style pretraining outperform other rankers in the large data regime.", "year": 2021, "venue": "Text Retrieval Conference", "authors": [ "Nick Craswell", "Bhaskar Mitra", "Emine Yilmaz", "Daniel Fernando Campos", "E. Voorhees" ], "externalIds": { "DBLP": "journals/corr/abs-2102-07662", "ArXiv": "2102.07662", "CorpusId": 212737158 }, "url": "https://www.semanticscholar.org/paper/494e730f73035376736b36cecf2023ea6deb3cbe", "referenceCount": 22, "citationCount": 313, "influentialCitationCount": 48, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "What Makes Good In-Context Examples for GPT-3?", "abstract": "GPT-3 has attracted lots of attention due to its superior performance across a wide range of NLP tasks, especially with its in-context learning abilities. Despite its success, we found that the empirical results of GPT-3 depend heavily on the choice of in-context examples. In this work, we investigate whether there are more effective strategies for judiciously selecting in-context examples (relative to random sampling) that better leverage GPT-3’s in-context learning capabilities. Inspired by the recent success of leveraging a retrieval module to augment neural networks, we propose to retrieve examples that are semantically-similar to a test query sample to formulate its corresponding prompt. Intuitively, the examples selected with such a strategy may serve as more informative inputs to unleash GPT-3’s power of text generation. We evaluate the proposed approach on several natural language understanding and generation benchmarks, where the retrieval-based prompt selection approach consistently outperforms the random selection baseline. Moreover, it is observed that the sentence encoders fine-tuned on task-related datasets yield even more helpful retrieval results. Notably, significant gains are observed on tasks such as table-to-text generation (44.3% on the ToTTo dataset) and open-domain question answering (45.5% on the NQ dataset).", "year": 2021, "venue": "Workshop on Knowledge Extraction and Integration for Deep Learning Architectures; Deep Learning Inside Out", "authors": [ "Jiachang Liu", "Dinghan Shen", "Yizhe Zhang", "Bill Dolan", "L. Carin", "Weizhu Chen" ], "externalIds": { "DBLP": "conf/acl-deelio/LiuSZDCC22", "ArXiv": "2101.06804", "ACL": "2022.deelio-1.10", "DOI": "10.18653/v1/2022.deelio-1.10", "CorpusId": 231632658 }, "url": "https://www.semanticscholar.org/paper/59641c10ed7431a3cf841f308367dc2dc0281b74", "referenceCount": 54, "citationCount": 1046, "influentialCitationCount": 129, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Expando-Mono-Duo Design Pattern for Text Ranking with Pretrained Sequence-to-Sequence Models", "abstract": "We propose a design pattern for tackling text ranking problems, dubbed \"Expando-Mono-Duo\", that has been empirically validated for a number of ad hoc retrieval tasks in different domains. 
At the core, our design relies on pretrained sequence-to-sequence models within a standard multi-stage ranking architecture. \"Expando\" refers to the use of document expansion techniques to enrich keyword representations of texts prior to inverted indexing. \"Mono\" and \"Duo\" refer to components in a reranking pipeline based on a pointwise model and a pairwise model that rerank initial candidates retrieved using keyword search. We present experimental results from the MS MARCO passage and document ranking tasks, the TREC 2020 Deep Learning Track, and the TREC-COVID challenge that validate our design. In all these tasks, we achieve effectiveness that is at or near the state of the art, in some cases using a zero-shot approach that does not exploit any training data from the target task. To support replicability, implementations of our design pattern are open-sourced in the Pyserini IR toolkit and PyGaggle neural reranking library.", "year": 2021, "venue": "arXiv.org", "authors": [ "Ronak Pradeep", "Rodrigo Nogueira", "Jimmy J. Lin" ], "externalIds": { "DBLP": "journals/corr/abs-2101-05667", "ArXiv": "2101.05667", "CorpusId": 231603106 }, "url": "https://www.semanticscholar.org/paper/e08eed9608382beea1febca49119c665fbabd031", "referenceCount": 62, "citationCount": 132, "influentialCitationCount": 15, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Distilling Dense Representations for Ranking using Tightly-Coupled Teachers", "abstract": "We present an approach to ranking with dense representations that applies knowledge distillation to improve the recently proposed late-interaction ColBERT model. Specifically, we distill the knowledge from ColBERT's expressive MaxSim operator for computing relevance scores into a simple dot product, thus enabling single-step ANN search. Our key insight is that during distillation, tight coupling between the teacher model and the student model enables more flexible distillation strategies and yields better learned representations. We empirically show that our approach improves query latency and greatly reduces the onerous storage requirements of ColBERT, while only making modest sacrifices in terms of effectiveness. By combining our dense representations with sparse representations derived from document expansion, we are able to approach the effectiveness of a standard cross-encoder reranker using BERT that is orders of magnitude slower.", "year": 2020, "venue": "arXiv.org", "authors": [ "Sheng-Chieh Lin", "Jheng-Hong Yang", "Jimmy J. Lin" ], "externalIds": { "DBLP": "journals/corr/abs-2010-11386", "ArXiv": "2010.11386", "MAG": "3093955333", "CorpusId": 225041183 }, "url": "https://www.semanticscholar.org/paper/83f915d30720f1aa1c6f6a4342d7f9e52add756e", "referenceCount": 28, "citationCount": 111, "influentialCitationCount": 18, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Approximate Nearest Neighbor Negative Contrastive Learning for Dense Text Retrieval", "abstract": "Conducting text retrieval in a dense learned representation space has many intriguing advantages over sparse retrieval. Yet the effectiveness of dense retrieval (DR) often requires combination with sparse retrieval. In this paper, we identify that the main bottleneck is in the training mechanisms, where the negative instances used in training are not representative of the irrelevant documents in testing. 
This paper presents Approximate nearest neighbor Negative Contrastive Estimation (ANCE), a training mechanism that constructs negatives from an Approximate Nearest Neighbor (ANN) index of the corpus, which is parallelly updated with the learning process to select more realistic negative training instances. This fundamentally resolves the discrepancy between the data distribution used in the training and testing of DR. In our experiments, ANCE boosts the BERT-Siamese DR model to outperform all competitive dense and sparse retrieval baselines. It nearly matches the accuracy of sparse-retrieval-and-BERT-reranking using dot-product in the ANCE-learned representation space and provides almost 100x speed-up.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Lee Xiong", "Chenyan Xiong", "Ye Li", "Kwok-Fung Tang", "Jialin Liu", "Paul N. Bennett", "Junaid Ahmed", "Arnold Overwijk" ], "externalIds": { "MAG": "3038572442", "DBLP": "conf/iclr/XiongXLTLBAO21", "ArXiv": "2007.00808", "CorpusId": 220302524 }, "url": "https://www.semanticscholar.org/paper/c9b8593db099869fe7254aa1fa53f3c9073b0176", "referenceCount": 65, "citationCount": 1007, "influentialCitationCount": 231, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Language Models are Few-Shot Learners", "abstract": "Recent work has demonstrated substantial gains on many NLP tasks and benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples. By contrast, humans can generally perform a new language task from only a few examples or from simple instructions - something which current NLP systems still largely struggle to do. Here we show that scaling up language models greatly improves task-agnostic, few-shot performance, sometimes even reaching competitiveness with prior state-of-the-art fine-tuning approaches. Specifically, we train GPT-3, an autoregressive language model with 175 billion parameters, 10x more than any previous non-sparse language model, and test its performance in the few-shot setting. For all tasks, GPT-3 is applied without any gradient updates or fine-tuning, with tasks and few-shot demonstrations specified purely via text interaction with the model. GPT-3 achieves strong performance on many NLP datasets, including translation, question-answering, and cloze tasks, as well as several tasks that require on-the-fly reasoning or domain adaptation, such as unscrambling words, using a novel word in a sentence, or performing 3-digit arithmetic. At the same time, we also identify some datasets where GPT-3's few-shot learning still struggles, as well as some datasets where GPT-3 faces methodological issues related to training on large web corpora. Finally, we find that GPT-3 can generate samples of news articles which human evaluators have difficulty distinguishing from articles written by humans. We discuss broader societal impacts of this finding and of GPT-3 in general.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Tom B. Brown", "Benjamin Mann", "Nick Ryder", "Melanie Subbiah", "J. Kaplan", "Prafulla Dhariwal", "Arvind Neelakantan", "Pranav Shyam", "Girish Sastry", "Amanda Askell", "Sandhini Agarwal", "Ariel Herbert-Voss", "Gretchen Krueger", "T. Henighan", "R. Child", "A. Ramesh", "Daniel M. 
Ziegler", "Jeff Wu", "Clemens Winter", "Christopher Hesse", "Mark Chen", "Eric Sigler", "Ma-teusz Litwin", "Scott Gray", "B. Chess", "Jack Clark", "Christopher Berner", "Sam McCandlish", "Alec Radford", "I. Sutskever", "Dario Amodei" ], "externalIds": { "ArXiv": "2005.14165", "DBLP": "conf/nips/BrownMRSKDNSSAA20", "MAG": "3030163527", "CorpusId": 218971783 }, "url": "https://www.semanticscholar.org/paper/90abbc2cf38462b954ae1b772fac9532e2ccd8b0", "referenceCount": 146, "citationCount": 30854, "influentialCitationCount": 3529, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks", "abstract": "Large pre-trained language models have been shown to store factual knowledge in their parameters, and achieve state-of-the-art results when fine-tuned on downstream NLP tasks. However, their ability to access and precisely manipulate knowledge is still limited, and hence on knowledge-intensive tasks, their performance lags behind task-specific architectures. Additionally, providing provenance for their decisions and updating their world knowledge remain open research problems. Pre-trained models with a differentiable access mechanism to explicit non-parametric memory can overcome this issue, but have so far been only investigated for extractive downstream tasks. We explore a general-purpose fine-tuning recipe for retrieval-augmented generation (RAG) -- models which combine pre-trained parametric and non-parametric memory for language generation. We introduce RAG models where the parametric memory is a pre-trained seq2seq model and the non-parametric memory is a dense vector index of Wikipedia, accessed with a pre-trained neural retriever. We compare two RAG formulations, one which conditions on the same retrieved passages across the whole generated sequence, the other can use different passages per token. We fine-tune and evaluate our models on a wide range of knowledge-intensive NLP tasks and set the state-of-the-art on three open domain QA tasks, outperforming parametric seq2seq models and task-specific retrieve-and-extract architectures. For language generation tasks, we find that RAG models generate more specific, diverse and factual language than a state-of-the-art parametric-only seq2seq baseline.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Patrick Lewis", "Ethan Perez", "Aleksandara Piktus", "F. Petroni", "Vladimir Karpukhin", "Naman Goyal", "Heinrich Kuttler", "M. Lewis", "Wen-tau Yih", "Tim Rocktäschel", "Sebastian Riedel", "Douwe Kiela" ], "externalIds": { "ArXiv": "2005.11401", "MAG": "3027879771", "DBLP": "conf/nips/LewisPPPKGKLYR020", "CorpusId": 218869575 }, "url": "https://www.semanticscholar.org/paper/58ed1fbaabe027345f7bb3a6312d41c5aac63e22", "referenceCount": 67, "citationCount": 3123, "influentialCitationCount": 375, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fact or Fiction: Verifying Scientific Claims", "abstract": "We introduce scientific claim verification, a new task to select abstracts from the research literature containing evidence that supports or refutes a given scientific claim, and to identify rationales justifying each decision. To study this task, we construct SciFact, a dataset of 1.4K expert-written scientific claims paired with evidence-containing abstracts annotated with labels and rationales. 
We develop baseline models for SciFact, and demonstrate that these models benefit from combined training on a large dataset of claims about Wikipedia articles, together with the new SciFact data. We show that our claim verification system is able to identify plausible evidence for 23 / 36 claims relevant to COVID-19 on the CORD-19 corpus. Our results and experiments strongly suggest that our new task and data will support significant future research efforts.", "year": 2020, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "David Wadden", "Kyle Lo", "Lucy Lu Wang", "Shanchuan Lin", "Madeleine van Zuylen", "Arman Cohan", "Hannaneh Hajishirzi" ], "externalIds": { "ArXiv": "2004.14974", "DBLP": "journals/corr/abs-2004-14974", "MAG": "3023035014", "ACL": "2020.emnlp-main.609", "DOI": "10.18653/v1/2020.emnlp-main.609", "CorpusId": 216867133 }, "url": "https://www.semanticscholar.org/paper/b770d84055c32febe922be9931c453fdbebe9002", "referenceCount": 44, "citationCount": 354, "influentialCitationCount": 52, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Expansion via Prediction of Importance with Contextualization", "abstract": "The identification of relevance with little textual context is a primary challenge in passage retrieval. We address this problem with a representation-based ranking approach that: (1) explicitly models the importance of each term using a contextualized language model; (2) performs passage expansion by propagating the importance to similar terms; and (3) grounds the representations in the lexicon, making them interpretable. Passage representations can be pre-computed at index time to reduce query-time latency. We call our approach EPIC (Expansion via Prediction of Importance with Contextualization). We show that EPIC significantly outperforms prior importance-modeling and document expansion approaches. We also observe that the performance is additive with the current leading first-stage retrieval methods, further narrowing the gap between inexpensive and cost-prohibitive passage ranking approaches. Specifically, EPIC achieves a MRR@10 of 0.304 on the MS-MARCO passage ranking dataset with 78ms average query latency on commodity hardware. We also find that the latency is further reduced to 68ms by pruning document representations, with virtually no difference in effectiveness.", "year": 2020, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "Sean MacAvaney", "F. M. Nardini", "R. Perego", "N. Tonellotto", "Nazli Goharian", "O. Frieder" ], "externalIds": { "MAG": "3099384026", "DBLP": "conf/sigir/MacAvaneyN0TGF20b", "ArXiv": "2004.14245", "DOI": "10.1145/3397271.3401262", "CorpusId": 216641912 }, "url": "https://www.semanticscholar.org/paper/0c57dcf959ead9530f9ec3ebe0dd58de42a3e8af", "referenceCount": 18, "citationCount": 85, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ColBERT: Efficient and Effective Passage Search via Contextualized Late Interaction over BERT", "abstract": "Recent progress in Natural Language Understanding (NLU) is driving fast-paced advances in Information Retrieval (IR), largely owed to fine-tuning deep language models (LMs) for document ranking. 
While remarkably effective, the ranking models based on these LMs increase computational cost by orders of magnitude over prior approaches, particularly as they must feed each query-document pair through a massive neural network to compute a single relevance score. To tackle this, we present ColBERT, a novel ranking model that adapts deep LMs (in particular, BERT) for efficient retrieval. ColBERT introduces a late interaction architecture that independently encodes the query and the document using BERT and then employs a cheap yet powerful interaction step that models their fine-grained similarity. By delaying and yet retaining this fine-granular interaction, ColBERT can leverage the expressiveness of deep LMs while simultaneously gaining the ability to pre-compute document representations offline, considerably speeding up query processing. Crucially, ColBERT's pruning-friendly interaction mechanism enables leveraging vector-similarity indexes for end-to-end retrieval directly from millions of documents. We extensively evaluate ColBERT using two recent passage search datasets. Results show that ColBERT's effectiveness is competitive with existing BERT-based models (and outperforms every non-BERT baseline), while executing two orders-of-magnitude faster and requiring up to four orders-of-magnitude fewer FLOPs per query.", "year": 2020, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "O. Khattab", "M. Zaharia" ], "externalIds": { "MAG": "3021397474", "DBLP": "conf/sigir/KhattabZ20", "ArXiv": "2004.12832", "DOI": "10.1145/3397271.3401075", "CorpusId": 216553223 }, "url": "https://www.semanticscholar.org/paper/60b8ad6177230ad5402af409a6edb5af441baeb4", "referenceCount": 44, "citationCount": 1056, "influentialCitationCount": 152, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "CORD-19: The Covid-19 Open Research Dataset", "abstract": "The COVID-19 Open Research Dataset (CORD-19) is a growing resource of scientific papers on COVID-19 and related historical coronavirus research. CORD-19 is designed to facilitate the development of text mining and information retrieval systems over its rich collection of metadata and structured full text papers. Since its release, CORD-19 has been downloaded over 200K times and has served as the basis of many COVID-19 text mining and discovery systems. In this article, we describe the mechanics of dataset construction, highlighting challenges and key design decisions, provide an overview of how CORD-19 has been used, and describe several shared tasks built around the dataset. We hope this resource will continue to bring together the computing community, biomedical experts, and policy makers in the search for effective treatments and management policies for COVID-19.", "year": 2020, "venue": "NLPCOVID19", "authors": [ "Lucy Lu Wang", "Kyle Lo", "Yoganand Chandrasekhar", "Russell Reas", "Jiangjiang Yang", "Darrin Eide", "Kathryn Funk", "Rodney Michael Kinney", "Ziyang Liu", "William Merrill", "P. Mooney", "D. Murdick", "Devvret Rishi", "J. Sheehan", "Zhihong Shen", "Brandon Stilson", "Alex D Wade", "Kuansan Wang", "Christopher Wilhelm", "Boya Xie", "Douglas A. Raymond", "Daniel S. 
Weld", "Oren Etzioni", "Sebastian Kohlmeier" ], "externalIds": { "ACL": "2020.nlpcovid19-acl.1", "DBLP": "journals/corr/abs-2004-10706", "ArXiv": "2004.10706", "PubMedCentral": "7251955", "MAG": "3020786614", "CorpusId": 216056360, "PubMed": "32510522" }, "url": "https://www.semanticscholar.org/paper/bc411487f305e451d7485e53202ec241fcc97d3b", "referenceCount": 46, "citationCount": 760, "influentialCitationCount": 110, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Dense Passage Retrieval for Open-Domain Question Answering", "abstract": "Open-domain question answering relies on efficient passage retrieval to select candidate contexts, where traditional sparse vector space models, such as TF-IDF or BM25, are the de facto method. In this work, we show that retrieval can be practically implemented using dense representations alone, where embeddings are learned from a small number of questions and passages by a simple dual-encoder framework. When evaluated on a wide range of open-domain QA datasets, our dense retriever outperforms a strong Lucene-BM25 system largely by 9%-19% absolute in terms of top-20 passage retrieval accuracy, and helps our end-to-end QA system establish new state-of-the-art on multiple open-domain QA benchmarks.", "year": 2020, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Vladimir Karpukhin", "Barlas Oğuz", "Sewon Min", "Patrick Lewis", "Ledell Yu Wu", "Sergey Edunov", "Danqi Chen", "Wen-tau Yih" ], "externalIds": { "MAG": "3015883388", "DBLP": "journals/corr/abs-2004-04906", "ArXiv": "2004.04906", "ACL": "2020.emnlp-main.550", "DOI": "10.18653/v1/2020.emnlp-main.550", "CorpusId": 215737187 }, "url": "https://www.semanticscholar.org/paper/b26f2037f769d5ffc5f7bdcec2de8da28ec14bee", "referenceCount": 55, "citationCount": 2795, "influentialCitationCount": 723, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Overview of the TREC 2019 deep learning track", "abstract": "The Deep Learning Track is a new track for TREC 2019, with the goal of studying ad hoc ranking in a large data regime. It is the first track with large human-labeled training sets, introducing two sets corresponding to two tasks, each with rigorous TREC-style blind evaluation and reusable test sets. The document retrieval task has a corpus of 3.2 million documents with 367 thousand training queries, for which we generate a reusable test set of 43 queries. The passage retrieval task has a corpus of 8.8 million passages with 503 thousand training queries, for which we generate a reusable test set of 43 queries. This year 15 groups submitted a total of 75 runs, using various combinations of deep learning, transfer learning and traditional IR ranking methods. Deep learning runs significantly outperformed traditional IR runs. Possible explanations for this result are that we introduced large training data and we included deep models trained on such data in our judging pools, whereas some past studies did not have such training data or pooling.", "year": 2020, "venue": "arXiv.org", "authors": [ "Nick Craswell", "Bhaskar Mitra", "Emine Yilmaz", "Daniel Fernando Campos", "E. 
Voorhees" ], "externalIds": { "DBLP": "journals/corr/abs-2003-07820", "ArXiv": "2003.07820", "MAG": "3011794880", "CorpusId": 253234683 }, "url": "https://www.semanticscholar.org/paper/1163d1ffeb57695dafe7084a63dafd5d34004db5", "referenceCount": 35, "citationCount": 376, "influentialCitationCount": 58, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Document Ranking with a Pretrained Sequence-to-Sequence Model", "abstract": "This work proposes the use of a pretrained sequence-to-sequence model for document ranking. Our approach is fundamentally different from a commonly adopted classification-based formulation based on encoder-only pretrained transformer architectures such as BERT. We show how a sequence-to-sequence model can be trained to generate relevance labels as “target tokens”, and how the underlying logits of these target tokens can be interpreted as relevance probabilities for ranking. Experimental results on the MS MARCO passage ranking task show that our ranking approach is superior to strong encoder-only models. On three other document retrieval test collections, we demonstrate a zero-shot transfer-based approach that outperforms previous state-of-the-art models requiring in-domain cross-validation. Furthermore, we find that our approach significantly outperforms an encoder-only architecture in a data-poor setting. We investigate this observation in more detail by varying target tokens to probe the model’s use of latent knowledge. Surprisingly, we find that the choice of target tokens impacts effectiveness, even for words that are closely related semantically. This finding sheds some light on why our sequence-to-sequence formulation for document ranking is effective. Code and models are available at pygaggle.ai.", "year": 2020, "venue": "Findings", "authors": [ "Rodrigo Nogueira", "Zhiying Jiang", "Ronak Pradeep", "Jimmy J. Lin" ], "externalIds": { "MAG": "3100107515", "ArXiv": "2003.06713", "DBLP": "journals/corr/abs-2003-06713", "ACL": "2020.findings-emnlp.63", "DOI": "10.18653/v1/2020.findings-emnlp.63", "CorpusId": 212725651 }, "url": "https://www.semanticscholar.org/paper/f6e0164466e827112fd415afdc28ddf8e0eb1ba3", "referenceCount": 36, "citationCount": 441, "influentialCitationCount": 69, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Context-Aware Sentence/Passage Term Importance Estimation For First Stage Retrieval", "abstract": "Term frequency is a common method for identifying the importance of a term in a query or document. But it is a weak signal, especially when the frequency distribution is flat, such as in long queries or short documents where the text is of sentence/passage-length. This paper proposes a Deep Contextualized Term Weighting framework that learns to map BERT's contextualized text representations to context-aware term weights for sentences and passages. When applied to passages, DeepCT-Index produces term weights that can be stored in an ordinary inverted index for passage retrieval. When applied to query text, DeepCT-Query generates a weighted bag-of-words query. Both types of term weight can be used directly by typical first-stage retrieval algorithms. This is novel because most deep neural network based ranking models have higher computational costs, and thus are restricted to later-stage rankers. 
Experiments on four datasets demonstrate that DeepCT's deep contextualized text understanding greatly improves the accuracy of first-stage retrieval algorithms.", "year": 2019, "venue": "arXiv.org", "authors": [ "Zhuyun Dai", "Jamie Callan" ], "externalIds": { "DBLP": "journals/corr/abs-1910-10687", "MAG": "2982096936", "ArXiv": "1910.10687", "CorpusId": 204838389 }, "url": "https://www.semanticscholar.org/paper/3de1752cd0854e220fc41f0ccf7db913f846284c", "referenceCount": 38, "citationCount": 179, "influentialCitationCount": 32, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Information Needs, Queries, and Query Performance Prediction", "abstract": "The query performance prediction (QPP) task is to estimate the effectiveness of a search performed in response to a query with no relevance judgments. Existing QPP methods do not account for the effectiveness of a query in representing the underlying information need. We demonstrate the far-reaching implications of this reality using standard TREC-based evaluation of QPP methods: their relative prediction quality patterns vary with respect to the effectiveness of queries used to represent the information needs. Motivated by our findings, we revise the basic probabilistic formulation of the QPP task by accounting for the information need and its connection to the query. We further explore this connection by proposing a novel QPP approach that utilizes information about a set of queries representing the same information need. Predictors instantiated from our approach using a wide variety of existing QPP methods post prediction quality that substantially transcends that of applying these methods, as is standard, using a single query representing the information need. Additional in-depth empirical analysis of different aspects of our approach further attests to the crucial role of query effectiveness in QPP.", "year": 2019, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "Oleg Zendel", "Anna Shtok", "Fiana Raiber", "Oren Kurland", "J. Culpepper" ], "externalIds": { "MAG": "2953565332", "DBLP": "conf/sigir/ZendelSRKC19", "DOI": "10.1145/3331184.3331253", "CorpusId": 197928282 }, "url": "https://www.semanticscholar.org/paper/ce6aa7d941e1eb20b0c331a2cde0d3850c09bf5d", "referenceCount": 65, "citationCount": 39, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Passage Re-ranking with BERT", "abstract": "Recently, neural models pretrained on a language modeling task, such as ELMo (Peters et al., 2017), OpenAI GPT (Radford et al., 2018), and BERT (Devlin et al., 2018), have achieved impressive results on various natural language processing tasks such as question-answering and natural language inference. In this paper, we describe a simple re-implementation of BERT for query-based passage re-ranking. Our system is the state of the art on the TREC-CAR dataset and the top entry in the leaderboard of the MS MARCO passage retrieval task, outperforming the previous state of the art by 27% (relative) in MRR@10. 
The code to reproduce our results is available at this https URL", "year": 2019, "venue": "arXiv.org", "authors": [ "Rodrigo Nogueira", "Kyunghyun Cho" ], "externalIds": { "DBLP": "journals/corr/abs-1901-04085", "MAG": "2909544278", "ArXiv": "1901.04085", "CorpusId": 58004692 }, "url": "https://www.semanticscholar.org/paper/85e07116316e686bf787114ba10ca60f4ea7c5b2", "referenceCount": 24, "citationCount": 959, "influentialCitationCount": 151, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Pytrec_eval: An Extremely Fast Python Interface to trec_eval", "abstract": "We introduce pytrec_eval, a Python interface to the trec_eval information retrieval evaluation toolkit. pytrec_eval exposes the reference implementations of trec_eval within Python as a native extension. We show that pytrec_eval is around one order of magnitude faster than invoking trec_eval as a sub process from within Python. Compared to a native Python implementation of NDCG, pytrec_eval is twice as fast for practically-sized rankings. Finally, we demonstrate its effectiveness in an application where pytrec_eval is combined with Pyndri and the OpenAI Gym where query expansion is learned using Q-learning.", "year": 2018, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "Christophe Van Gysel", "M. de Rijke" ], "externalIds": { "MAG": "3103251620", "DBLP": "conf/sigir/GyselR18", "ArXiv": "1805.01597", "DOI": "10.1145/3209978.3210065", "CorpusId": 13691943 }, "url": "https://www.semanticscholar.org/paper/751b923c050dd7802d9493962b4b04ecbad89d80", "referenceCount": 7, "citationCount": 80, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Memory-Based Simple Heuristics as Attribute Substitution: Competitive Tests of Binary Choice Inference Models.", "abstract": "Some researchers on binary choice inference have argued that people make inferences based on simple heuristics, such as recognition, fluency, or familiarity. Others have argued that people make inferences based on available knowledge. To examine the boundary between heuristic and knowledge usage, we examine binary choice inference processes in terms of attribute substitution in heuristic use (Kahneman & Frederick, 2005). In this framework, it is predicted that people will rely on heuristic or knowledge-based inference depending on the subjective difficulty of the inference task. We conducted competitive tests of binary choice inference models representing simple heuristics (fluency and familiarity heuristics) and knowledge-based inference models. We found that a simple heuristic model (especially a familiarity heuristic model) explained inference patterns for subjectively difficult inference tasks, and that a knowledge-based inference model explained subjectively easy inference tasks. These results were consistent with the predictions of the attribute substitution framework. Issues on usage of simple heuristics and psychological processes are discussed.", "year": 2017, "venue": "Cognitive Sciences", "authors": [ "Hidehito Honda", "Toshihiko Matsuka", "K. 
Ueda" ], "externalIds": { "MAG": "2483129770", "DOI": "10.1111/cogs.12395", "CorpusId": 4358647, "PubMed": "27435359" }, "url": "https://www.semanticscholar.org/paper/1b8664f12b0c1dc85b6f393e8ceee670df719488", "referenceCount": 54, "citationCount": 18, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "MS MARCO: A Human Generated MAchine Reading COmprehension Dataset", "abstract": "This paper presents our recent work on the design and development of a new, large scale dataset, which we name MS MARCO, for MAchine Reading COmprehension. This new dataset is aimed to overcome a number of well-known weaknesses of previous publicly available datasets for the same task of reading comprehension and question answering. In MS MARCO, all questions are sampled from real anonymized user queries. The context passages, from which answers in the dataset are derived, are extracted from real web documents using the most advanced version of the Bing search engine. The answers to the queries are human generated. Finally, a subset of these queries has multiple answers. We aim to release one million queries and the corresponding answers in the dataset, which, to the best of our knowledge, is the most comprehensive real-world dataset of its kind in both quantity and quality. We are currently releasing 100,000 queries with their corresponding answers to inspire work in reading comprehension and question answering along with gathering feedback from the research community.", "year": 2016, "venue": "CoCo@NIPS", "authors": [ "Daniel Fernando Campos", "Tri Nguyen", "Mir Rosenberg", "Xia Song", "Jianfeng Gao", "Saurabh Tiwary", "Rangan Majumder", "L. Deng", "Bhaskar Mitra" ], "externalIds": { "DBLP": "conf/nips/NguyenRSGTMD16", "MAG": "2558203065", "ArXiv": "1611.09268", "CorpusId": 1289517 }, "url": "https://www.semanticscholar.org/paper/dd95f96e3322dcaee9b1e3f7871ecc3ebcd51bfe", "referenceCount": 30, "citationCount": 2279, "influentialCitationCount": 387, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Representativeness revisited: Attribute substitution in intuitive judgment.", "abstract": "The program of research now known as the heuristics and biases approach began with a survey of 84 participants at the 1969 meetings of the Mathematical Psychology Society and the American Psychological Association (Tversky & Kahneman, 1971). The respondents, including several authors of statistics texts, were asked realistic questions about the robustness of statistical estimates and the replicability of research results. The article commented tongue-in-heek on the prevalence of a belief that the law of large numbers applies to small numbers as well: Respondents placed too much confidence in the results of small samples, and their statistical judgments showed little sensitivity to sample size. The mathematical psychologists who participated in the survey not only should have known better – they did know better. Although their intuitive guesses were off the mark, most of them could have computed the correct answers on the back of an envelope. These sophisticated individuals apparently had access to two distinct approaches for answering statistical questions: one that is spontaneous, intuitive, effortless, and fast; and another that is deliberate, rule-governed, effortful, and slow. The persistence of large biases in the guesses of experts raised doubts about the educability of statistical intuitions. 
Moreover, it was known that the same biases affect choices in the real world, where researchers commonly select sample sizes that are too small to provide a fair test of their hypotheses (Cohen, 1969, 1992).", "year": 2002, "venue": "", "authors": [ "D. Kahneman", "S. Frederick" ], "externalIds": { "MAG": "1843246605", "DOI": "10.1017/CBO9780511808098.004", "CorpusId": 18516635 }, "url": "https://www.semanticscholar.org/paper/4069615a36c33e61ca309b8ceaeb628a10d441b5", "referenceCount": 1407, "citationCount": 3020, "influentialCitationCount": 200, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology" ] }, { "title": "Proceedings of the 24th annual international ACM SIGIR conference on Research and development in information retrieval", "abstract": null, "year": 2001, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "D. Kraft", "W. Bruce Croft", "David J. Harper", "J. Zobel" ], "externalIds": { "MAG": "2070774296", "CorpusId": 208007619 }, "url": "https://www.semanticscholar.org/paper/423d666934a50e4042fca7d00ee0fb1fedcd948a", "referenceCount": 0, "citationCount": 67, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "InDi: Informative and Diverse Sampling for Dense Retrieval", "abstract": null, "year": 2024, "venue": "European Conference on Information Retrieval", "authors": [ "Nachshon Cohen", "Hedda Cohen Indelman", "Yaron Fairstein", "Guy Kushilevitz" ], "externalIds": { "DBLP": "conf/ecir/CohenIFK24", "DOI": "10.1007/978-3-031-56063-7_16", "CorpusId": 268752496 }, "url": "https://www.semanticscholar.org/paper/f9dff3b93058d230b9c481e279b137afde17a8bd", "referenceCount": 0, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Neural Re-rankers for Evidence Retrieval in the FEVEROUS Task", "abstract": "Computational fact-checking has gained a lot of traction in the machine learning and natural language processing communities. A plethora of solutions have been developed, but methods which leverage both structured and unstructured information to detect misinformation are of particular relevance. In this paper, we tackle the FEVEROUS (Fact Extraction and VERification Over Unstructured and Structured information) challenge which consists of an open source baseline system together with a benchmark dataset containing 87,026 verified claims. We extend this baseline model by improving the evidence retrieval module yielding the best evidence F1 score among the competitors in the challenge leaderboard while obtaining an overall FEVEROUS score of 0.20 (5th best ranked system).", "year": 2021, "venue": "FEVER", "authors": [ "Mohammed Saeed", "Giulio Alfarano", "Khai Nguyen", "Duc-Hong Pham", "Raphael Troncy", "Paolo Papotti" ], "externalIds": { "ACL": "2021.fever-1.12", "DOI": "10.18653/v1/2021.fever-1.12", "CorpusId": 241583584 }, "url": "https://www.semanticscholar.org/paper/d0194f7280caab63b2946b22ddf7ae3f327305fa", "referenceCount": 25, "citationCount": 9, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "2023a. Rankvicuna: Zero-shot listwise document reranking with open-source large language models", "abstract": null, "year": null, "venue": "Preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. 
In-context learning of large language models for controlled dialogue summarization: A holistic benchmark and empirical analysis", "abstract": null, "year": null, "venue": "Proceedings of the 4th New Frontiers in Summarization Workshop", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2022. Scaling instruction-finetuned language models", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2022. Inpars: Data augmentation for information retrieval using large language models", "abstract": null, "year": null, "venue": "CoRR, abs/", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024b. A setwise approach for effective and highly efficient zero-shot ranking with large language models", "abstract": null, "year": null, "venue": "Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR 2024. ACM.", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Self-improving for zero-shot named entity recognition with large language models", "abstract": null, "year": null, "venue": "CoRR, abs/", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, 
{ "title": "2024. Query performance prediction using relevance judgments generated by large language models", "abstract": null, "year": null, "venue": "CoRR", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "MoJE: Mixture of Jailbreak Experts, Naive Tabular Classifiers as Guard for Prompt Attacks": { "paper_title": "MoJE: Mixture of Jailbreak Experts, Naive Tabular Classifiers as Guard for Prompt Attacks", "arxiv_id": "2409.17699v2", "keyword": "large language model", "authors": [ "Giandomenico Cornacchia", "Giulio Zizzo", "Kieran Fraser", "Muhammad Zaid Hamed", "Ambrish Rawat", "Mark Purcell" ], "references": [ { "title": "Great, Now Write an Article About That: The Crescendo Multi-Turn LLM Jailbreak Attack", "abstract": "Large Language Models (LLMs) have risen significantly in popularity and are increasingly being adopted across multiple applications. These LLMs are heavily aligned to resist engaging in illegal or unethical topics as a means to avoid contributing to responsible AI harms. However, a recent line of attacks, known as jailbreaks, seek to overcome this alignment. Intuitively, jailbreak attacks aim to narrow the gap between what the model can do and what it is willing to do. In this paper, we introduce a novel jailbreak attack called Crescendo. Unlike existing jailbreak methods, Crescendo is a simple multi-turn jailbreak that interacts with the model in a seemingly benign manner. It begins with a general prompt or question about the task at hand and then gradually escalates the dialogue by referencing the model's replies progressively leading to a successful jailbreak. We evaluate Crescendo on various public systems, including ChatGPT, Gemini Pro, Gemini-Ultra, LlaMA-2 70b and LlaMA-3 70b Chat, and Anthropic Chat. Our results demonstrate the strong efficacy of Crescendo, with it achieving high attack success rates across all evaluated models and tasks. Furthermore, we present Crescendomation, a tool that automates the Crescendo attack and demonstrate its efficacy against state-of-the-art models through our evaluations. Crescendomation surpasses other state-of-the-art jailbreaking techniques on the AdvBench subset dataset, achieving 29-61% higher performance on GPT-4 and 49-71% on Gemini-Pro. Finally, we also demonstrate Crescendo's ability to jailbreak multimodal models.", "year": 2024, "venue": "arXiv.org", "authors": [ "M. 
Russinovich", "Ahmed Salem", "Ronen Eldan" ], "externalIds": { "ArXiv": "2404.01833", "DBLP": "journals/corr/abs-2404-01833", "DOI": "10.48550/arXiv.2404.01833", "CorpusId": 268856920 }, "url": "https://www.semanticscholar.org/paper/2c4d2d889a1f0ff9598de829a001df11a95d3294", "referenceCount": 29, "citationCount": 20, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RigorLLM: Resilient Guardrails for Large Language Models against Undesired Content", "abstract": "Recent advancements in Large Language Models (LLMs) have showcased remarkable capabilities across various tasks in different domains. However, the emergence of biases and the potential for generating harmful content in LLMs, particularly under malicious inputs, pose significant challenges. Current mitigation strategies, while effective, are not resilient under adversarial attacks. This paper introduces Resilient Guardrails for Large Language Models (RigorLLM), a novel framework designed to efficiently and effectively moderate harmful and unsafe inputs and outputs for LLMs. By employing a multi-faceted approach that includes energy-based training data augmentation through Langevin dynamics, optimizing a safe suffix for inputs via minimax optimization, and integrating a fusion-based model combining robust KNN with LLMs based on our data augmentation, RigorLLM offers a robust solution to harmful content moderation. Our experimental evaluations demonstrate that RigorLLM not only outperforms existing baselines like OpenAI API and Perspective API in detecting harmful content but also exhibits unparalleled resilience to jailbreaking attacks. The innovative use of constrained optimization and a fusion-based guardrail approach represents a significant step forward in developing more secure and reliable LLMs, setting a new standard for content moderation frameworks in the face of evolving digital threats.", "year": 2024, "venue": "International Conference on Machine Learning", "authors": [ "Zhuowen Yuan", "Zidi Xiong", "Yi Zeng", "Ning Yu", "Ruoxi Jia", "D. Song", "Bo Li" ], "externalIds": { "DBLP": "journals/corr/abs-2403-13031", "ArXiv": "2403.13031", "DOI": "10.48550/arXiv.2403.13031", "CorpusId": 268536710 }, "url": "https://www.semanticscholar.org/paper/2f4cc3f4a1c70cd5aca14c1304037491cd3aeb9b", "referenceCount": 37, "citationCount": 19, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Survey of Vulnerabilities in Large Language Models Revealed by Adversarial Attacks", "abstract": "Large Language Models (LLMs) are swiftly advancing in architecture and capability, and as they integrate more deeply into complex systems, the urgency to scrutinize their security properties grows. This paper surveys research in the emerging interdisciplinary field of adversarial attacks on LLMs, a subfield of trustworthy ML, combining the perspectives of Natural Language Processing and Security. Prior work has shown that even safety-aligned LLMs (via instruction tuning and reinforcement learning through human feedback) can be susceptible to adversarial attacks, which exploit weaknesses and mislead AI systems, as evidenced by the prevalence of `jailbreak' attacks on models like ChatGPT and Bard. 
In this survey, we first provide an overview of large language models, describe their safety alignment, and categorize existing research based on various learning structures: textual-only attacks, multi-modal attacks, and additional attack methods specifically targeting complex systems, such as federated learning or multi-agent systems. We also offer comprehensive remarks on works that focus on the fundamental sources of vulnerabilities and potential defenses. To make this field more accessible to newcomers, we present a systematic review of existing works, a structured typology of adversarial attack concepts, and additional resources, including slides for presentations on related topics at the 62nd Annual Meeting of the Association for Computational Linguistics (ACL'24).", "year": 2023, "venue": "arXiv.org", "authors": [ "Erfan Shayegani", "Md Abdullah Al Mamun", "Yu Fu", "Pedram Zaree", "Yue Dong", "Nael B. Abu-Ghazaleh" ], "externalIds": { "DBLP": "journals/corr/abs-2310-10844", "ArXiv": "2310.10844", "DOI": "10.48550/arXiv.2310.10844", "CorpusId": 264172191 }, "url": "https://www.semanticscholar.org/paper/4f63c5a89c7299a864c6c48aa1844fb0fe8c9437", "referenceCount": 182, "citationCount": 72, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Catastrophic Jailbreak of Open-source LLMs via Exploiting Generation", "abstract": "The rapid progress in open-source large language models (LLMs) is significantly advancing AI development. Extensive efforts have been made before model release to align their behavior with human values, with the primary goal of ensuring their helpfulness and harmlessness. However, even carefully aligned models can be manipulated maliciously, leading to unintended behaviors, known as\"jailbreaks\". These jailbreaks are typically triggered by specific text inputs, often referred to as adversarial prompts. In this work, we propose the generation exploitation attack, an extremely simple approach that disrupts model alignment by only manipulating variations of decoding methods. By exploiting different generation strategies, including varying decoding hyper-parameters and sampling methods, we increase the misalignment rate from 0% to more than 95% across 11 language models including LLaMA2, Vicuna, Falcon, and MPT families, outperforming state-of-the-art attacks with $30\\times$ lower computational cost. Finally, we propose an effective alignment method that explores diverse generation strategies, which can reasonably reduce the misalignment rate under our attack. Altogether, our study underscores a major failure in current safety evaluation and alignment procedures for open-source LLMs, strongly advocating for more comprehensive red teaming and better alignment before releasing such models. 
Our code is available at https://github.com/Princeton-SysML/Jailbreak_LLM.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Yangsibo Huang", "Samyak Gupta", "Mengzhou Xia", "Kai Li", "Danqi Chen" ], "externalIds": { "DBLP": "journals/corr/abs-2310-06987", "ArXiv": "2310.06987", "DOI": "10.48550/arXiv.2310.06987", "CorpusId": 263835408 }, "url": "https://www.semanticscholar.org/paper/ac27dd71af3ee93e1129482ceececbae7dd0d0e8", "referenceCount": 51, "citationCount": 168, "influentialCitationCount": 20, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fine-tuning Aligned Language Models Compromises Safety, Even When Users Do Not Intend To!", "abstract": "Optimizing large language models (LLMs) for downstream use cases often involves the customization of pre-trained LLMs through further fine-tuning. Meta's open release of Llama models and OpenAI's APIs for fine-tuning GPT-3.5 Turbo on custom datasets also encourage this practice. But, what are the safety costs associated with such custom fine-tuning? We note that while existing safety alignment infrastructures can restrict harmful behaviors of LLMs at inference time, they do not cover safety risks when fine-tuning privileges are extended to end-users. Our red teaming studies find that the safety alignment of LLMs can be compromised by fine-tuning with only a few adversarially designed training examples. For instance, we jailbreak GPT-3.5 Turbo's safety guardrails by fine-tuning it on only 10 such examples at a cost of less than $0.20 via OpenAI's APIs, making the model responsive to nearly any harmful instructions. Disconcertingly, our research also reveals that, even without malicious intent, simply fine-tuning with benign and commonly used datasets can also inadvertently degrade the safety alignment of LLMs, though to a lesser extent. These findings suggest that fine-tuning aligned LLMs introduces new safety risks that current safety infrastructures fall short of addressing -- even if a model's initial safety alignment is impeccable, it is not necessarily to be maintained after custom fine-tuning. We outline and critically analyze potential mitigations and advocate for further research efforts toward reinforcing safety protocols for the custom fine-tuning of aligned LLMs.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Xiangyu Qi", "Yi Zeng", "Tinghao Xie", "Pin-Yu Chen", "Ruoxi Jia", "Prateek Mittal", "Peter Henderson" ], "externalIds": { "DBLP": "journals/corr/abs-2310-03693", "ArXiv": "2310.03693", "DOI": "10.48550/arXiv.2310.03693", "CorpusId": 263671523 }, "url": "https://www.semanticscholar.org/paper/0e0e706e13f160e74cac9556f28ab9a358c148d2", "referenceCount": 92, "citationCount": 284, "influentialCitationCount": 45, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Baseline Defenses for Adversarial Attacks Against Aligned Language Models", "abstract": "As Large Language Models quickly become ubiquitous, it becomes critical to understand their security vulnerabilities. Recent work shows that text optimizers can produce jailbreaking prompts that bypass moderation and alignment. Drawing from the rich body of work on adversarial machine learning, we approach these attacks with three questions: What threat models are practically useful in this domain? How do baseline defense techniques perform in this new domain? How does LLM security differ from computer vision? 
We evaluate several baseline defense strategies against leading adversarial attacks on LLMs, discussing the various settings in which each is feasible and effective. Particularly, we look at three types of defenses: detection (perplexity based), input preprocessing (paraphrase and retokenization), and adversarial training. We discuss white-box and gray-box settings and discuss the robustness-performance trade-off for each of the defenses considered. We find that the weakness of existing discrete optimizers for text, combined with the relatively high costs of optimization, makes standard adaptive attacks more challenging for LLMs. Future research will be needed to uncover whether more powerful optimizers can be developed, or whether the strength of filtering and preprocessing defenses is greater in the LLMs domain than it has been in computer vision.", "year": 2023, "venue": "arXiv.org", "authors": [ "Neel Jain", "Avi Schwarzschild", "Yuxin Wen", "Gowthami Somepalli", "John Kirchenbauer", "Ping-yeh Chiang", "Micah Goldblum", "Aniruddha Saha", "Jonas Geiping", "Tom Goldstein" ], "externalIds": { "DBLP": "journals/corr/abs-2309-00614", "ArXiv": "2309.00614", "DOI": "10.48550/arXiv.2309.00614", "CorpusId": 261494182 }, "url": "https://www.semanticscholar.org/paper/3e30a7ac4886b28eb50151f58e14a1d698cccd0e", "referenceCount": 75, "citationCount": 184, "influentialCitationCount": 44, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LLM Self Defense: By Self Examination, LLMs Know They Are Being Tricked", "abstract": "Large language models (LLMs) are popular for high-quality text generation but can produce harmful content, even when aligned with human values through reinforcement learning. Adversarial prompts can bypass their safety measures. We propose LLM Self Defense, a simple approach to defend against these attacks by having an LLM screen the induced responses. Our method does not require any fine-tuning, input preprocessing, or iterative output generation. Instead, we incorporate the generated content into a pre-defined prompt and employ another instance of an LLM to analyze the text and predict whether it is harmful. We test LLM Self Defense on GPT 3.5 and Llama 2, two of the current most prominent LLMs against various types of attacks, such as forcefully inducing affirmative responses to prompts and prompt engineering attacks. Notably, LLM Self Defense succeeds in reducing the attack success rate to virtually 0 using both GPT 3.5 and Llama 2. The code is publicly available at https://github.com/poloclub/llm-self-defense", "year": 2023, "venue": "Tiny Papers @ ICLR", "authors": [ "Alec Helbling", "Mansi Phute", "Matthew Hull", "Duen Horng Chau" ], "externalIds": { "DBLP": "journals/corr/abs-2308-07308", "ArXiv": "2308.07308", "DOI": "10.48550/arXiv.2308.07308", "CorpusId": 260887487 }, "url": "https://www.semanticscholar.org/paper/2cdd5c3dc42c0df40bc8839709869af3560d4bfe", "referenceCount": 42, "citationCount": 94, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Universal and Transferable Adversarial Attacks on Aligned Language Models", "abstract": "Because\"out-of-the-box\"large language models are capable of generating a great deal of objectionable content, recent work has focused on aligning these models in an attempt to prevent undesirable generation. 
While there has been some success at circumventing these measures -- so-called\"jailbreaks\"against LLMs -- these attacks have required significant human ingenuity and are brittle in practice. In this paper, we propose a simple and effective attack method that causes aligned language models to generate objectionable behaviors. Specifically, our approach finds a suffix that, when attached to a wide range of queries for an LLM to produce objectionable content, aims to maximize the probability that the model produces an affirmative response (rather than refusing to answer). However, instead of relying on manual engineering, our approach automatically produces these adversarial suffixes by a combination of greedy and gradient-based search techniques, and also improves over past automatic prompt generation methods. Surprisingly, we find that the adversarial prompts generated by our approach are quite transferable, including to black-box, publicly released LLMs. Specifically, we train an adversarial attack suffix on multiple prompts (i.e., queries asking for many different types of objectionable content), as well as multiple models (in our case, Vicuna-7B and 13B). When doing so, the resulting attack suffix is able to induce objectionable content in the public interfaces to ChatGPT, Bard, and Claude, as well as open source LLMs such as LLaMA-2-Chat, Pythia, Falcon, and others. In total, this work significantly advances the state-of-the-art in adversarial attacks against aligned language models, raising important questions about how such systems can be prevented from producing objectionable information. Code is available at github.com/llm-attacks/llm-attacks.", "year": 2023, "venue": "arXiv.org", "authors": [ "Andy Zou", "Zifan Wang", "J. Z. Kolter", "Matt Fredrikson" ], "externalIds": { "DBLP": "journals/corr/abs-2307-15043", "ArXiv": "2307.15043", "CorpusId": 260202961 }, "url": "https://www.semanticscholar.org/paper/47030369e97cc44d4b2e3cf1be85da0fd134904a", "referenceCount": 49, "citationCount": 724, "influentialCitationCount": 220, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Are aligned neural networks adversarially aligned?", "abstract": "Large language models are now tuned to align with the goals of their creators, namely to be\"helpful and harmless.\"These models should respond helpfully to user questions, but refuse to answer requests that could cause harm. However, adversarial users can construct inputs which circumvent attempts at alignment. In this work, we study adversarial alignment, and ask to what extent these models remain aligned when interacting with an adversarial user who constructs worst-case inputs (adversarial examples). These inputs are designed to cause the model to emit harmful content that would otherwise be prohibited. We show that existing NLP-based optimization attacks are insufficiently powerful to reliably attack aligned text models: even when current NLP-based attacks fail, we can find adversarial inputs with brute force. As a result, the failure of current attacks should not be seen as proof that aligned text models remain aligned under adversarial inputs. However the recent trend in large-scale ML models is multimodal models that allow users to provide images that influence the text that is generated. We show these models can be easily attacked, i.e., induced to perform arbitrary un-aligned behavior through adversarial perturbation of the input image. 
We conjecture that improved NLP attacks may demonstrate this same level of adversarial control over text-only models.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Nicholas Carlini", "Milad Nasr", "Christopher A. Choquette-Choo", "Matthew Jagielski", "Irena Gao", "Anas Awadalla", "Pang Wei Koh", "Daphne Ippolito", "Katherine Lee", "Florian Tramèr", "Ludwig Schmidt" ], "externalIds": { "ArXiv": "2306.15447", "DBLP": "journals/corr/abs-2306-15447", "DOI": "10.48550/arXiv.2306.15447", "CorpusId": 259262181 }, "url": "https://www.semanticscholar.org/paper/8724579d3f126e753a0451d98ff57b165f722e72", "referenceCount": 57, "citationCount": 140, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Judging LLM-as-a-judge with MT-Bench and Chatbot Arena", "abstract": "Evaluating large language model (LLM) based chat assistants is challenging due to their broad capabilities and the inadequacy of existing benchmarks in measuring human preferences. To address this, we explore using strong LLMs as judges to evaluate these models on more open-ended questions. We examine the usage and limitations of LLM-as-a-judge, including position, verbosity, and self-enhancement biases, as well as limited reasoning ability, and propose solutions to mitigate some of them. We then verify the agreement between LLM judges and human preferences by introducing two benchmarks: MT-bench, a multi-turn question set; and Chatbot Arena, a crowdsourced battle platform. Our results reveal that strong LLM judges like GPT-4 can match both controlled and crowdsourced human preferences well, achieving over 80% agreement, the same level of agreement between humans. Hence, LLM-as-a-judge is a scalable and explainable way to approximate human preferences, which are otherwise very expensive to obtain. Additionally, we show our benchmark and traditional benchmarks complement each other by evaluating several variants of LLaMA and Vicuna. The MT-bench questions, 3K expert votes, and 30K conversations with human preferences are publicly available at https://github.com/lm-sys/FastChat/tree/main/fastchat/llm_judge.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Lianmin Zheng", "Wei-Lin Chiang", "Ying Sheng", "Siyuan Zhuang", "Zhanghao Wu", "Yonghao Zhuang", "Zi Lin", "Zhuohan Li", "Dacheng Li", "E. Xing", "Haotong Zhang", "Joseph Gonzalez", "Ion Stoica" ], "externalIds": { "ArXiv": "2306.05685", "DBLP": "journals/corr/abs-2306-05685", "DOI": "10.48550/arXiv.2306.05685", "CorpusId": 259129398 }, "url": "https://www.semanticscholar.org/paper/a0a79dad89857a96f8f71b14238e5237cbfc4787", "referenceCount": 59, "citationCount": 2057, "influentialCitationCount": 335, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Exploiting Programmatic Behavior of LLMs: Dual-Use Through Standard Security Attacks", "abstract": "Recent advances in instruction-following large language models (LLMs) have led to dramatic improvements in a range of NLP tasks. Unfortunately, we find that the same improved capabilities amplify the dual-use risks for malicious purposes of these models. Dual-use is difficult to prevent as instruction-following capabilities now enable standard attacks from computer security. The capabilities of these instruction-following LLMs provide strong economic incentives for dual-use by malicious actors. 
In particular, we show that instruction-following LLMs can produce targeted malicious content, including hate speech and scams, bypassing in-the-wild defenses implemented by LLM API vendors. Our analysis shows that this content can be generated economically and at cost of $125-500 \\times$ cheaper than human effort alone. Together, our findings suggest that LLMs will increasingly attract more sophisticated adversaries and attacks, and addressing these attacks may require new approaches to mitigations.", "year": 2023, "venue": "2024 IEEE Security and Privacy Workshops (SPW)", "authors": [ "Daniel Kang", "Xuechen Li", "Ion Stoica", "Carlos Guestrin", "M. Zaharia", "Tatsunori Hashimoto" ], "externalIds": { "DBLP": "journals/corr/abs-2302-05733", "ArXiv": "2302.05733", "DOI": "10.1109/SPW63631.2024.00018", "CorpusId": 256827239 }, "url": "https://www.semanticscholar.org/paper/0cf694b8f85ab2e11d45595de211a15cfbadcd22", "referenceCount": 51, "citationCount": 168, "influentialCitationCount": 17, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Generative Language Models and Automated Influence Operations: Emerging Threats and Potential Mitigations", "abstract": "Generative language models have improved drastically, and can now produce realistic text outputs that are difficult to distinguish from human-written content. For malicious actors, these language models bring the promise of automating the creation of convincing and misleading text for use in influence operations. This report assesses how language models might change influence operations in the future, and what steps can be taken to mitigate this threat. We lay out possible changes to the actors, behaviors, and content of online influence operations, and provide a framework for stages of the language model-to-influence operations pipeline that mitigations could target (model construction, model access, content dissemination, and belief formation). While no reasonable mitigation can be expected to fully prevent the threat of AI-enabled influence operations, a combination of multiple mitigations may make an important difference.", "year": 2023, "venue": "arXiv.org", "authors": [ "Josh A. Goldstein", "Girish Sastry", "Micah Musser", "Renee DiResta", "M. Gentzel", "Katerina Sedova" ], "externalIds": { "DBLP": "journals/corr/abs-2301-04246", "ArXiv": "2301.04246", "DOI": "10.48550/arXiv.2301.04246", "CorpusId": 255595557 }, "url": "https://www.semanticscholar.org/paper/c9ad9d69d7568110dd5527598a92c7f8b335eef4", "referenceCount": 199, "citationCount": 172, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Training language models to follow instructions with human feedback", "abstract": "Making language models bigger does not inherently make them better at following a user's intent. For example, large language models can generate outputs that are untruthful, toxic, or simply not helpful to the user. In other words, these models are not aligned with their users. In this paper, we show an avenue for aligning language models with user intent on a wide range of tasks by fine-tuning with human feedback. Starting with a set of labeler-written prompts and prompts submitted through the OpenAI API, we collect a dataset of labeler demonstrations of the desired model behavior, which we use to fine-tune GPT-3 using supervised learning. 
We then collect a dataset of rankings of model outputs, which we use to further fine-tune this supervised model using reinforcement learning from human feedback. We call the resulting models InstructGPT. In human evaluations on our prompt distribution, outputs from the 1.3B parameter InstructGPT model are preferred to outputs from the 175B GPT-3, despite having 100x fewer parameters. Moreover, InstructGPT models show improvements in truthfulness and reductions in toxic output generation while having minimal performance regressions on public NLP datasets. Even though InstructGPT still makes simple mistakes, our results show that fine-tuning with human feedback is a promising direction for aligning language models with human intent.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Long Ouyang", "Jeff Wu", "Xu Jiang", "Diogo Almeida", "Carroll L. Wainwright", "Pamela Mishkin", "Chong Zhang", "Sandhini Agarwal", "Katarina Slama", "Alex Ray", "John Schulman", "Jacob Hilton", "Fraser Kelton", "Luke E. Miller", "Maddie Simens", "Amanda Askell", "P. Welinder", "P. Christiano", "J. Leike", "Ryan J. Lowe" ], "externalIds": { "DBLP": "conf/nips/Ouyang0JAWMZASR22", "ArXiv": "2203.02155", "CorpusId": 246426909 }, "url": "https://www.semanticscholar.org/paper/d766bffc357127e0dc86dd69561d5aeb520d6f4c", "referenceCount": 83, "citationCount": 8493, "influentialCitationCount": 1115, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Challenges in Detoxifying Language Models", "abstract": "Large language models (LM) generate remarkably fluent text and can be efficiently adapted across NLP tasks. Measuring and guaranteeing the quality of generated text in terms of safety is imperative for deploying LMs in the real world; to this end, prior work often relies on automatic evaluation of LM toxicity. We critically discuss this approach, evaluate several toxicity mitigation strategies with respect to both automatic and human evaluation, and analyze consequences of toxicity mitigation in terms of model bias and LM quality. We demonstrate that while basic intervention strategies can effectively optimize previously established automatic metrics on the RealToxicityPrompts dataset, this comes at the cost of reduced LM coverage for both texts about, and dialects of, marginalized groups. Additionally, we find that human raters often disagree with high automatic toxicity scores after strong toxicity reduction interventions -- highlighting further the nuances involved in careful evaluation of LM toxicity.", "year": 2021, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Johannes Welbl", "Amelia Glaese", "J. Uesato", "Sumanth Dathathri", "John F. J. Mellor", "Lisa Anne Hendricks", "Kirsty Anderson", "Pushmeet Kohli", "Ben Coppin", "Po-Sen Huang" ], "externalIds": { "DBLP": "journals/corr/abs-2109-07445", "ArXiv": "2109.07445", "DOI": "10.18653/v1/2021.findings-emnlp.210", "CorpusId": 237513578 }, "url": "https://www.semanticscholar.org/paper/d64e57b9780f30f5b49bf620fdfb8584651b7f85", "referenceCount": 62, "citationCount": 158, "influentialCitationCount": 15, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RealToxicityPrompts: Evaluating Neural Toxic Degeneration in Language Models", "abstract": "Pretrained neural language models (LMs) are prone to generating racist, sexist, or otherwise toxic language which hinders their safe deployment. 
We investigate the extent to which pretrained LMs can be prompted to generate toxic language, and the effectiveness of controllable text generation algorithms at preventing such toxic degeneration. We create and release RealToxicityPrompts, a dataset of 100K naturally occurring, sentence-level prompts derived from a large corpus of English web text, paired with toxicity scores from a widely-used toxicity classifier. Using RealToxicityPrompts, we find that pretrained LMs can degenerate into toxic text even from seemingly innocuous prompts. We empirically assess several controllable generation methods, and find that while data- or compute-intensive methods (e.g., adaptive pretraining on non-toxic data) are more effective at steering away from toxicity than simpler solutions (e.g., banning “bad” words), no current method is failsafe against neural toxic degeneration. To pinpoint the potential cause of such persistent toxic degeneration, we analyze two web text corpora used to pretrain several LMs (including GPT-2; Radford et. al, 2019), and find a significant amount of offensive, factually unreliable, and otherwise toxic content. Our work provides a test bed for evaluating toxic generations by LMs and stresses the need for better data selection processes for pretraining.", "year": 2020, "venue": "Findings", "authors": [ "Samuel Gehman", "Suchin Gururangan", "Maarten Sap", "Yejin Choi", "Noah A. Smith" ], "externalIds": { "MAG": "3088599783", "ACL": "2020.findings-emnlp.301", "DBLP": "journals/corr/abs-2009-11462", "ArXiv": "2009.11462", "DOI": "10.18653/v1/2020.findings-emnlp.301", "CorpusId": 221878771 }, "url": "https://www.semanticscholar.org/paper/399e7d8129c60818ee208f236c8dda17e876d21f", "referenceCount": 87, "citationCount": 903, "influentialCitationCount": 135, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "All the News That’s Fit to Fabricate: AI-Generated Text as a Tool of Media Misinformation", "abstract": "Abstract Online misinformation has become a constant; only the way actors create and distribute that information is changing. Advances in artificial intelligence (AI) such as GPT-2 mean that actors can now synthetically generate text in ways that mimic the style and substance of human-created news stories. We carried out three original experiments to study whether these AI-generated texts are credible and can influence opinions on foreign policy. The first evaluated human perceptions of AI-generated text relative to an original story. The second investigated the interaction between partisanship and AI-generated news. The third examined the distributions of perceived credibility across different AI model sizes. We find that individuals are largely incapable of distinguishing between AI- and human-generated text; partisanship affects the perceived credibility of the story; and exposure to the text does little to change individuals’ policy views. The findings have important implications in understanding AI in online misinformation campaigns.", "year": 2020, "venue": "Journal of Experimental Political Science", "authors": [ "S. 
Kreps", "Miles McCain", "Miles Brundage" ], "externalIds": { "MAG": "3032046549", "DOI": "10.1017/XPS.2020.37", "CorpusId": 219800227 }, "url": "https://www.semanticscholar.org/paper/9ffcb3624f2637b5d0fe28c61ec8472293cfebc7", "referenceCount": 38, "citationCount": 153, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Psychology", "Sociology" ] }, { "title": "Feature Selection via Mutual Information: New Theoretical Insights", "abstract": "Mutual information has been successfully adopted in filter feature-selection methods to assess both the relevancy of a subset of features in predicting the target variable and the redundancy with respect to other variables. However, existing algorithms are mostly heuristic and do not offer any guarantee on the proposed solution. In this paper, we provide novel theoretical results showing that conditional mutual information naturally arises when bounding the ideal regression/classification errors achieved by different subsets of features. Leveraging on these insights, we propose a novel stopping condition for backward and forward greedy methods which ensures that the ideal prediction error using the selected feature subset remains bounded by a user-specified threshold. We provide numerical simulations to support our theoretical claims and compare to common heuristic methods.", "year": 2019, "venue": "IEEE International Joint Conference on Neural Network", "authors": [ "Mario Beraha", "Alberto Maria Metelli", "M. Papini", "Andrea Tirinzoni", "Marcello Restelli" ], "externalIds": { "DBLP": "journals/corr/abs-1907-07384", "MAG": "2977730413", "ArXiv": "1907.07384", "DOI": "10.1109/IJCNN.2019.8852410", "CorpusId": 197430817 }, "url": "https://www.semanticscholar.org/paper/b7b2af61625ef06fe8f1cb0189b496e46b2c21d9", "referenceCount": 34, "citationCount": 57, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "DE-BERTA: DECODING-ENHANCED BERT WITH DISEN-TANGLED ATTENTION", "abstract": null, "year": 2021, "venue": "International Conference on Learning Representations", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Induction of decision trees", "abstract": null, "year": 2004, "venue": "Machine-mediated learning", "authors": [ "J. R. Quinlan" ], "externalIds": { "MAG": "2466432605", "DOI": "10.1007/BF00116251", "CorpusId": 13252401 }, "url": "https://www.semanticscholar.org/paper/6ccb34bf2122304af5cbecf54402ee3d970e43f2", "referenceCount": 36, "citationCount": 15592, "influentialCitationCount": 1146, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "2023. ”Do Anything Now”: Characterizing and Evaluating In-The-Wild Jailbreak Prompts on Large Language Models", "abstract": null, "year": null, "venue": "CoRR", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. 
Fine-Tuned DeBERTa-v3 for Prompt Injection Detection", "abstract": null, "year": null, "venue": "ProtectAI.com.", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "PhoCoLens: Photorealistic and Consistent Reconstruction in Lensless Imaging": { "paper_title": "PhoCoLens: Photorealistic and Consistent Reconstruction in Lensless Imaging", "arxiv_id": "2409.17996v1", "keyword": "diffusion model", "authors": [ "Xin Cai", "Zhiyuan You", "Hailong Zhang", "Wentao Liu", "Jinwei Gu", "Tianfan Xue" ], "references": [ { "title": "MWDNs: reconstruction in multi-scale feature spaces for lensless imaging.", "abstract": "Lensless cameras, consisting of only a sensor and a mask, are small and flexible enough to be used in many applications with stringent scale constraints. These mask-based imagers encode scenes in caustic patterns. Most existing reconstruction algorithms rely on multiple iterations based on physical model for deconvolution followed by deep learning for perception, among which the main limitation of reconstruction quality is the mismatch between the ideal and the real model. To solve the problem, we in this work learned a class of multi Wiener deconvolution networks (MWDNs), deconvoluting in multi-scale feature spaces with Wiener filters to reduce the information loss and improving the accuracy of the given model by correcting the inputs. A comparison between the proposed and the state-of-the-art algorithms shows that ours achieves much better images and performs well in real-world environments. In addition, our method takes greater advantage of the computational time due to the abandonment of iterations.", "year": 2023, "venue": "Optics Express", "authors": [ "Ying Li", "Zhengdai Li", "KaiYu Chen", "Youming Guo", "C. Rao" ], "externalIds": { "DOI": "10.1364/oe.501970", "CorpusId": 264392884, "PubMed": "38017997" }, "url": "https://www.semanticscholar.org/paper/fc7a7bc33f0465c8e075de7ccd7fc6c36f71a8e3", "referenceCount": 0, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "DiffBIR: Towards Blind Image Restoration with Generative Diffusion Prior", "abstract": "We present DiffBIR, a general restoration pipeline that could handle different blind image restoration tasks in a unified framework. DiffBIR decouples blind image restoration problem into two stages: 1) degradation removal: removing image-independent content; 2) information regeneration: generating the lost image content. Each stage is developed independently but they work seamlessly in a cascaded manner. In the first stage, we use restoration modules to remove degradations and obtain high-fidelity restored results. For the second stage, we propose IRControlNet that leverages the generative ability of latent diffusion models to generate realistic details. Specifically, IRControlNet is trained based on specially produced condition images without distracting noisy content for stable generation performance. Moreover, we design a region-adaptive restoration guidance that can modify the denoising process during inference without model re-training, allowing users to balance realness and fidelity through a tunable guidance scale. 
Extensive experiments have demonstrated DiffBIR's superiority over state-of-the-art approaches for blind image super-resolution, blind face restoration and blind image denoising tasks on both synthetic and real-world datasets. The code is available at https://github.com/XPixelGroup/DiffBIR.", "year": 2023, "venue": "arXiv.org", "authors": [ "X. Lin", "Jingwen He", "Zi-Yuan Chen", "Zhaoyang Lyu", "Ben Fei", "Bo Dai", "Wanli Ouyang", "Y. Qiao", "Chao Dong" ], "externalIds": { "DBLP": "journals/corr/abs-2308-15070", "ArXiv": "2308.15070", "DOI": "10.48550/arXiv.2308.15070", "CorpusId": 261276317 }, "url": "https://www.semanticscholar.org/paper/858f0643110ccccb6a9103747f2169fecfb98668", "referenceCount": 97, "citationCount": 85, "influentialCitationCount": 23, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Angle Sensitive Pixels for Lensless Imaging on Spherical Sensors", "abstract": "We propose OrbCam, a lensless architecture for imaging with spherical sensors. Prior work in lensless imager techniques have focused largely on using planar sensors; for such designs, it is important to use a modulation element, e.g. amplitude or phase masks, to construct a invertible imaging system. In contrast, we show that the diversity of pixel orientations on a curved surface is sufficient to improve the conditioning of the mapping between the scene and the sensor. Hence, when imaging on a spherical sensor, all pixels can have the same angular response function such that the lensless imager is comprised of pixels that are identical to each other and differ only in their orientations. We provide the computational tools for the design of the angular response of the pixels in a spherical sensor that leads to well-conditioned and noise-robust measurements. We validate our design in both simulation and a lab prototype. The implications of our design is that the lensless imaging can be enabled easily for curved and flexible surfaces thereby opening up a new set of application domains.", "year": 2023, "venue": "arXiv.org", "authors": [ "Yi Hua", "Yongyi Zhao", "Aswin C. Sankaranarayanan" ], "externalIds": { "ArXiv": "2306.15953", "DBLP": "journals/corr/abs-2306-15953", "DOI": "10.48550/arXiv.2306.15953", "CorpusId": 259274860 }, "url": "https://www.semanticscholar.org/paper/e5346ed488ba5bc649357795af2dad9977befae9", "referenceCount": 23, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Exploiting Diffusion Prior for Real-World Image Super-Resolution", "abstract": "We present a novel approach to leverage prior knowledge encapsulated in pre-trained text-to-image diffusion models for blind super-resolution (SR). Specifically, by employing our time-aware encoder, we can achieve promising restoration results without altering the pre-trained synthesis model, thereby preserving the generative prior and minimizing training cost. To remedy the loss of fidelity caused by the inherent stochasticity of diffusion models, we introduce a controllable feature wrapping module that allows users to balance quality and fidelity by simply adjusting a scalar value during the inference process. Moreover, we develop a progressive aggregation sampling strategy to overcome the fixed-size constraints of pre-trained diffusion models, enabling adaptation to resolutions of any size. 
A comprehensive evaluation of our method using both synthetic and real-world benchmarks demonstrates its superiority over current state-of-the-art approaches.", "year": 2023, "venue": "International Journal of Computer Vision", "authors": [ "Jianyi Wang", "Zongsheng Yue", "Shangchen Zhou", "Kelvin C. K. Chan", "Chen Change Loy" ], "externalIds": { "DBLP": "journals/corr/abs-2305-07015", "ArXiv": "2305.07015", "DOI": "10.48550/arXiv.2305.07015", "CorpusId": 258615282 }, "url": "https://www.semanticscholar.org/paper/315d7a58fada47c5729645f0af8ddfaa0743f82f", "referenceCount": 106, "citationCount": 125, "influentialCitationCount": 37, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "T2I-Adapter: Learning Adapters to Dig out More Controllable Ability for Text-to-Image Diffusion Models", "abstract": "The incredible generative ability of large-scale text-to-image (T2I) models has demonstrated strong power of learning complex structures and meaningful semantics. However, relying solely on text prompts cannot fully take advantage of the knowledge learned by the model, especially when flexible and accurate controlling (e.g., structure and color) is needed. In this paper, we aim to ``dig out\" the capabilities that T2I models have implicitly learned, and then explicitly use them to control the generation more granularly. Specifically, we propose to learn low-cost T2I-Adapters to align internal knowledge in T2I models with external control signals, while freezing the original large T2I models. In this way, we can train various adapters according to different conditions, achieving rich control and editing effects in the color and structure of the generation results. Further, the proposed T2I-Adapters have attractive properties of practical value, such as composability and generalization ability. Extensive experiments demonstrate that our T2I-Adapter has promising generation quality and a wide range of applications. Our code is available at https://github.com/TencentARC/T2I-Adapter.", "year": 2023, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Chong Mou", "Xintao Wang", "Liangbin Xie", "Jing Zhang", "Zhongang Qi", "Ying Shan", "Xiaohu Qie" ], "externalIds": { "DBLP": "conf/aaai/MouWXW0QS24", "ArXiv": "2302.08453", "DOI": "10.48550/arXiv.2302.08453", "CorpusId": 256900833 }, "url": "https://www.semanticscholar.org/paper/58842cdca3ea68f7b9e638b288fc247a6f26dafc", "referenceCount": 61, "citationCount": 603, "influentialCitationCount": 79, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Adding Conditional Control to Text-to-Image Diffusion Models", "abstract": "We present ControlNet, a neural network architecture to add spatial conditioning controls to large, pretrained text-to-image diffusion models. ControlNet locks the production-ready large diffusion models, and reuses their deep and robust encoding layers pretrained with billions of images as a strong backbone to learn a diverse set of conditional controls. The neural architecture is connected with \"zero convolutions\" (zero-initialized convolution layers) that progressively grow the parameters from zero and ensure that no harmful noise could affect the finetuning. We test various conditioning controls, e.g., edges, depth, segmentation, human pose, etc., with Stable Diffusion, using single or multiple conditions, with or without prompts. We show that the training of ControlNets is robust with small (<50k) and large (>1m) datasets. 
Extensive results show that ControlNet may facilitate wider applications to control image diffusion models.", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Lvmin Zhang", "Anyi Rao", "Maneesh Agrawala" ], "externalIds": { "DBLP": "journals/corr/abs-2302-05543", "ArXiv": "2302.05543", "DOI": "10.1109/ICCV51070.2023.00355", "CorpusId": 256827727 }, "url": "https://www.semanticscholar.org/paper/efbe97d20c4ffe356e8826c01dc550bacc405add", "referenceCount": 119, "citationCount": 2416, "influentialCitationCount": 489, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Zero-Shot Image Restoration Using Denoising Diffusion Null-Space Model", "abstract": "Most existing Image Restoration (IR) models are task-specific, which can not be generalized to different degradation operators. In this work, we propose the Denoising Diffusion Null-Space Model (DDNM), a novel zero-shot framework for arbitrary linear IR problems, including but not limited to image super-resolution, colorization, inpainting, compressed sensing, and deblurring. DDNM only needs a pre-trained off-the-shelf diffusion model as the generative prior, without any extra training or network modifications. By refining only the null-space contents during the reverse diffusion process, we can yield diverse results satisfying both data consistency and realness. We further propose an enhanced and robust version, dubbed DDNM+, to support noisy restoration and improve restoration quality for hard tasks. Our experiments on several IR tasks reveal that DDNM outperforms other state-of-the-art zero-shot IR methods. We also demonstrate that DDNM+ can solve complex real-world applications, e.g., old photo restoration.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Yinhuai Wang", "Jiwen Yu", "Jian Zhang" ], "externalIds": { "ArXiv": "2212.00490", "DBLP": "journals/corr/abs-2212-00490", "DOI": "10.48550/arXiv.2212.00490", "CorpusId": 254125609 }, "url": "https://www.semanticscholar.org/paper/3a75ed3e9e81c9db573ef73d20e2c66c12aaedf8", "referenceCount": 44, "citationCount": 276, "influentialCitationCount": 51, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Privacy-Enhancing Optical Embeddings for Lensless Classification", "abstract": "Lensless imaging can provide visual privacy due to the highly multiplexed characteristic of its measurements. However, this alone is a weak form of security, as various adversarial attacks can be designed to invert the one-to-many scene mapping of such cameras. In this work, we enhance the privacy provided by lensless imaging by (1) downsampling at the sensor and (2) using a programmable mask with variable patterns as our optical encoder. We build a prototype from a low-cost LCD and Raspberry Pi components, for a total cost of around 100 USD. This very low price point allows our system to be deployed and lever-aged in a broad range of applications. In our experiments, we first demonstrate the viability and reconfigura-bility of our system by applying it to various classification tasks: MNIST, CelebA (face attributes), and CIFAR10. By jointly optimizing the mask pattern and a digital classifier in an end-to-end fashion, low-dimensional, privacy-enhancing embeddings are learned directly at the sensor. 
Secondly, we show how the proposed system, through variable mask patterns, can thwart adversaries that attempt to invert the system (1) via plaintext attacks or (2) in the event of camera parameters leaks. We demonstrate the defense of our system to both risks, with 55 % and 26 % drops in image quality metrics for attacks based on model-based convex optimization and generative neural networks respectively. We open-source a wave propagation and camera simulator needed for end-to-end optimization, the training software, and a library for interfacing with the camera.", "year": 2022, "venue": "arXiv.org", "authors": [ "Eric Bezzam", "M. Vetterli", "Matthieu Simeoni" ], "externalIds": { "ArXiv": "2211.12864", "DBLP": "journals/corr/abs-2211-12864", "DOI": "10.48550/arXiv.2211.12864", "CorpusId": 253801857 }, "url": "https://www.semanticscholar.org/paper/eebdc7d38d50f13c04b6e7026744f36e8892d9e5", "referenceCount": 56, "citationCount": 3, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Classifier-Free Diffusion Guidance", "abstract": "Classifier guidance is a recently introduced method to trade off mode coverage and sample fidelity in conditional diffusion models post training, in the same spirit as low temperature sampling or truncation in other types of generative models. Classifier guidance combines the score estimate of a diffusion model with the gradient of an image classifier and thereby requires training an image classifier separate from the diffusion model. It also raises the question of whether guidance can be performed without a classifier. We show that guidance can be indeed performed by a pure generative model without such a classifier: in what we call classifier-free guidance, we jointly train a conditional and an unconditional diffusion model, and we combine the resulting conditional and unconditional score estimates to attain a trade-off between sample quality and diversity similar to that obtained using classifier guidance.", "year": 2022, "venue": "arXiv.org", "authors": [ "Jonathan Ho" ], "externalIds": { "ArXiv": "2207.12598", "DBLP": "journals/corr/abs-2207-12598", "DOI": "10.48550/arXiv.2207.12598", "CorpusId": 249145348 }, "url": "https://www.semanticscholar.org/paper/af9f365ed86614c800f082bd8eb14be76072ad16", "referenceCount": 25, "citationCount": 2396, "influentialCitationCount": 313, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Exploring CLIP for Assessing the Look and Feel of Images", "abstract": "Measuring the perception of visual content is a long-standing problem in computer vision. Many mathematical models have been developed to evaluate the look or quality of an image. Despite the effectiveness of such tools in quantifying degradations such as noise and blurriness levels, such quantification is loosely coupled with human language. When it comes to more abstract perception about the feel of visual content, existing methods can only rely on supervised models that are explicitly trained with labeled data collected via laborious user study. In this paper, we go beyond the conventional paradigms by exploring the rich visual language prior encapsulated in Contrastive Language-Image Pre-training (CLIP) models for assessing both the quality perception (look) and abstract perception (feel) of images without explicit task-specific training. In particular, we discuss effective prompt designs and show an effective prompt pairing strategy to harness the prior. 
We also provide extensive experiments on controlled datasets and Image Quality Assessment (IQA) benchmarks. Our results show that CLIP captures meaningful priors that generalize well to different perceptual assessments.", "year": 2022, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Jianyi Wang", "Kelvin C. K. Chan", "Chen Change Loy" ], "externalIds": { "ArXiv": "2207.12396", "DBLP": "journals/corr/abs-2207-12396", "DOI": "10.48550/arXiv.2207.12396", "CorpusId": 251040466 }, "url": "https://www.semanticscholar.org/paper/03ae89e796b1a8b566ae1554fab65c8c88b3a55f", "referenceCount": 74, "citationCount": 242, "influentialCitationCount": 34, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Improving Diffusion Models for Inverse Problems using Manifold Constraints", "abstract": "Recently, diffusion models have been used to solve various inverse problems in an unsupervised manner with appropriate modifications to the sampling process. However, the current solvers, which recursively apply a reverse diffusion step followed by a projection-based measurement consistency step, often produce suboptimal results. By studying the generative sampling path, here we show that current solvers throw the sample path off the data manifold, and hence the error accumulates. To address this, we propose an additional correction term inspired by the manifold constraint, which can be used synergistically with the previous solvers to make the iterations close to the manifold. The proposed manifold constraint is straightforward to implement within a few lines of code, yet boosts the performance by a surprisingly large margin. With extensive experiments, we show that our method is superior to the previous methods both theoretically and empirically, producing promising results in many applications such as image inpainting, colorization, and sparse-view computed tomography. Code available https://github.com/HJ-harry/MCG_diffusion", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Hyungjin Chung", "Byeongsu Sim", "Dohoon Ryu", "J. C. Ye" ], "externalIds": { "DBLP": "conf/nips/ChungSRY22", "ArXiv": "2206.00941", "DOI": "10.48550/arXiv.2206.00941", "CorpusId": 249282628 }, "url": "https://www.semanticscholar.org/paper/b3f5cf32178bcbed91aa5303b70963c6463f48a2", "referenceCount": 63, "citationCount": 282, "influentialCitationCount": 35, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "MANIQA: Multi-dimension Attention Network for No-Reference Image Quality Assessment", "abstract": "No-Reference Image Quality Assessment (NR-IQA) aims to assess the perceptual quality of images in accordance with human subjective perception. Unfortunately, existing NR-IQA methods are far from meeting the needs of predicting accurate quality scores on GAN-based distortion images. To this end, we propose Multi-dimension Attention Network for no-reference Image Quality Assessment (MANIQA) to improve the performance on GAN-based distortion. We firstly extract features via ViT, then to strengthen global and local interactions, we propose the Transposed Attention Block (TAB) and the Scale Swin Transformer Block (SSTB). These two modules apply attention mechanisms across the channel and spatial dimension, respectively. In this multi-dimensional manner, the modules cooperatively increase the interaction among different regions of images globally and locally. 
Finally, a dual branch structure for patch-weighted quality prediction is applied to predict the final score depending on the weight of each patch’s score. Experimental results demonstrate that MANIQA outperforms state-of-the-art methods on four standard datasets (LIVE, TID2013, CSIQ, and KADID-10K) by a large margin. Besides, our method ranked first place in the final testing phase of the NTIRE 2022 Perceptual Image Quality Assessment Challenge Track 2: No-Reference. Codes and models are available at https://github.com/IIGROUP/MANIQA.", "year": 2022, "venue": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "authors": [ "Sidi Yang", "Tianhe Wu", "Shu Shi", "S. Gong", "Ming Cao", "Jiahao Wang", "Yujiu Yang" ], "externalIds": { "ArXiv": "2204.08958", "DBLP": "conf/cvpr/YangWSLGCWY22", "DOI": "10.1109/CVPRW56347.2022.00126", "CorpusId": 248240148 }, "url": "https://www.semanticscholar.org/paper/d2171803c7aae785f05e9f298f6025f4252262c6", "referenceCount": 61, "citationCount": 159, "influentialCitationCount": 20, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Unrolled Primal-Dual Networks for Lensless Cameras", "abstract": "Conventional models for lensless imaging assume that each measurement results from convolving a given scene with a single experimentally measured point-spread function. These models fail to simulate lensless cameras truthfully, as these models do not account for optical aberrations or scenes with depth variations. Our work shows that learning a supervised primal-dual reconstruction method results in image quality matching state of the art in the literature without demanding a large network capacity. We show that embedding learnable forward and adjoint models improves the reconstruction quality of lensless images (+5dB PSNR) compared to works that assume a fixed point-spread function.", "year": 2022, "venue": "Optics Express", "authors": [ "Oliver Kingshott", "N. Antipa", "E. Bostan", "K. Akşit" ], "externalIds": { "DBLP": "journals/corr/abs-2203-04353", "ArXiv": "2203.04353", "DOI": "10.1364/OE.475521", "CorpusId": 247318710, "PubMed": "36558589" }, "url": "https://www.semanticscholar.org/paper/198d958ff333b9368e7c7038a0715643a98582d0", "referenceCount": 32, "citationCount": 7, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Medicine" ] }, { "title": "Denoising Diffusion Restoration Models", "abstract": "Many interesting tasks in image restoration can be cast as linear inverse problems. A recent family of approaches for solving these problems uses stochastic algorithms that sample from the posterior distribution of natural images given the measurements. However, efficient solutions often require problem-specific supervised training to model the posterior, whereas unsupervised methods that are not problem-specific typically rely on inefficient iterative methods. This work addresses these issues by introducing Denoising Diffusion Restoration Models (DDRM), an efficient, unsupervised posterior sampling method. Motivated by variational inference, DDRM takes advantage of a pre-trained denoising diffusion generative model for solving any linear inverse problem. We demonstrate DDRM's versatility on several image datasets for super-resolution, deblurring, inpainting, and colorization under various amounts of measurement noise. 
DDRM outperforms the current leading unsupervised methods on the diverse ImageNet dataset in reconstruction quality, perceptual quality, and runtime, being 5x faster than the nearest competitor. DDRM also generalizes well for natural images out of the distribution of the observed ImageNet training set.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Bahjat Kawar", "Michael Elad", "Stefano Ermon", "Jiaming Song" ], "externalIds": { "ArXiv": "2201.11793", "DBLP": "journals/corr/abs-2201-11793", "CorpusId": 246411364 }, "url": "https://www.semanticscholar.org/paper/3d3c5fcbc40aadccceda58d3d9c5cd00588ea0b7", "referenceCount": 70, "citationCount": 552, "influentialCitationCount": 73, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "High-Resolution Image Synthesis with Latent Diffusion Models", "abstract": "By decomposing the image formation process into a sequential application of denoising autoencoders, diffusion models (DMs) achieve state-of-the-art synthesis results on image data and beyond. Additionally, their formulation allows for a guiding mechanism to control the image generation process without retraining. However, since these models typically operate directly in pixel space, optimization of powerful DMs often consumes hundreds of GPU days and inference is expensive due to sequential evaluations. To enable DM training on limited computational resources while retaining their quality and flexibility, we apply them in the latent space of powerful pretrained autoencoders. In contrast to previous work, training diffusion models on such a representation allows for the first time to reach a near-optimal point between complexity reduction and detail preservation, greatly boosting visual fidelity. By introducing cross-attention layers into the model architecture, we turn diffusion models into powerful and flexible generators for general conditioning inputs such as text or bounding boxes and high-resolution synthesis becomes possible in a convolutional manner. Our latent diffusion models (LDMs) achieve new state of the art scores for image inpainting and class-conditional image synthesis and highly competitive performance on various tasks, including unconditional image generation, text-to-image synthesis, and super-resolution, while significantly reducing computational requirements compared to pixel-based DMs.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Robin Rombach", "A. Blattmann", "Dominik Lorenz", "Patrick Esser", "B. Ommer" ], "externalIds": { "ArXiv": "2112.10752", "DBLP": "journals/corr/abs-2112-10752", "DOI": "10.1109/CVPR52688.2022.01042", "CorpusId": 245335280 }, "url": "https://www.semanticscholar.org/paper/c10075b3746a9f3dd5811970e93c8ca3ad39b39d", "referenceCount": 110, "citationCount": 9847, "influentialCitationCount": 2744, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep learning for fast spatially-varying deconvolution", "abstract": null, "year": 2021, "venue": "Optica", "authors": [ "Kyrollos Yanny", "Kristina Monakhova", "Richard W. Shuai", "L. 
Waller" ], "externalIds": { "DOI": "10.1364/optica.442438", "CorpusId": 245113663 }, "url": "https://www.semanticscholar.org/paper/10b81abc93f1ea7d6615e365452edbb1670dd3b5", "referenceCount": 0, "citationCount": 60, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Solving Inverse Problems in Medical Imaging with Score-Based Generative Models", "abstract": "Reconstructing medical images from partial measurements is an important inverse problem in Computed Tomography (CT) and Magnetic Resonance Imaging (MRI). Existing solutions based on machine learning typically train a model to directly map measurements to medical images, leveraging a training dataset of paired images and measurements. These measurements are typically synthesized from images using a fixed physical model of the measurement process, which hinders the generalization capability of models to unknown measurement processes. To address this issue, we propose a fully unsupervised technique for inverse problem solving, leveraging the recently introduced score-based generative models. Specifically, we first train a score-based generative model on medical images to capture their prior distribution. Given measurements and a physical model of the measurement process at test time, we introduce a sampling method to reconstruct an image consistent with both the prior and the observed measurements. Our method does not assume a fixed measurement process during training, and can thus be flexibly adapted to different measurement processes at test time. Empirically, we observe comparable or better performance to supervised learning techniques in several medical imaging tasks in CT and MRI, while demonstrating significantly better generalization to unknown measurement processes.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Yang Song", "Liyue Shen", "Lei Xing", "Stefano Ermon" ], "externalIds": { "ArXiv": "2111.08005", "DBLP": "conf/iclr/0011S0E22", "CorpusId": 244130146 }, "url": "https://www.semanticscholar.org/paper/49f6dbf4ead6a8a3d26f9cf218a654f2f3d1d896", "referenceCount": 52, "citationCount": 386, "influentialCitationCount": 40, "isOpenAccess": false, "fieldsOfStudy": [ "Engineering", "Computer Science", "Mathematics" ] }, { "title": "Palette: Image-to-Image Diffusion Models", "abstract": "This paper develops a unified framework for image-to-image translation based on conditional diffusion models and evaluates this framework on four challenging image-to-image translation tasks, namely colorization, inpainting, uncropping, and JPEG restoration. Our simple implementation of image-to-image diffusion models outperforms strong GAN and regression baselines on all tasks, without task-specific hyper-parameter tuning, architecture customization, or any auxiliary loss or sophisticated new techniques needed. We uncover the impact of an L2 vs. L1 loss in the denoising diffusion objective on sample diversity, and demonstrate the importance of self-attention in the neural architecture through empirical studies. Importantly, we advocate a unified evaluation protocol based on ImageNet, with human evaluation and sample quality scores (FID, Inception Score, Classification Accuracy of a pre-trained ResNet-50, and Perceptual Distance against original images). We expect this standardized evaluation protocol to play a role in advancing image-to-image translation research. 
Finally, we show that a generalist, multi-task diffusion model performs as well or better than task-specific specialist counterparts. Check out https://diffusion-palette.github.io/ for an overview of the results and code.", "year": 2021, "venue": "International Conference on Computer Graphics and Interactive Techniques", "authors": [ "Chitwan Saharia", "William Chan", "Huiwen Chang", "Chris A. Lee", "Jonathan Ho", "Tim Salimans", "David J. Fleet", "Mohammad Norouzi" ], "externalIds": { "DBLP": "journals/corr/abs-2111-05826", "ArXiv": "2111.05826", "DOI": "10.1145/3528233.3530757", "CorpusId": 243938678 }, "url": "https://www.semanticscholar.org/paper/37c9c4e7648f639c0b36f150fc6c6c90b3682f4a", "referenceCount": 114, "citationCount": 1199, "influentialCitationCount": 71, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MUSIQ: Multi-scale Image Quality Transformer", "abstract": "Image quality assessment (IQA) is an important research topic for understanding and improving visual experience. The current state-of-the-art IQA methods are based on convolutional neural networks (CNNs). The performance of CNN-based models is often compromised by the fixed shape constraint in batch training. To accommodate this, the input images are usually resized and cropped to a fixed shape, causing image quality degradation. To address this, we design a multi-scale image quality Transformer (MUSIQ) to process native resolution images with varying sizes and aspect ratios. With a multi-scale image representation, our proposed method can capture image quality at different granularities. Furthermore, a novel hash-based 2D spatial embedding and a scale embedding is proposed to support the positional embedding in the multi-scale representation. Experimental results verify that our method can achieve state-of-the-art performance on multiple large scale IQA datasets such as PaQ-2-PiQ [41], SPAQ [11], and KonIQ-10k [16]. 1", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Junjie Ke", "Qifei Wang", "Yilin Wang", "P. Milanfar", "Feng Yang" ], "externalIds": { "DBLP": "journals/corr/abs-2108-05997", "ArXiv": "2108.05997", "DOI": "10.1109/ICCV48922.2021.00510", "CorpusId": 237048383 }, "url": "https://www.semanticscholar.org/paper/e3d06054af531ee2f42270d43100b309c28546ef", "referenceCount": 53, "citationCount": 359, "influentialCitationCount": 51, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ILVR: Conditioning Method for Denoising Diffusion Probabilistic Models", "abstract": "Denoising diffusion probabilistic models (DDPM) have shown remarkable performance in unconditional image generation. However, due to the stochasticity of the generative process in DDPM, it is challenging to generate images with the desired semantics. In this work, we propose Iterative Latent Variable Refinement (ILVR), a method to guide the generative process in DDPM to generate high-quality images based on a given reference image. Here, the refinement of the generative process in DDPM enables a single DDPM to sample images from various sets directed by the reference image. The proposed ILVR method generates high-quality images while controlling the generation. 
The controllability of our method allows adaptation of a single DDPM without any additional learning in various image generation tasks, such as generation from various downsampling factors, multi-domain image translation, paint-to-image, and editing with scribbles.", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Jooyoung Choi", "Sungwon Kim", "Yonghyun Jeong", "Youngjune Gwon", "Sungroh Yoon" ], "externalIds": { "DBLP": "journals/corr/abs-2108-02938", "ArXiv": "2108.02938", "DOI": "10.1109/iccv48922.2021.01410", "CorpusId": 236950721 }, "url": "https://www.semanticscholar.org/paper/cda3fbbac6734b603bee363b0938e9baa924aa78", "referenceCount": 59, "citationCount": 550, "influentialCitationCount": 54, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Image Super-Resolution via Iterative Refinement", "abstract": "We present SR3, an approach to image Super-Resolution via Repeated Refinement. SR3 adapts denoising diffusion probabilistic models (Ho et al. 2020), (Sohl-Dickstein et al. 2015) to image-to-image translation, and performs super-resolution through a stochastic iterative denoising process. Output images are initialized with pure Gaussian noise and iteratively refined using a U-Net architecture that is trained on denoising at various noise levels, conditioned on a low-resolution input image. SR3 exhibits strong performance on super-resolution tasks at different magnification factors, on faces and natural images. We conduct human evaluation on a standard 8× face super-resolution task on CelebA-HQ for which SR3 achieves a fool rate close to 50%, suggesting photo-realistic outputs, while GAN baselines do not exceed a fool rate of 34%. We evaluate SR3 on a 4× super-resolution task on ImageNet, where SR3 outperforms baselines in human evaluation and classification accuracy of a ResNet-50 classifier trained on high-resolution images. We further show the effectiveness of SR3 in cascaded image generation, where a generative model is chained with super-resolution models to synthesize high-resolution images with competitive FID scores on the class-conditional 256×256 ImageNet generation challenge.", "year": 2021, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Chitwan Saharia", "Jonathan Ho", "William Chan", "Tim Salimans", "David J. Fleet", "Mohammad Norouzi" ], "externalIds": { "DBLP": "journals/corr/abs-2104-07636", "ArXiv": "2104.07636", "DOI": "10.1109/TPAMI.2022.3204461", "CorpusId": 233241040, "PubMed": "36094974" }, "url": "https://www.semanticscholar.org/paper/8a1ea7b6e7e834d146ad782be5d63f57f806a9cc", "referenceCount": 74, "citationCount": 1305, "influentialCitationCount": 130, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Medicine" ] }, { "title": "FlatNet: Towards Photorealistic Scene Reconstruction From Lensless Measurements", "abstract": "Lensless imaging has emerged as a potential solution towards realizing ultra-miniature cameras by eschewing the bulky lens in a traditional camera. Without a focusing lens, the lensless cameras rely on computational algorithms to recover the scenes from multiplexed measurements. However, the current iterative-optimization-based reconstruction algorithms produce noisier and perceptually poorer images. In this work, we propose a non-iterative deep learning-based reconstruction approach that results in orders of magnitude improvement in image quality for lensless reconstructions. 
Our approach, called FlatNet, lays down a framework for reconstructing high-quality photorealistic images from mask-based lensless cameras, where the camera's forward model formulation is known. FlatNet consists of two stages: (1) an inversion stage that maps the measurement into a space of intermediate reconstruction by learning parameters within the forward model formulation, and (2) a perceptual enhancement stage that improves the perceptual quality of this intermediate reconstruction. These stages are trained together in an end-to-end manner. We show high-quality reconstructions by performing extensive experiments on real and challenging scenes using two different types of lensless prototypes: one which uses a separable forward model and another, which uses a more general non-separable cropped-convolution model. Our end-to-end approach is fast, produces photorealistic reconstructions, and is easy to adopt for other mask-based lensless cameras.", "year": 2020, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Salman Siddique Khan", "Varun Sundar", "Vivek Boominathan", "A. Veeraraghavan", "K. Mitra" ], "externalIds": { "ArXiv": "2010.15440", "DBLP": "journals/corr/abs-2010-15440", "MAG": "3094016573", "DOI": "10.1109/TPAMI.2020.3033882", "CorpusId": 225083554, "PubMed": "33104508" }, "url": "https://www.semanticscholar.org/paper/b2e2db4a4769fc76aba67422990a3f8c2c0cc6b8", "referenceCount": 46, "citationCount": 51, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine", "Engineering" ] }, { "title": "Miniscope3D: optimized single-shot miniature 3D fluorescence microscopy", "abstract": null, "year": 2020, "venue": "Light: Science & Applications", "authors": [ "Kyrollos Yanny", "N. Antipa", "W. Liberti", "S. Dehaeck", "Kristina Monakhova", "F. Liu", "Konlin Shen", "Ren Ng", "L. Waller" ], "externalIds": { "MAG": "3089710967", "PubMedCentral": "7532148", "DBLP": "journals/corr/abs-2010-05382", "ArXiv": "2010.05382", "DOI": "10.1038/s41377-020-00403-7", "CorpusId": 222112371, "PubMed": "33082940" }, "url": "https://www.semanticscholar.org/paper/c6eac8a767e2930db71294f36e3190f1b44469d4", "referenceCount": 37, "citationCount": 100, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Materials Science", "Computer Science", "Engineering", "Physics" ] }, { "title": "Denoising Diffusion Probabilistic Models", "abstract": "We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional CIFAR10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art FID score of 3.17. On 256x256 LSUN, we obtain sample quality similar to ProgressiveGAN. Our implementation is available at this https URL", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Jonathan Ho", "Ajay Jain", "P. 
Abbeel" ], "externalIds": { "ArXiv": "2006.11239", "MAG": "3100572490", "DBLP": "journals/corr/abs-2006-11239", "CorpusId": 219955663 }, "url": "https://www.semanticscholar.org/paper/5c126ae3421f05768d8edd97ecd44b1364e2c99a", "referenceCount": 73, "citationCount": 10778, "influentialCitationCount": 2337, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "PhlatCam: Designed Phase-Mask Based Thin Lensless Camera", "abstract": "We demonstrate a versatile thin lensless camera with a designed phase-mask placed at sub-2 mm from an imaging CMOS sensor. Using wave optics and phase retrieval methods, we present a general-purpose framework to create phase-masks that achieve desired sharp point-spread-functions (PSFs) for desired camera thicknesses. From a single 2D encoded measurement, we show the reconstruction of high-resolution 2D images, computational refocusing, and 3D imaging. This ability is made possible by our proposed high-performance contour-based PSF. The heuristic contour-based PSF is designed using concepts in signal processing to achieve maximal information transfer to a bit-depth limited sensor. Due to the efficient coding, we can use fast linear methods for high-quality image reconstructions and switch to iterative nonlinear methods for higher fidelity reconstructions and 3D imaging.", "year": 2020, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Vivek Boominathan", "Jesse K. Adams", "Jacob T. Robinson", "A. Veeraraghavan" ], "externalIds": { "DBLP": "journals/pami/BoominathanARV20", "MAG": "3019205968", "DOI": "10.1109/TPAMI.2020.2987489", "CorpusId": 216107941, "PubMed": "32324539" }, "url": "https://www.semanticscholar.org/paper/548b73cd341f963f905093b950145a8155417f16", "referenceCount": 49, "citationCount": 77, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "On-chip fluorescence microscopy with a random microlens diffuser.", "abstract": "We present an on-chip, widefield fluorescence microscope, which consists of a diffuser placed a few millimeters away from a traditional image sensor. The diffuser replaces the optics of a microscope, resulting in a compact and easy-to-assemble system with a practical working distance of over 1.5 mm. Furthermore, the diffuser encodes volumetric information, enabling refocusability in post-processing and three-dimensional (3D) imaging of sparse samples from a single acquisition. Reconstruction of images from the raw data requires a precise model of the system, so we introduce a practical calibration scheme and a physics-based forward model to efficiently account for the spatially-varying point spread function (PSF). To improve performance in low-light, we propose a random microlens diffuser, which consists of many small lenslets randomly placed on the mask surface and yields PSFs that are robust to noise. We build an experimental prototype and demonstrate our system on both planar and 3D samples.", "year": 2020, "venue": "Optics Express", "authors": [ "Grace Kuo", "Fanglin Linda Liu", "Irene Grossrubatscher", "Ren Ng", "L. 
Waller" ], "externalIds": { "MAG": "3005494017", "DOI": "10.1364/oe.382055", "CorpusId": 213727591, "PubMed": "32225465" }, "url": "https://www.semanticscholar.org/paper/afaf5649fb69ecf8f6f190115c489161b5d94dcf", "referenceCount": 31, "citationCount": 41, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Physics" ] }, { "title": "Learned reconstructions for practical mask-based lensless imaging", "abstract": "Mask-based lensless imagers are smaller and lighter than traditional lensed cameras. In these imagers, the sensor does not directly record an image of the scene; rather, a computational algorithm reconstructs it. Typically, mask-based lensless imagers use a model-based reconstruction approach that suffers from long compute times and a heavy reliance on both system calibration and heuristically chosen denoisers. In this work, we address these limitations using a bounded-compute, trainable neural network to reconstruct the image. We leverage our knowledge of the physical system by unrolling a traditional model-based optimization algorithm, whose parameters we optimize using experimentally gathered ground-truth data. Optionally, images produced by the unrolled network are then fed into a jointly-trained denoiser. As compared to traditional methods, our architecture achieves better perceptual image quality and runs 20× faster, enabling interactive previewing of the scene. We explore a spectrum between model-based and deep learning methods, showing the benefits of using an intermediate approach. Finally, we test our network on images taken in the wild with a prototype mask-based camera, demonstrating that our network generalizes to natural images.", "year": 2019, "venue": "Optics Express", "authors": [ "Kristina Monakhova", "Joshua Yurtsever", "Grace Kuo", "N. Antipa", "Kyrollos Yanny", "L. Waller" ], "externalIds": { "MAG": "2974677603", "ArXiv": "1908.11502", "DBLP": "journals/corr/abs-1908-11502", "DOI": "10.1364/OE.27.028075", "CorpusId": 201698397, "PubMed": "31684566" }, "url": "https://www.semanticscholar.org/paper/fb162635dff4ef25313f486449be97d4ab9ee363", "referenceCount": 29, "citationCount": 97, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Physics", "Engineering", "Medicine" ] }, { "title": "Generative Modeling by Estimating Gradients of the Data Distribution", "abstract": "We introduce a new generative model where samples are produced via Langevin dynamics using gradients of the data distribution estimated with score matching. Because gradients can be ill-defined and hard to estimate when the data resides on low-dimensional manifolds, we perturb the data with different levels of Gaussian noise, and jointly estimate the corresponding scores, i.e., the vector fields of gradients of the perturbed data distribution for all noise levels. For sampling, we propose an annealed Langevin dynamics where we use gradients corresponding to gradually decreasing noise levels as the sampling process gets closer to the data manifold. Our framework allows flexible model architectures, requires no sampling during training or the use of adversarial methods, and provides a learning objective that can be used for principled model comparisons. Our models produce samples comparable to GANs on MNIST, CelebA and CIFAR-10 datasets, achieving a new state-of-the-art inception score of 8.87 on CIFAR-10. 
Additionally, we demonstrate that our models learn effective representations via image inpainting experiments.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Yang Song", "Stefano Ermon" ], "externalIds": { "MAG": "2971034910", "ArXiv": "1907.05600", "DBLP": "conf/nips/SongE19", "CorpusId": 196470871 }, "url": "https://www.semanticscholar.org/paper/965359b3008ab50dd04e171551220ec0e7f83aba", "referenceCount": 68, "citationCount": 2763, "influentialCitationCount": 330, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Deep null space learning for inverse problems: convergence analysis and rates", "abstract": "Recently, deep learning based methods appeared as a new paradigm for solving inverse problems. These methods empirically show excellent performance but lack of theoretical justification; in particular, no results on the regularization properties are available. In particular, this is the case for two-step deep learning approaches, where a classical reconstruction method is applied to the data in a first step and a trained deep neural network is applied to improve results in a second step. In this paper, we close the gap between practice and theory for a particular network structure in a two-step approach. For that purpose, we propose using so-called null space networks and introduce the concept of -regularization. Combined with a standard regularization method as reconstruction layer, the proposed deep null space learning approach is shown to be a -regularization method; convergence rates are also derived. The proposed null space network structure naturally preserves data consistency which is considered as key property of neural networks for solving inverse problems.", "year": 2018, "venue": "Inverse Problems", "authors": [ "Johannes Schwab", "Stephan Antholzer", "M. Haltmeier" ], "externalIds": { "MAG": "2808122338", "ArXiv": "1806.06137", "DOI": "10.1088/1361-6420/aaf14a", "CorpusId": 119152310 }, "url": "https://www.semanticscholar.org/paper/954c88092b3edffa1768f4a77b94677e2781d2ad", "referenceCount": 30, "citationCount": 102, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Physics" ] }, { "title": "The Unreasonable Effectiveness of Deep Features as a Perceptual Metric", "abstract": "While it is nearly effortless for humans to quickly assess the perceptual similarity between two images, the underlying processes are thought to be quite complex. Despite this, the most widely used perceptual metrics today, such as PSNR and SSIM, are simple, shallow functions, and fail to account for many nuances of human perception. Recently, the deep learning community has found that features of the VGG network trained on ImageNet classification has been remarkably useful as a training loss for image synthesis. But how perceptual are these so-called \"perceptual losses\"? What elements are critical for their success? To answer these questions, we introduce a new dataset of human perceptual similarity judgments. We systematically evaluate deep features across different architectures and tasks and compare them with classic metrics. We find that deep features outperform all previous metrics by large margins on our dataset. More surprisingly, this result is not restricted to ImageNet-trained VGG features, but holds across different deep architectures and levels of supervision (supervised, self-supervised, or even unsupervised). 
Our results suggest that perceptual similarity is an emergent property shared across deep visual representations.", "year": 2018, "venue": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition", "authors": [ "Richard Zhang", "Phillip Isola", "Alexei A. Efros", "Eli Shechtman", "Oliver Wang" ], "externalIds": { "ArXiv": "1801.03924", "MAG": "2783879794", "DBLP": "journals/corr/abs-1801-03924", "DOI": "10.1109/CVPR.2018.00068", "CorpusId": 4766599 }, "url": "https://www.semanticscholar.org/paper/c468bbde6a22d961829e1970e6ad5795e05418d1", "referenceCount": 71, "citationCount": 8862, "influentialCitationCount": 1545, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DiffuserCam: Lensless Single-exposure 3D Imaging", "abstract": "We demonstrate a compact and easy-to-build computational camera for single-shot 3D imaging. Our lensless system consists solely of a diffuser placed in front of a standard image sensor. Every point within the volumetric field-of-view projects a unique pseudorandom pattern of caustics on the sensor. By using a physical approximation and simple calibration scheme, we solve the large-scale inverse problem in a computationally efficient way. The caustic patterns enable compressed sensing, which exploits sparsity in the sample to solve for more 3D voxels than pixels on the 2D sensor. Our 3D voxel grid is chosen to match the experimentally measured two-point optical resolution across the field-of-view, resulting in 100 million voxels being reconstructed from a single 1.3 megapixel image. However, the effective resolution varies significantly with scene content. Because this effect is common to a wide range of computational cameras, we provide new theory for analyzing resolution in such systems.", "year": 2017, "venue": "arXiv.org", "authors": [ "N. Antipa", "Grace Kuo", "Reinhard Heckel", "B. Mildenhall", "E. Bostan", "Ren Ng", "L. Waller" ], "externalIds": { "MAG": "2762946633", "DBLP": "journals/corr/abs-1710-02134", "ArXiv": "1710.02134", "DOI": "10.1364/OPTICA.5.000001", "CorpusId": 24679393 }, "url": "https://www.semanticscholar.org/paper/9cffc0c80b1ae3c1b773b761f37c66e58890639e", "referenceCount": 43, "citationCount": 349, "influentialCitationCount": 26, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FlatCam: Thin, Lensless Cameras Using Coded Aperture and Computation", "abstract": "FlatCam is a thin form-factor lensless camera that consists of a coded mask placed on top of a bare, conventional sensor array. Unlike a traditional, lens-based camera, where an image of the scene is directly recorded on the sensor pixels, each pixel in FlatCam records a linear combination of light from multiple scene elements. A computational algorithm is then used to demultiplex the recorded measurements and reconstruct an image of the scene. FlatCam is an instance of a coded aperture imaging system; however, unlike the vast majority of related work, we place the coded mask extremely close to the image sensor that enables thin and flat form-factor imaging devices. We employ a separable mask to ensure that both calibration and image reconstruction are scalable in terms of memory requirements and computational complexity. We demonstrate the potential of the FlatCam design using two prototypes: one at visible wavelengths and one at infrared wavelengths.", "year": 2017, "venue": "IEEE Transactions on Computational Imaging", "authors": [ "M. Salman Asif", "A. Ayremlou", "Aswin C. Sankaranarayanan", "A. 
Veeraraghavan", "Richard Baraniuk" ], "externalIds": { "MAG": "2487564216", "DBLP": "journals/tci/AsifASVB17", "DOI": "10.1109/TCI.2016.2593662", "CorpusId": 3797472 }, "url": "https://www.semanticscholar.org/paper/8e219746bed1e55c3e0f866ac9fe09219730b9f7", "referenceCount": 41, "citationCount": 160, "influentialCitationCount": 15, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Image restoration for three-dimensional fluorescence microscopy using an orthonormal basis for efficient representation of depth-variant point-spread functions.", "abstract": "A depth-variant (DV) image restoration algorithm for wide field fluorescence microscopy, using an orthonormal basis decomposition of DV point-spread functions (PSFs), is investigated in this study. The efficient PSF representation is based on a previously developed principal component analysis (PCA), which is computationally intensive. We present an approach developed to reduce the number of DV PSFs required for the PCA computation, thereby making the PCA-based approach computationally tractable for thick samples. Restoration results from both synthetic and experimental images show consistency and that the proposed algorithm addresses efficiently depth-induced aberration using a small number of principal components. Comparison of the PCA-based algorithm with a previously-developed strata-based DV restoration algorithm demonstrates that the proposed method improves performance by 50% in terms of accuracy and simultaneously reduces the processing time by 64% using comparable computational resources.", "year": 2015, "venue": "Biomedical Optics Express", "authors": [ "N. Patwary", "C. Preza" ], "externalIds": { "MAG": "2467927714", "DOI": "10.1364/BOE.6.003826", "CorpusId": 21846062, "PubMed": "26504634" }, "url": "https://www.semanticscholar.org/paper/941bea45eb0ad9daeef299896b4b0da6c46b9a62", "referenceCount": 25, "citationCount": 30, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "U-Net: Convolutional Networks for Biomedical Image Segmentation", "abstract": null, "year": 2015, "venue": "International Conference on Medical Image Computing and Computer-Assisted Intervention", "authors": [ "O. Ronneberger", "P. Fischer", "T. Brox" ], "externalIds": { "ArXiv": "1505.04597", "MAG": "1901129140", "DBLP": "journals/corr/RonnebergerFB15", "DOI": "10.1007/978-3-319-24574-4_28", "CorpusId": 3719281 }, "url": "https://www.semanticscholar.org/paper/6364fdaa0a0eccd823a779fcdd489173f938e91a", "referenceCount": 18, "citationCount": 66494, "influentialCitationCount": 9274, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An efficient augmented Lagrangian method with applications to total variation minimization", "abstract": null, "year": 2013, "venue": "Computational optimization and applications", "authors": [ "Chengbo Li", "W. 
Yin", "Hong Jiang", "Yin Zhang" ], "externalIds": { "DBLP": "journals/coap/LiYJZ13", "MAG": "2130120519", "DOI": "10.1007/s10589-013-9576-1", "CorpusId": 12859529 }, "url": "https://www.semanticscholar.org/paper/9eab80f0c345cee51ca27f0b41a71a3f5e6b2c9b", "referenceCount": 46, "citationCount": 541, "influentialCitationCount": 84, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Fluorescence microscopy three-dimensional depth variant point spread function interpolation using Zernike moments", "abstract": "In three-dimensional fluorescence microscopy the point spread function (PSF) changes with depth, inducing errors in the restored images when these variations are neglected during the deconvolution of thick specimens. Some deconvolution algorithms have been developed to take the depth variations of the PSF into consideration. For these algorithms, the accuracy of the estimated structures depends on the knowledge of the PSF at various depths. We propose an alternative to measuring all required PSFs at different depths. The needed PSFs are interpolated from a limited measured PSF set using a method based on Zernike moments. The proposed method offers the possibility to obtain an accurate PSF interpolation at different depths using only a few measured ones.", "year": 2011, "venue": "", "authors": [ "Elie Maalouf", "B. Colicchio", "A. Dieterlen" ], "externalIds": { "MAG": "2148082545", "DOI": "10.1364/JOSAA.28.001864", "CorpusId": 2403801 }, "url": "https://www.semanticscholar.org/paper/fd1499df9c5e96ee9b149d98d6dd3967b11e574a", "referenceCount": 32, "citationCount": 35, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Physics" ] }, { "title": "P2C2: Programmable pixel compressive camera for high speed imaging", "abstract": "We describe an imaging architecture for compressive video sensing termed programmable pixel compressive camera (P2C2). P2C2 allows us to capture fast phenomena at frame rates higher than the camera sensor. In P2C2, each pixel has an independent shutter that is modulated at a rate higher than the camera frame-rate. The observed intensity at a pixel is an integration of the incoming light modulated by its specific shutter. We propose a reconstruction algorithm that uses the data from P2C2 along with additional priors about videos to perform temporal super-resolution. We model the spatial redundancy of videos using sparse representations and the temporal redundancy using brightness constancy constraints inferred via optical flow. We show that by modeling such spatio-temporal redundancies in a video volume, one can faithfully recover the underlying high-speed video frames from the observed low speed coded video. The imaging architecture and the reconstruction algorithm allows us to achieve temporal super-resolution without loss in spatial resolution. We implement a prototype of P2C2 using an LCOS modulator and recover several videos at 200 fps using a 25 fps camera.", "year": 2011, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Dikpal Reddy", "A. Veeraraghavan", "R. 
Chellappa" ], "externalIds": { "MAG": "2092680585", "DBLP": "conf/cvpr/ReddyVC11", "DOI": "10.1109/CVPR.2011.5995542", "CorpusId": 6983152 }, "url": "https://www.semanticscholar.org/paper/7315aa6450a53b3592347677cabbc659208795d6", "referenceCount": 25, "citationCount": 236, "influentialCitationCount": 20, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Distributed Optimization and Statistical Learning via the Alternating Direction Method of Multipliers", "abstract": "Many problems of recent interest in statistics and machine learning can be posed in the framework of convex optimization. Due to the explosion in size and complexity of modern datasets, it is increasingly important to be able to solve problems with a very large number of features or training examples. As a result, both the decentralized collection or storage of these datasets as well as accompanying distributed solution methods are either necessary or at least highly desirable. In this review, we argue that the alternating direction method of multipliers is well suited to distributed convex optimization, and in particular to large-scale problems arising in statistics, machine learning, and related areas. The method was developed in the 1970s, with roots in the 1950s, and is equivalent or closely related to many other algorithms, such as dual decomposition, the method of multipliers, Douglas–Rachford splitting, Spingarn's method of partial inverses, Dykstra's alternating projections, Bregman iterative algorithms for l1 problems, proximal methods, and others. After briefly surveying the theory and history of the algorithm, we discuss applications to a wide variety of statistical and machine learning problems of recent interest, including the lasso, sparse logistic regression, basis pursuit, covariance selection, support vector machines, and many others. We also discuss general distributed optimization, extensions to the nonconvex setting, and efficient implementation, including some details on distributed MPI and Hadoop MapReduce implementations.", "year": 2011, "venue": "Found. Trends Mach. Learn.", "authors": [ "Stephen P. Boyd", "Neal Parikh", "Eric Chu", "Borja Peleato", "Jonathan Eckstein" ], "externalIds": { "DBLP": "journals/ftml/BoydPCPE11", "MAG": "2164278908", "CorpusId": 51789432 }, "url": "https://www.semanticscholar.org/paper/85e4dbcff0b63773db298562ae3fff258eea195f", "referenceCount": 183, "citationCount": 4810, "influentialCitationCount": 961, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Parallel Product-Convolution approach for representing the depth varying Point Spread Functions in 3D widefield microscopy based on principal component analysis.", "abstract": "We address the problem of computational representation of image formation in 3D widefield fluorescence microscopy with depth varying spherical aberrations. We first represent 3D depth-dependent point spread functions (PSFs) as a weighted sum of basis functions that are obtained by principal component analysis (PCA) of experimental data. This representation is then used to derive an approximating structure that compactly expresses the depth variant response as a sum of few depth invariant convolutions pre-multiplied by a set of 1D depth functions, where the convolving functions are the PCA-derived basis functions. The model offers an efficient and convenient trade-off between complexity and accuracy. 
For a given number of approximating PSFs, the proposed method results in a much better accuracy than the strata based approximation scheme that is currently used in the literature. In addition to yielding better accuracy, the proposed methods automatically eliminate the noise in the measured PSFs.", "year": 2010, "venue": "Optics Express", "authors": [ "M. Arigovindan", "J. Shaevitz", "J. Mcgowan", "J. Sedat", "D. Agard" ], "externalIds": { "MAG": "2114463330", "DOI": "10.1364/OE.18.006461", "CorpusId": 263600666, "PubMed": "20389670" }, "url": "https://www.semanticscholar.org/paper/5ae7cd4cb33bd0bb504dbf77b613e718ae08d8d4", "referenceCount": 10, "citationCount": 11, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Image quality assessment: from error visibility to structural similarity", "abstract": "Objective methods for assessing perceptual image quality traditionally attempted to quantify the visibility of errors (differences) between a distorted image and a reference image using a variety of known properties of the human visual system. Under the assumption that human visual perception is highly adapted for extracting structural information from a scene, we introduce an alternative complementary framework for quality assessment based on the degradation of structural information. As a specific example of this concept, we develop a structural similarity index and demonstrate its promise through a set of intuitive examples, as well as comparison to both subjective ratings and state-of-the-art objective methods on a database of images compressed with JPEG and JPEG2000. A MATLAB implementation of the proposed algorithm is available online at http://www.cns.nyu.edu/~lcv/ssim/.", "year": 2004, "venue": "IEEE Transactions on Image Processing", "authors": [ "Zhou Wang", "A. Bovik", "H. Sheikh", "Eero P. Simoncelli" ], "externalIds": { "MAG": "2133665775", "DBLP": "journals/tip/WangBSS04", "DOI": "10.1109/TIP.2003.819861", "CorpusId": 207761262, "PubMed": "15376593" }, "url": "https://www.semanticscholar.org/paper/eae2e0fa72e898c289365c0af16daf57a7a6cf40", "referenceCount": 61, "citationCount": 43876, "influentialCitationCount": 7833, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Improving noise", "abstract": "Two deficiencies in the original Noise algorithm are corrected: second order interpolation discontinuity and unoptimal gradient computation. With these defects corrected, Noise both looks better and runs faster. The latter change also makes it easier to define a uniform mathematical reference standard.", "year": 2002, "venue": "International Conference on Computer Graphics and Interactive Techniques", "authors": [ "K. Perlin" ], "externalIds": { "DBLP": "conf/siggraph/Perlin02", "MAG": "2336763592", "DOI": "10.1145/566570.566636", "CorpusId": 207550606 }, "url": "https://www.semanticscholar.org/paper/a6fd5071b73f542c79bd08d409c5f73de38dac5d", "referenceCount": 6, "citationCount": 652, "influentialCitationCount": 32, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Introduction to Fourier optics", "abstract": "The second edition of this respected text considerably expands the original and reflects the tremendous advances made in the discipline since 1968. 
All material has been thoroughly updated and several new sections explore recent progress in important areas, such as wavelength modulation, analog information processing, and holography. Fourier analysis is a ubiquitous tool with applications in diverse areas of physics and engineering. This book explores these applications in the field of optics with a special emphasis on applications to diffraction, imaging, optical data processing, and holography. This book can be used as a textbook to satisfy the needs of several different types of courses, and it is directed toward both engineers and physicists. By varying the emphasis on different topics and specific applications, the book can be used successfully in a wide range of basic Fourier Optics or Optical Signal Processing courses.", "year": 1969, "venue": "", "authors": [ "J. Goodman" ], "externalIds": { "MAG": "1658472922", "DOI": "10.1063/1.3035549", "CorpusId": 118908270 }, "url": "https://www.semanticscholar.org/paper/5e3eb22c476b889eecbb380d012231d819edf156", "referenceCount": 0, "citationCount": 14455, "influentialCitationCount": 888, "isOpenAccess": true, "fieldsOfStudy": [ "Physics" ] }, { "title": "Extrapolation, Interpolation, and Smoothing of Stationary Time Series, with Engineering Applications", "abstract": null, "year": 1949, "venue": "", "authors": [ "N. Wiener" ], "externalIds": { "MAG": "2997829126", "DOI": "10.2307/2981007", "CorpusId": 125829144 }, "url": "https://www.semanticscholar.org/paper/cb2ffb67def387b584a4bca8a6d40e1c7be44f72", "referenceCount": 0, "citationCount": 1503, "influentialCitationCount": 76, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Robust Reconstruction With Deep Learning to Handle Model Mismatch in Lensless Imaging", "abstract": "Mask-based lensless imaging is an emerging imaging modality, which replaces the lenses with optical elements and makes use of computation to reconstruct images from the multiplexed measurements. 
Most existing reconstruction algorithms are implemented assuming that the forward imaging process is a convolution operation, with a point spread function based on the system model. In reality, there is model mismatch, leading to inferior image reconstruction results. In this paper, we investigate the impact of model mismatch in mask-based lensless imaging and for the first time, illustrate the accumulated artifacts and information loss due to mismatch error in the state-of-the-art approaches, which perform model-based reconstruction and learning-based enhancement in separate stages. To overcome this, we develop a novel physics-informed deep learning architecture that aims at addressing such mismatch error. The proposed hybrid reconstruction network consists of both unrolled model-based optimization to apply system physics and deep learning layers for model correction. Besides a cascaded enhancement network, we introduce a data-driven branch in parallel, making use of both input measurement and all intermediate outputs from the model-based layers to correct the bias and compensate for the information loss due to model mismatch. The effectiveness and robustness of the proposed model mismatch compensation network, referred to as MMCN, is demonstrated on real lensless images. Experimental results show noticeably better performance for MMCN compared with the alternative methods.", "year": 2021, "venue": "IEEE Transactions on Computational Imaging", "authors": [ "Tianjiao Zeng", "E. Lam" ], "externalIds": { "DBLP": "journals/tci/ZengL21", "DOI": "10.1109/TCI.2021.3114542", "CorpusId": 238993931 }, "url": "https://www.semanticscholar.org/paper/32da7a1472104327d5fed6776ee2cdcc5e4860bc", "referenceCount": 0, "citationCount": 17, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A method for stochastic optimization", "abstract": null, "year": 2014, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Joint Localization and Planning using Diffusion": { "paper_title": "Joint Localization and Planning using Diffusion", "arxiv_id": "2409.17995v1", "keyword": "diffusion model", "authors": [ "L. Lao Beyer", "S. Karaman" ], "references": [ { "title": "Diffusion Forcing: Next-token Prediction Meets Full-Sequence Diffusion", "abstract": "This paper presents Diffusion Forcing, a new training paradigm where a diffusion model is trained to denoise a set of tokens with independent per-token noise levels. 
We apply Diffusion Forcing to sequence generative modeling by training a causal next-token prediction model to generate one or several future tokens without fully diffusing past ones. Our approach is shown to combine the strengths of next-token prediction models, such as variable-length generation, with the strengths of full-sequence diffusion models, such as the ability to guide sampling to desirable trajectories. Our method offers a range of additional capabilities, such as (1) rolling-out sequences of continuous tokens, such as video, with lengths past the training horizon, where baselines diverge and (2) new sampling and guiding schemes that uniquely profit from Diffusion Forcing's variable-horizon and causal architecture, and which lead to marked performance gains in decision-making and planning tasks. In addition to its empirical success, our method is proven to optimize a variational lower bound on the likelihoods of all subsequences of tokens drawn from the true joint distribution. Project website: https://boyuan.space/diffusion-forcing", "year": 2024, "venue": "arXiv.org", "authors": [ "Boyuan Chen", "Diego Marti Monso", "Yilun Du", "Max Simchowitz", "Russ Tedrake", "Vincent Sitzmann" ], "externalIds": { "ArXiv": "2407.01392", "DBLP": "journals/corr/abs-2407-01392", "DOI": "10.48550/arXiv.2407.01392", "CorpusId": 270869622 }, "url": "https://www.semanticscholar.org/paper/40d63dc2b465c9081e4efc5a19514da151e97fe7", "referenceCount": 63, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DiffLoc: Diffusion Model for Outdoor LiDAR Localization", "abstract": "Absolute pose regression (APR) estimates global pose in an end-to-end manner, achieving impressive results in learn-based LiDAR localization. However, compared to the top-performing methods reliant on 3D-3D correspondence matching, APR's accuracy still has room for improvement. We recognize APR's lack of robust features learning and iterative denoising process leads to suboptimal results. In this paper, we propose DiffLoc, a novel framework that formulates LiDAR localization as a conditional generation of poses. First, we propose to utilize the foundation model and static-object-aware pool to learn robust features. Second, we incorporate the iterative denoising process into APR via a diffusion model conditioned on the learned geometrically robust features. In addition, due to the unique nature of diffusion models, we propose to adapt our models to two additional applications: (1) using multiple inferences to evaluate pose uncertainty, and (2) seamlessly introducing geometric constraints on denoising steps to improve prediction accuracy. Extensive experiments conducted on the Oxford Radar RobotCar and NCLT datasets demonstrate that DiffLoc outperforms better than the state-of-the-art methods. Especially on the NCLT dataset, we achieve 35% and 34.7% improvement on position and orientation accuracy, respectively. 
Our code is released at https://github.com/liw95/DiffLoc.", "year": 2024, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Wen Li", "Yuyang Yang", "Shangshu Yu", "Guosheng Hu", "Chenglu Wen", "Ming Cheng", "Cheng Wang" ], "externalIds": { "DOI": "10.1109/CVPR52733.2024.01425", "CorpusId": 272724899 }, "url": "https://www.semanticscholar.org/paper/05f0123827eaf8dba7aa195d8b76336161867d3f", "referenceCount": 51, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "DiPPeR: Diffusion-based 2D Path Planner applied on Legged Robots", "abstract": "In this work, we present DiPPeR, a novel and fast 2D path planning framework for quadrupedal locomotion, leveraging diffusion-driven techniques. Our contributions include a scalable dataset generator for map images and corresponding trajectories, an image-conditioned diffusion planner for mobile robots, and a training/inference pipeline employing CNNs. We validate our approach in several mazes, as well as in real-world deployment scenarios on Boston Dynamic’s Spot and Unitree’s Go1 robots. DiPPeR performs on average 23 times faster for trajectory generation against both search based and data driven path planning algorithms with an average of 87% consistency in producing feasible paths of various length in maps of variable size, and obstacle structure. Website: https://rpl-cs-ucl.github.io/DiPPeR/", "year": 2023, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Jianwei Liu", "Maria Stamatopoulou", "Dimitrios Kanoulas" ], "externalIds": { "ArXiv": "2310.07842", "DBLP": "journals/corr/abs-2310-07842", "DOI": "10.1109/ICRA57147.2024.10610013", "CorpusId": 263909103 }, "url": "https://www.semanticscholar.org/paper/c5900d8caa778b82f29a879673360aa7f8fcd520", "referenceCount": 36, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Diffusion Policy: Visuomotor Policy Learning via Action Diffusion", "abstract": "This paper introduces Diffusion Policy, a new way of generating robot behavior by representing a robot's visuomotor policy as a conditional denoising diffusion process. We benchmark Diffusion Policy across 12 different tasks from 4 different robot manipulation benchmarks and find that it consistently outperforms existing state-of-the-art robot learning methods with an average improvement of 46.9%. Diffusion Policy learns the gradient of the action-distribution score function and iteratively optimizes with respect to this gradient field during inference via a series of stochastic Langevin dynamics steps. We find that the diffusion formulation yields powerful advantages when used for robot policies, including gracefully handling multimodal action distributions, being suitable for high-dimensional action spaces, and exhibiting impressive training stability. To fully unlock the potential of diffusion models for visuomotor policy learning on physical robots, this paper presents a set of key technical contributions including the incorporation of receding horizon control, visual conditioning, and the time-series diffusion transformer. We hope this work will help motivate a new generation of policy learning techniques that are able to leverage the powerful generative modeling capabilities of diffusion models. 
Code, data, and training details is publicly available diffusion-policy.cs.columbia.edu", "year": 2023, "venue": "Robotics: Science and Systems", "authors": [ "Cheng Chi", "S. Feng", "Yilun Du", "Zhenjia Xu", "Eric A. Cousineau", "B. Burchfiel", "Shuran Song" ], "externalIds": { "DBLP": "journals/corr/abs-2303-04137", "ArXiv": "2303.04137", "DOI": "10.48550/arXiv.2303.04137", "CorpusId": 257378658 }, "url": "https://www.semanticscholar.org/paper/bdba3bd30a49ea4c5b20b43dbd8f0eb59e9d80e2", "referenceCount": 58, "citationCount": 430, "influentialCitationCount": 117, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SE(3) diffusion model with application to protein backbone generation", "abstract": "The design of novel protein structures remains a challenge in protein engineering for applications across biomedicine and chemistry. In this line of work, a diffusion model over rigid bodies in 3D (referred to as frames) has shown success in generating novel, functional protein backbones that have not been observed in nature. However, there exists no principled methodological framework for diffusion on SE(3), the space of orientation preserving rigid motions in R3, that operates on frames and confers the group invariance. We address these shortcomings by developing theoretical foundations of SE(3) invariant diffusion models on multiple frames followed by a novel framework, FrameDiff, for learning the SE(3) equivariant score over multiple frames. We apply FrameDiff on monomer backbone generation and find it can generate designable monomers up to 500 amino acids without relying on a pretrained protein structure prediction network that has been integral to previous methods. We find our samples are capable of generalizing beyond any known protein structure.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Jason Yim", "Brian L. Trippe", "Valentin De Bortoli", "Emile Mathieu", "A. Doucet", "R. Barzilay", "T. Jaakkola" ], "externalIds": { "DBLP": "conf/icml/YimTBMDBJ23", "ArXiv": "2302.02277", "DOI": "10.48550/arXiv.2302.02277", "CorpusId": 256615935 }, "url": "https://www.semanticscholar.org/paper/8f3b40d23e5a38d11ce89a4ca8d3227d4bd4d90b", "referenceCount": 71, "citationCount": 126, "influentialCitationCount": 18, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Biology", "Mathematics" ] }, { "title": "SE(3)-DiffusionFields: Learning smooth cost functions for joint grasp and motion optimization through diffusion", "abstract": "Multi-objective optimization problems are ubiquitous in robotics, e.g., the optimization of a robot manipulation task requires a joint consideration of grasp pose configurations, collisions and joint limits. While some demands can be easily hand-designed, e.g., the smoothness of a trajectory, several task-specific objectives need to be learned from data. This work introduces a method for learning data-driven SE(3) cost functions as diffusion models. Diffusion models can represent highly-expressive multimodal distributions and exhibit proper gradients over the entire space due to their score-matching training objective. Learning costs as diffusion models allows their seamless integration with other costs into a single differentiable objective function, enabling joint gradient-based motion optimization. 
In this work, we focus on learning SE(3) diffusion models for 6DoF grasping, giving rise to a novel framework for joint grasp and motion optimization without needing to decouple grasp selection from trajectory generation. We evaluate the representation power of our SE(3) diffusion models w.r.t. classical generative models, and we showcase the superior performance of our proposed optimization framework in a series of simulated and real-world robotic manipulation tasks against representative baselines. Videos, code and additional details are available at: https://sites.google.com/view/se3dif", "year": 2022, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Julen Urain", "Niklas Funk", "Jan Peters", "G. Chalvatzaki" ], "externalIds": { "DBLP": "conf/icra/UrainFPC23", "ArXiv": "2209.03855", "DOI": "10.1109/ICRA48891.2023.10161569", "CorpusId": 252367206 }, "url": "https://www.semanticscholar.org/paper/4ec93b084414d39fcfaab3cf6a28b1bc20509ab7", "referenceCount": 79, "citationCount": 67, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Diffusion Policies as an Expressive Policy Class for Offline Reinforcement Learning", "abstract": "Offline reinforcement learning (RL), which aims to learn an optimal policy using a previously collected static dataset, is an important paradigm of RL. Standard RL methods often perform poorly in this regime due to the function approximation errors on out-of-distribution actions. While a variety of regularization methods have been proposed to mitigate this issue, they are often constrained by policy classes with limited expressiveness that can lead to highly suboptimal solutions. In this paper, we propose representing the policy as a diffusion model, a recent class of highly-expressive deep generative models. We introduce Diffusion Q-learning (Diffusion-QL) that utilizes a conditional diffusion model to represent the policy. In our approach, we learn an action-value function and we add a term maximizing action-values into the training loss of the conditional diffusion model, which results in a loss that seeks optimal actions that are near the behavior policy. We show the expressiveness of the diffusion model-based policy, and the coupling of the behavior cloning and policy improvement under the diffusion model both contribute to the outstanding performance of Diffusion-QL. We illustrate the superiority of our method compared to prior works in a simple 2D bandit example with a multimodal behavior policy. We then show that our method can achieve state-of-the-art performance on the majority of the D4RL benchmark tasks.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Zhendong Wang", "Jonathan J. Hunt", "Mingyuan Zhou" ], "externalIds": { "DBLP": "conf/iclr/WangHZ23", "ArXiv": "2208.06193", "DOI": "10.48550/arXiv.2208.06193", "CorpusId": 251554821 }, "url": "https://www.semanticscholar.org/paper/2cbea7615ebecea2c414d8fbad47d5d258a5c3b4", "referenceCount": 45, "citationCount": 213, "influentialCitationCount": 39, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Elucidating the Design Space of Diffusion-Based Generative Models", "abstract": "We argue that the theory and practice of diffusion-based generative models are currently unnecessarily convoluted and seek to remedy the situation by presenting a design space that clearly separates the concrete design choices. 
This lets us identify several changes to both the sampling and training processes, as well as preconditioning of the score networks. Together, our improvements yield new state-of-the-art FID of 1.79 for CIFAR-10 in a class-conditional setting and 1.97 in an unconditional setting, with much faster sampling (35 network evaluations per image) than prior designs. To further demonstrate their modular nature, we show that our design changes dramatically improve both the efficiency and quality obtainable with pre-trained score networks from previous work, including improving the FID of a previously trained ImageNet-64 model from 2.07 to near-SOTA 1.55, and after re-training with our proposed improvements to a new SOTA of 1.36.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Tero Karras", "M. Aittala", "Timo Aila", "S. Laine" ], "externalIds": { "DBLP": "journals/corr/abs-2206-00364", "ArXiv": "2206.00364", "DOI": "10.48550/arXiv.2206.00364", "CorpusId": 249240415 }, "url": "https://www.semanticscholar.org/paper/2f4c451922e227cbbd4f090b74298445bbd900d0", "referenceCount": 65, "citationCount": 1135, "influentialCitationCount": 301, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Planning with Diffusion for Flexible Behavior Synthesis", "abstract": "Model-based reinforcement learning methods often use learning only for the purpose of estimating an approximate dynamics model, offloading the rest of the decision-making work to classical trajectory optimizers. While conceptually simple, this combination has a number of empirical shortcomings, suggesting that learned models may not be well-suited to standard trajectory optimization. In this paper, we consider what it would look like to fold as much of the trajectory optimization pipeline as possible into the modeling problem, such that sampling from the model and planning with it become nearly identical. The core of our technical approach lies in a diffusion probabilistic model that plans by iteratively denoising trajectories. We show how classifier-guided sampling and image inpainting can be reinterpreted as coherent planning strategies, explore the unusual and useful properties of diffusion-based planning methods, and demonstrate the effectiveness of our framework in control settings that emphasize long-horizon decision-making and test-time flexibility.", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "Michael Janner", "Yilun Du", "J. Tenenbaum", "S. Levine" ], "externalIds": { "ArXiv": "2205.09991", "DBLP": "journals/corr/abs-2205-09991", "DOI": "10.48550/arXiv.2205.09991", "CorpusId": 248965046 }, "url": "https://www.semanticscholar.org/paper/3ebdd3db0dd91069fa0cd31cbf8308b60b1b565e", "referenceCount": 76, "citationCount": 383, "influentialCitationCount": 81, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Riemannian Score-Based Generative Modeling", "abstract": "Score-based generative models (SGMs) are a powerful class of generative models that exhibit remarkable empirical performance. Score-based generative modelling (SGM) consists of a ``noising'' stage, whereby a diffusion is used to gradually add Gaussian noise to data, and a generative model, which entails a ``denoising'' process defined by approximating the time-reversal of the diffusion. Existing SGMs assume that data is supported on a Euclidean space, i.e. a manifold with flat geometry. 
In many domains such as robotics, geoscience or protein modelling, data is often naturally described by distributions living on Riemannian manifolds and current SGM techniques are not appropriate. We introduce here Riemannian Score-based Generative Models (RSGMs), a class of generative models extending SGMs to Riemannian manifolds. We demonstrate our approach on a variety of manifolds, and in particular with earth and climate science spherical data.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Valentin De Bortoli", "Emile Mathieu", "M. Hutchinson", "James Thornton", "Y. Teh", "A. Doucet" ], "externalIds": { "DBLP": "conf/nips/BortoliMHTTD22", "ArXiv": "2202.02763", "CorpusId": 246634114 }, "url": "https://www.semanticscholar.org/paper/7d2ff802094eb24bed1faa363a8d07947905be3e", "referenceCount": 168, "citationCount": 129, "influentialCitationCount": 14, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Efficient and Robust LiDAR-Based End-to-End Navigation", "abstract": "Deep learning has been used to demonstrate end-to-end neural network learning for autonomous vehicle control from raw sensory input. While LiDAR sensors provide reliably accurate information, existing end-to-end driving solutions are mainly based on cameras since processing 3D data requires a large memory footprint and computation cost. On the other hand, increasing the robustness of these systems is also critical; however, even estimating the model’s uncertainty is very challenging due to the cost of sampling-based methods. In this paper, we present an efficient and robust LiDAR-based end-to-end navigation framework. We first introduce Fast-LiDARNet that is based on sparse convolution kernel optimization and hardware-aware model design. We then propose Hybrid Evidential Fusion that directly estimates the uncertainty of the prediction from only a single forward pass and then fuses the control predictions intelligently. We evaluate our system on a full-scale vehicle and demonstrate lane-stable as well as navigation capabilities. In the presence of out-of-distribution events (e.g., sensor failures), our system significantly improves robustness and reduces the number of takeovers in the real world.", "year": 2021, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Zhijian Liu", "Alexander Amini", "Sibo Zhu", "S. Karaman", "Song Han", "D. Rus" ], "externalIds": { "ArXiv": "2105.09932", "DBLP": "conf/icra/LiuAZK0R21", "DOI": "10.1109/ICRA48506.2021.9561299", "CorpusId": 234790472 }, "url": "https://www.semanticscholar.org/paper/8e7412e8e71664f730ed815c8eea1f60ad350d7c", "referenceCount": 52, "citationCount": 42, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Diffusion Models Beat GANs on Image Synthesis", "abstract": "We show that diffusion models can achieve image sample quality superior to the current state-of-the-art generative models. We achieve this on unconditional image synthesis by finding a better architecture through a series of ablations. For conditional image synthesis, we further improve sample quality with classifier guidance: a simple, compute-efficient method for trading off diversity for fidelity using gradients from a classifier. 
We achieve an FID of 2.97 on ImageNet 128$\\times$128, 4.59 on ImageNet 256$\\times$256, and 7.72 on ImageNet 512$\\times$512, and we match BigGAN-deep even with as few as 25 forward passes per sample, all while maintaining better coverage of the distribution. Finally, we find that classifier guidance combines well with upsampling diffusion models, further improving FID to 3.94 on ImageNet 256$\\times$256 and 3.85 on ImageNet 512$\\times$512. We release our code at https://github.com/openai/guided-diffusion", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Prafulla Dhariwal", "Alex Nichol" ], "externalIds": { "ArXiv": "2105.05233", "DBLP": "journals/corr/abs-2105-05233", "CorpusId": 234357997 }, "url": "https://www.semanticscholar.org/paper/64ea8f180d0682e6c18d1eb688afdb2027c02794", "referenceCount": 81, "citationCount": 5177, "influentialCitationCount": 661, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Score-Based Generative Modeling through Stochastic Differential Equations", "abstract": "Creating noise from data is easy; creating data from noise is generative modeling. We present a stochastic differential equation (SDE) that smoothly transforms a complex data distribution to a known prior distribution by slowly injecting noise, and a corresponding reverse-time SDE that transforms the prior distribution back into the data distribution by slowly removing the noise. Crucially, the reverse-time SDE depends only on the time-dependent gradient field (\\aka, score) of the perturbed data distribution. By leveraging advances in score-based generative modeling, we can accurately estimate these scores with neural networks, and use numerical SDE solvers to generate samples. We show that this framework encapsulates previous approaches in score-based generative modeling and diffusion probabilistic modeling, allowing for new sampling procedures and new modeling capabilities. In particular, we introduce a predictor-corrector framework to correct errors in the evolution of the discretized reverse-time SDE. We also derive an equivalent neural ODE that samples from the same distribution as the SDE, but additionally enables exact likelihood computation, and improved sampling efficiency. In addition, we provide a new way to solve inverse problems with score-based models, as demonstrated with experiments on class-conditional generation, image inpainting, and colorization. Combined with multiple architectural improvements, we achieve record-breaking performance for unconditional image generation on CIFAR-10 with an Inception score of 9.89 and FID of 2.20, a competitive likelihood of 2.99 bits/dim, and demonstrate high fidelity generation of 1024 x 1024 images for the first time from a score-based generative model.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Yang Song", "Jascha Narain Sohl-Dickstein", "Diederik P. 
Kingma", "Abhishek Kumar", "Stefano Ermon", "Ben Poole" ], "externalIds": { "DBLP": "journals/corr/abs-2011-13456", "ArXiv": "2011.13456", "MAG": "3110257065", "CorpusId": 227209335 }, "url": "https://www.semanticscholar.org/paper/633e2fbfc0b21e959a244100937c5853afca4853", "referenceCount": 66, "citationCount": 4108, "influentialCitationCount": 956, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Denoising Diffusion Probabilistic Models", "abstract": "We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional CIFAR10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art FID score of 3.17. On 256x256 LSUN, we obtain sample quality similar to ProgressiveGAN. Our implementation is available at this https URL", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Jonathan Ho", "Ajay Jain", "P. Abbeel" ], "externalIds": { "ArXiv": "2006.11239", "MAG": "3100572490", "DBLP": "journals/corr/abs-2006-11239", "CorpusId": 219955663 }, "url": "https://www.semanticscholar.org/paper/5c126ae3421f05768d8edd97ecd44b1364e2c99a", "referenceCount": 73, "citationCount": 10778, "influentialCitationCount": 2337, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Variational End-to-End Navigation and Localization", "abstract": "Deep learning has revolutionized the ability to learn “end-to-end” autonomous vehicle control directly from raw sensory data. While there have been recent extensions to handle forms of navigation instruction, these works are unable to capture the full distribution of possible actions that could be taken and to reason about localization of the robot within the environment. In this paper, we extend end-to-end driving networks with the ability to perform point-to-point navigation as well as probabilistic localization using only noisy GPS data. We define a novel variational network capable of learning from raw camera data of the environment as well as higher level roadmaps to predict (1) a full probability distribution over the possible control commands; and (2) a deterministic control command capable of navigating on the route specified within the map. Additionally, we formulate how our model can be used to localize the robot according to correspondences between the map and the observed visual road topology, inspired by the rough localization that human drivers can perform. We test our algorithms on real-world driving data that the vehicle has never driven through before, and integrate our point-topoint navigation algorithms onboard a full-scale autonomous vehicle for real-time performance. Our localization algorithm is also evaluated over a new set of roads and intersections to demonstrates rough pose localization even in situations without any GPS prior.", "year": 2018, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Alexander Amini", "G. Rosman", "S. Karaman", "D. 
Rus" ], "externalIds": { "DBLP": "conf/icra/AminiRKR19", "MAG": "2901976130", "ArXiv": "1811.10119", "DOI": "10.1109/ICRA.2019.8793579", "CorpusId": 53751389 }, "url": "https://www.semanticscholar.org/paper/2deda3617b8ea9bd32f84e313d78048380a3c725", "referenceCount": 36, "citationCount": 107, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "FiLM: Visual Reasoning with a General Conditioning Layer", "abstract": "\n \n We introduce a general-purpose conditioning method for neural networks called FiLM: Feature-wise Linear Modulation. FiLM layers influence neural network computation via a simple, feature-wise affine transformation based on conditioning information. We show that FiLM layers are highly effective for visual reasoning - answering image-related questions which require a multi-step, high-level process - a task which has proven difficult for standard deep learning methods that do not explicitly model reasoning. Specifically, we show on visual reasoning tasks that FiLM layers 1) halve state-of-the-art error for the CLEVR benchmark, 2) modulate features in a coherent manner, 3) are robust to ablations and architectural modifications, and 4) generalize well to challenging, new data from few examples or even zero-shot.\n \n", "year": 2017, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Ethan Perez", "Florian Strub", "H. D. Vries", "Vincent Dumoulin", "Aaron C. Courville" ], "externalIds": { "DBLP": "journals/corr/abs-1709-07871", "MAG": "2951555602", "ArXiv": "1709.07871", "DOI": "10.1609/aaai.v32i1.11671", "CorpusId": 19119291 }, "url": "https://www.semanticscholar.org/paper/7cfa5c97164129ce3630511f639040d28db1d4b7", "referenceCount": 44, "citationCount": 1776, "influentialCitationCount": 186, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Deep Residual Learning for Image Recognition", "abstract": "Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers - 8× deeper than VGG nets [40] but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions1, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation.", "year": 2015, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Kaiming He", "X. 
Zhang", "Shaoqing Ren", "Jian Sun" ], "externalIds": { "DBLP": "conf/cvpr/HeZRS16", "MAG": "2949650786", "ArXiv": "1512.03385", "DOI": "10.1109/cvpr.2016.90", "CorpusId": 206594692 }, "url": "https://www.semanticscholar.org/paper/2c03df8b48bf3fa39054345bafabfeff15bfd11d", "referenceCount": 53, "citationCount": 172713, "influentialCitationCount": 28229, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "U-Net: Convolutional Networks for Biomedical Image Segmentation", "abstract": null, "year": 2015, "venue": "International Conference on Medical Image Computing and Computer-Assisted Intervention", "authors": [ "O. Ronneberger", "P. Fischer", "T. Brox" ], "externalIds": { "ArXiv": "1505.04597", "MAG": "1901129140", "DBLP": "journals/corr/RonnebergerFB15", "DOI": "10.1007/978-3-319-24574-4_28", "CorpusId": 3719281 }, "url": "https://www.semanticscholar.org/paper/6364fdaa0a0eccd823a779fcdd489173f938e91a", "referenceCount": 18, "citationCount": 66494, "influentialCitationCount": 9274, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Gaussian Distributions on Lie Groups and Their Application to Statistical Shape Analysis", "abstract": null, "year": 2003, "venue": "Information Processing in Medical Imaging", "authors": [ "P. T. Fletcher", "S. Joshi", "Conglin Lu", "S. Pizer" ], "externalIds": { "MAG": "1604808697", "DBLP": "conf/ipmi/FletcherJLP03", "DOI": "10.1007/978-3-540-45087-0_38", "CorpusId": 9229827, "PubMed": "15344479" }, "url": "https://www.semanticscholar.org/paper/e4fd06cc2e2b70748ca88ae6653ba1c7f6358280", "referenceCount": 18, "citationCount": 80, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Medicine", "Computer Science" ] }, { "title": "Loss-Guided Diffusion Models for Plug-and-Play Controllable Generation", "abstract": "We consider guiding denoising diffusion models with general differentiable loss functions in a plug-and-play fashion, enabling controllable generation without additional training. This paradigm, termed Loss-Guided Diffusion (LGD), can easily be integrated into all diffusion models and leverage various efficient samplers. Despite the benefits, the resulting guidance term is, unfortunately, an intractable integral and needs to be approximated. Existing methods compute the guidance term based on a point estimate. However, we show that such approaches have significant errors over the scale of the approximations. To address this issue, we propose a Monte Carlo method that uses multiple samples from a suitable distribution to reduce bias. Our method is effective in various synthetic and real-world settings, including image super-resolution, text or label-conditional image generation, and controllable motion synthesis. Notably, we show how our method can be applied to control a pretrained motion diffusion model to follow certain paths and avoid obstacles that are proven challenging to prior methods.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Jiaming Song", "Qinsheng Zhang", "Hongxu Yin", "M. Mardani", "Ming-Yu Liu", "J. 
Kautz", "Yongxin Chen", "Arash Vahdat" ], "externalIds": { "DBLP": "conf/icml/SongZYM0KCV23", "CorpusId": 260957043 }, "url": "https://www.semanticscholar.org/paper/464df3e306a5b9a6a7131a582637aaef3de058f2", "referenceCount": 57, "citationCount": 49, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "“Pcrdif-fusion: Diffusion probabilistic models for point cloud registration,”", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“Ceres Solver,”", "abstract": null, "year": null, "venue": "github", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Consistent estimation of generative model representations in the data kernel perspective space": { "paper_title": "Consistent estimation of generative model representations in the data kernel perspective space", "arxiv_id": "2409.17308v1", "keyword": "diffusion model", "authors": [ "Aranyak Acharyya", "Michael W. Trosset", "Carey E. Priebe", "Hayden S. Helm" ], "references": [ { "title": "The Llama 3 Herd of Models", "abstract": "Modern artificial intelligence (AI) systems are powered by foundation models. This paper presents a new set of foundation models, called Llama 3. It is a herd of language models that natively support multilinguality, coding, reasoning, and tool usage. Our largest model is a dense Transformer with 405B parameters and a context window of up to 128K tokens. This paper presents an extensive empirical evaluation of Llama 3. We find that Llama 3 delivers comparable quality to leading language models such as GPT-4 on a plethora of tasks. We publicly release Llama 3, including pre-trained and post-trained versions of the 405B parameter language model and our Llama Guard 3 model for input and output safety. The paper also presents the results of experiments in which we integrate image, video, and speech capabilities into Llama 3 via a compositional approach. We observe this approach performs competitively with the state-of-the-art on image, video, and speech recognition tasks. The resulting models are not yet being broadly released as they are still under development.", "year": 2024, "venue": "arXiv.org", "authors": [ "Abhimanyu Dubey", "Abhinav Jauhri", "Abhinav Pandey", "Abhishek Kadian", "Ahmad Al-Dahle", "Aiesha Letman", "Akhil Mathur", "Alan Schelten", "Amy Yang", "Angela Fan", "Anirudh Goyal", "Anthony Hartshorn", "Aobo Yang", "Archi Mitra", "Archie Sravankumar", "Artem Korenev", "Arthur Hinsvark", "Arun Rao", "Aston Zhang", "Aurelien Rodriguez", "Austen Gregerson", "Ava Spataru", "Baptiste Rozière", "Bethany Biron", "Binh Tang", "Bobbie Chern", "C. Caucheteux", "Chaya Nayak", "Chloe Bi", "Chris Marra", "Chris McConnell", "Christian Keller", "Christophe Touret", "Chunyang Wu", "Corinne Wong", "Cristian Cantón Ferrer", "Cyrus Nikolaidis", "Damien Allonsius", "Daniel Song", "Danielle Pintz", "Danny Livshits", "David Esiobu", "Dhruv Choudhary", "Dhruv Mahajan", "Diego Garcia-Olano", "Diego Perino", "Dieuwke Hupkes", "Egor Lakomkin", "Ehab A. 
AlBadawy", "Elina Lobanova", "Emily Dinan", "Eric Michael Smith", "Filip Radenovic", "Frank Zhang", "Gabriele Synnaeve", "Gabrielle Lee", "Georgia Lewis Anderson", "Graeme Nail", "Grégoire Mialon", "Guanglong Pang", "Guillem Cucurell", "Hailey Nguyen", "Hannah Korevaar", "Hu Xu", "Hugo Touvron", "Iliyan Zarov", "Imanol Arrieta Ibarra", "Isabel M. Kloumann", "Ishan Misra", "Ivan Evtimov", "Jade Copet", "Jaewon Lee", "J. Geffert", "Jana Vranes", "Jason Park", "Jay Mahadeokar", "Jeet Shah", "J. V. D. Linde", "Jennifer Billock", "Jenny Hong", "Jenya Lee", "Jeremy Fu", "Jianfeng Chi", "Jianyu Huang", "Jiawen Liu", "Jie Wang", "Jiecao Yu", "Joanna Bitton", "Joe Spisak", "Jongsoo Park", "Joseph Rocca", "Joshua Johnstun", "Joshua Saxe", "Ju-Qing Jia", "Kalyan Vasuden Alwala", "K. Upasani", "Kate Plawiak", "Keqian Li", "K. Heafield", "Kevin Stone", "Khalid El-Arini", "Krithika Iyer", "Kshitiz Malik", "Kuenley Chiu", "Kunal Bhalla", "Lauren Rantala-Yeary", "L. Maaten", "Lawrence Chen", "Liang Tan", "Liz Jenkins", "Louis Martin", "Lovish Madaan", "Lubo Malo", "Lukas Blecher", "Lukas Landzaat", "Luke de Oliveira", "Madeline C. Muzzi", "M. Pasupuleti", "Mannat Singh", "Manohar Paluri", "Marcin Kardas", "Mathew Oldham", "Mathieu Rita", "Maya Pavlova", "M. Kambadur", "Mike Lewis", "Min Si", "Mitesh Kumar Singh", "Mona Hassan", "Naman Goyal", "Narjes Torabi", "Nikolay Bashlykov", "Nikolay Bogoychev", "Niladri S. Chatterji", "Olivier Duchenne", "Onur cCelebi", "Patrick Alrassy", "Pengchuan Zhang", "Pengwei Li", "Petar Vasić", "Peter Weng", "Prajjwal Bhargava", "Pratik Dubal", "Praveen Krishnan", "Punit Singh Koura", "Puxin Xu", "Qing He", "Qingxiao Dong", "Ragavan Srinivasan", "Raj Ganapathy", "Ramon Calderer", "Ricardo Silveira Cabral", "Robert Stojnic", "Roberta Raileanu", "Rohit Girdhar", "Rohit Patel", "Romain Sauvestre", "Ronnie Polidoro", "Roshan Sumbaly", "Ross Taylor", "Ruan Silva", "Rui Hou", "Rui Wang", "Saghar Hosseini", "Sahana Chennabasappa", "Sanjay Singh", "Sean Bell", "Seohyun Sonia Kim", "Sergey Edunov", "Shaoliang Nie", "Sharan Narang", "S. Raparthy", "Sheng Shen", "Shengye Wan", "Shruti Bhosale", "Shun Zhang", "Simon Vandenhende", "Soumya Batra", "Spencer Whitman", "Sten Sootla", "Stephane Collot", "Suchin Gururangan", "S. Borodinsky", "Tamar Herman", "Tara Fowler", "Tarek Sheasha", "Thomas Georgiou", "Thomas Scialom", "Tobias Speckbacher", "Todor Mihaylov", "Tong Xiao", "Ujjwal Karn", "Vedanuj Goswami", "Vibhor Gupta", "Vignesh Ramanathan", "Viktor Kerkez", "Vincent Gonguet", "Virginie Do", "Vish Vogeti", "Vladan Petrovic", "Weiwei Chu", "Wenhan Xiong", "Wenyin Fu", "Whitney Meers", "Xavier Martinet", "Xiaodong Wang", "Xiaoqing Ellen Tan", "Xinfeng Xie", "Xuchao Jia", "Xuewei Wang", "Yaelle Goldschlag", "Yashesh Gaur", "Yasmine Babaei", "Yiqian Wen", "Yiwen Song", "Yuchen Zhang", "Yue Li", "Yuning Mao", "Zacharie Delpierre Coudert", "Zhengxu Yan", "Zhengxing Chen", "Zoe Papakipos", "Aaditya K. Singh", "Aaron Grattafiori", "Abha Jain", "Adam Kelsey", "Adam Shajnfeld", "Adi Gangidi", "Adolfo Victoria", "Ahuva Goldstand", "Ajay Menon", "Ajay Sharma", "Alex Boesenberg", "Alex Vaughan", "Alexei Baevski", "Allie Feinstein", "A. 
Kallet", "Amit Sangani", "Anam Yunus", "Andrei Lupu", "Andres Alvarado", "Andrew Caples", "Andrew Gu", "Andrew Ho", "Andrew Poulton", "Andrew Ryan", "Ankit Ramchandani", "Annie Franco", "Aparajita Saraf", "Arkabandhu Chowdhury", "Ashley Gabriel", "Ashwin Bharambe", "Assaf Eisenman", "Azadeh Yazdan", "Beau James", "Ben Maurer", "Ben Leonhardi", "Bernie Huang", "Beth Loyd", "Beto De Paola", "Bhargavi Paranjape", "Bing Liu", "Bo Wu", "Boyu Ni", "Braden Hancock", "Bram Wasti", "Brandon Spence", "Brani Stojkovic", "Brian Gamido", "Britt Montalvo", "Carl Parker", "Carly Burton", "Catalina Mejia", "Changhan Wang", "Changkyu Kim", "Chao Zhou", "Chester Hu", "Ching-Hsiang Chu", "Chris Cai", "Chris Tindal", "Christoph Feichtenhofer", "Damon Civin", "Dana Beaty", "Daniel Kreymer", "Shang-Wen Li", "Danny Wyatt", "David Adkins", "David Xu", "Davide Testuggine", "Delia David", "Devi Parikh", "Diana Liskovich", "Didem Foss", "Dingkang Wang", "Duc Le", "Dustin Holland", "Edward Dowling", "Eissa Jamil", "Elaine Montgomery", "Eleonora Presani", "Emily Hahn", "Emily Wood", "Erik Brinkman", "Esteban Arcaute", "Evan Dunbar", "Evan Smothers", "Fei Sun", "Felix Kreuk", "Feng Tian", "Firat Ozgenel", "Francesco Caggioni", "Francisco Guzm'an", "Frank J. Kanayet", "Frank Seide", "Gabriela Medina Florez", "Gabriella Schwarz", "Gada Badeer", "Georgia Swee", "Gil Halpern", "G. Thattai", "Grant Herman", "Grigory G. Sizov", "Guangyi Zhang", "Guna Lakshminarayanan", "Hamid Shojanazeri", "Han Zou", "Hannah Wang", "Han Zha", "Haroun Habeeb", "Harrison Rudolph", "Helen Suk", "Henry Aspegren", "Hunter Goldman", "Igor Molybog", "I. Tufanov", "Irina-Elena Veliche", "Itai Gat", "Jake Weissman", "James Geboski", "James Kohli", "Japhet Asher", "Jean-Baptiste Gaya", "Jeff Marcus", "Jeff Tang", "Jennifer Chan", "Jenny Zhen", "Jeremy Reizenstein", "Jeremy Teboul", "Jessica Zhong", "Jian Jin", "Jingyi Yang", "Joe Cummings", "Jon Carvill", "Jon Shepard", "Jonathan McPhie", "Jonathan Torres", "Josh Ginsburg", "Junjie Wang", "Kaixing(Kai) Wu", "U. KamHou", "Karan Saxena", "Karthik Prasad", "Kartikay Khandelwal", "Katayoun Zand", "Kathy Matosich", "K. Veeraraghavan", "Kelly Michelena", "Keqian Li", "Kun Huang", "Kunal Chawla", "Kushal Lakhotia", "Kyle Huang", "Lailin Chen", "Lakshya Garg", "A. Lavender", "Leandro Silva", "Lee Bell", "Lei Zhang", "Liangpeng Guo", "Licheng Yu", "Liron Moshkovich", "Luca Wehrstedt", "Madian Khabsa", "Manav Avalani", "Manish Bhatt", "Maria Tsimpoukelli", "Martynas Mankus", "Matan Hasson", "M. Lennie", "Matthias Reso", "Maxim Groshev", "Maxim Naumov", "Maya Lathi", "Meghan Keneally", "M. 
Seltzer", "Michal Valko", "Michelle Restrepo", "Mihir Patel", "Mik Vyatskov", "Mikayel Samvelyan", "Mike Clark", "Mike Macey", "Mike Wang", "Miquel Jubert Hermoso", "Mo Metanat", "Mohammad Rastegari", "Munish Bansal", "Nandhini Santhanam", "Natascha Parks", "Natasha White", "Navyata Bawa", "Nayan Singhal", "Nick Egebo", "Nicolas Usunier", "Nikolay Pavlovich Laptev", "Ning Dong", "Ning Zhang", "Norman Cheng", "Oleg Chernoguz", "Olivia Hart", "Omkar Salpekar", "Ozlem Kalinli", "Parkin Kent", "Parth Parekh", "Paul Saab", "Pavan Balaji", "Pedro Rittner", "Philip Bontrager", "Pierre Roux", "Piotr Dollár", "Polina Zvyagina", "Prashant Ratanchandani", "Pritish Yuvraj", "Qian Liang", "Rachad Alao", "Rachel Rodriguez", "Rafi Ayub", "Raghotham Murthy", "Raghu Nayani", "Rahul Mitra", "Raymond Li", "Rebekkah Hogan", "Robin Battey", "Rocky Wang", "Rohan Maheswari", "Russ Howes", "Ruty Rinott", "Sai Jayesh Bondu", "Samyak Datta", "Sara Chugh", "Sara Hunt", "Sargun Dhillon", "Sasha Sidorov", "Satadru Pan", "Saurabh Verma", "Seiji Yamamoto", "Sharadh Ramaswamy", "Shaun Lindsay", "Sheng Feng", "Shenghao Lin", "S. Zha", "Shiva Shankar", "Shuqiang Zhang", "Sinong Wang", "Sneha Agarwal", "S. Sajuyigbe", "Soumith Chintala", "Stephanie Max", "Stephen Chen", "Steve Kehoe", "Steve Satterfield", "Sudarshan Govindaprasad", "Sumit Gupta", "Sung-Bae Cho", "Sunny Virk", "Suraj Subramanian", "Sy Choudhury", "Sydney Goldman", "Tal Remez", "Tamar Glaser", "Tamara Best", "Thilo Kohler", "Thomas Robinson", "Tianhe Li", "Tianjun Zhang", "Tim Matthews", "Timothy Chou", "Tzook Shaked", "Varun Vontimitta", "Victoria Ajayi", "Victoria Montanez", "Vijai Mohan", "Vinay Satish Kumar", "Vishal Mangla", "Vlad Ionescu", "V. Poenaru", "Vlad T. Mihailescu", "Vladimir Ivanov", "Wei Li", "Wenchen Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2407-21783", "ArXiv": "2407.21783", "DOI": "10.48550/arXiv.2407.21783", "CorpusId": 271571434 }, "url": "https://www.semanticscholar.org/paper/6520557cc3bfd198f960cc8cb6151c3474321bd8", "referenceCount": 0, "citationCount": 286, "influentialCitationCount": 58, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Tracking the perspectives of interacting language models", "abstract": "Large language models (LLMs) are capable of producing high quality information at unprecedented rates. As these models continue to entrench themselves in society, the content they produce will become increasingly pervasive in databases that are, in turn, incorporated into the pre-training data, fine-tuning data, retrieval data, etc. of other language models. In this paper we formalize the idea of a communication network of LLMs and introduce a method for representing the perspective of individual models within a collection of LLMs. Given these tools we systematically study information diffusion in the communication network of LLMs in various simulated settings.", "year": 2024, "venue": "arXiv.org", "authors": [ "Hayden S. Helm", "Brandon Duderstadt", "Youngser Park", "Carey E. 
Priebe" ], "externalIds": { "ArXiv": "2406.11938", "DBLP": "journals/corr/abs-2406-11938", "DOI": "10.48550/arXiv.2406.11938", "CorpusId": 270562000 }, "url": "https://www.semanticscholar.org/paper/8aadf59ac62821683a5475a45cbc1a89e372027a", "referenceCount": 42, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Continuous Multidimensional Scaling", "abstract": "Multidimensional scaling (MDS) is the act of embedding proximity information about a set of $n$ objects in $d$-dimensional Euclidean space. As originally conceived by the psychometric community, MDS was concerned with embedding a fixed set of proximities associated with a fixed set of objects. Modern concerns, e.g., that arise in developing asymptotic theories for statistical inference on random graphs, more typically involve studying the limiting behavior of a sequence of proximities associated with an increasing set of objects. Standard results from the theory of point-to-set maps imply that, if $n$ is fixed and a sequence of proximities converges, then the limit of the embedded structures is the embedded structure of the limiting proximities. But what if $n$ increases? It then becomes necessary to reformulate MDS so that the entire sequence of embedding problems can be viewed as a sequence of optimization problems in a fixed space. We present such a reformulation and derive some consequences.", "year": 2024, "venue": "arXiv.org", "authors": [ "M. Trosset", "Carey E. Priebe" ], "externalIds": { "DBLP": "journals/corr/abs-2402-04436", "ArXiv": "2402.04436", "DOI": "10.48550/arXiv.2402.04436", "CorpusId": 267522816 }, "url": "https://www.semanticscholar.org/paper/5b60ed985272d70e3ddaa553362fbd09977ccd2e", "referenceCount": 13, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Nomic Embed: Training a Reproducible Long Context Text Embedder", "abstract": "This technical report describes the training of nomic-embed-text-v1, the first fully reproducible, open-source, open-weights, open-data, 8192 context length English text embedding model that outperforms both OpenAI Ada-002 and OpenAI text-embedding-3-small on short and long-context tasks. We release the training code and model weights under an Apache 2 license. In contrast with other open-source models, we release a training data loader with 235 million curated text pairs that allows for the full replication of nomic-embed-text-v1. You can find code and data to replicate the model at https://github.com/nomic-ai/contrastors", "year": 2024, "venue": "arXiv.org", "authors": [ "Zach Nussbaum", "John X. Morris", "Brandon Duderstadt", "Andriy Mulyar" ], "externalIds": { "ArXiv": "2402.01613", "DBLP": "journals/corr/abs-2402-01613", "DOI": "10.48550/arXiv.2402.01613", "CorpusId": 267406738 }, "url": "https://www.semanticscholar.org/paper/03bdd9cbb3b768ff3e96c97b28e106748b6e4fd0", "referenceCount": 66, "citationCount": 37, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Comparing Foundation Models using Data Kernels", "abstract": "Recent advances in self-supervised learning and neural network scaling have enabled the creation of large models, known as foundation models, which can be easily adapted to a wide range of downstream tasks. 
The current paradigm for comparing foundation models involves evaluating them with aggregate metrics on various benchmark datasets. This method of model comparison is heavily dependent on the chosen evaluation metric, which makes it unsuitable for situations where the ideal metric is either not obvious or unavailable. In this work, we present a methodology for directly comparing the embedding space geometry of foundation models, which facilitates model comparison without the need for an explicit evaluation metric. Our methodology is grounded in random graph theory and enables valid hypothesis testing of embedding similarity on a per-datum basis. Further, we demonstrate how our methodology can be extended to facilitate population level model comparison. In particular, we show how our framework can induce a manifold of models equipped with a distance function that correlates strongly with several downstream metrics. We remark on the utility of this population level model comparison as a first step towards a taxonomic science of foundation models.", "year": 2023, "venue": "arXiv.org", "authors": [ "Brandon Duderstadt", "Hayden S. Helm", "Carey E. Priebe" ], "externalIds": { "ArXiv": "2305.05126", "DBLP": "journals/corr/abs-2305-05126", "DOI": "10.48550/arXiv.2305.05126", "CorpusId": 258564434 }, "url": "https://www.semanticscholar.org/paper/4df06525ed45f0c72d9cea99b74670d85a703470", "referenceCount": 31, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Perspectives on Large Language Models for Relevance Judgment", "abstract": "When asked, large language models (LLMs) like ChatGPT claim that they can assist with relevance judgments but it is not clear whether automated judgments can reliably be used in evaluations of retrieval systems. In this perspectives paper, we discuss possible ways for LLMs to support relevance judgments along with concerns and issues that arise. We devise a human-machine collaboration spectrum that allows to categorize different relevance judgment strategies, based on how much humans rely on machines. For the extreme point of 'fully automated judgments', we further include a pilot experiment on whether LLM-based relevance judgments correlate with judgments from trained human assessors. We conclude the paper by providing opposing perspectives for and against the use of LLMs for automatic relevance judgments, and a compromise perspective, informed by our analyses of the literature, our preliminary experimental evidence, and our experience as IR researchers.", "year": 2023, "venue": "International Conference on the Theory of Information Retrieval", "authors": [ "G. Faggioli", "Laura Dietz", "C. Clarke", "Gianluca Demartini", "Matthias Hagen", "C. Hauff", "N. Kando", "E. Kanoulas", "Martin Potthast", "Benno Stein", "Henning Wachsmuth" ], "externalIds": { "DBLP": "journals/corr/abs-2304-09161", "ArXiv": "2304.09161", "DOI": "10.1145/3578337.3605136", "CorpusId": 258187001 }, "url": "https://www.semanticscholar.org/paper/c8b271d6bf6f3906edb012c62a5ba1193e9c74ae", "referenceCount": 98, "citationCount": 68, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Planning with Large Language Models for Code Generation", "abstract": "Existing large language model-based code generation pipelines typically use beam search or sampling algorithms during the decoding process. 
Although the programs they generate achieve high token-matching-based scores, they often fail to compile or generate incorrect outputs. The main reason is that conventional Transformer decoding algorithms may not be the best choice for code generation. In this work, we propose a novel Transformer decoding algorithm, Planning-Guided Transformer Decoding (PG-TD), that uses a planning algorithm to do lookahead search and guide the Transformer to generate better programs. Specifically, instead of simply optimizing the likelihood of the generated sequences, the Transformer makes use of a planner to generate candidate programs and test them on public test cases. The Transformer can therefore make more informed decisions and generate tokens that will eventually lead to higher-quality programs. We also design a mechanism that shares information between the Transformer and the planner to make our algorithm computationally efficient. We empirically evaluate our framework with several large language models as backbones on public coding challenge benchmarks, showing that 1) it can generate programs that consistently achieve higher performance compared with competing baseline methods; 2) it enables controllable code generation, such as concise codes and highly-commented codes by optimizing modified objective.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Shun Zhang", "Zhenfang Chen", "Yikang Shen", "Mingyu Ding", "J. Tenenbaum", "Chuang Gan" ], "externalIds": { "DBLP": "journals/corr/abs-2303-05510", "ArXiv": "2303.05510", "DOI": "10.48550/arXiv.2303.05510", "CorpusId": 257427177 }, "url": "https://www.semanticscholar.org/paper/407b9e9478ba6bff43ce4b20e8b6cb2b303477d2", "referenceCount": 48, "citationCount": 75, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Make-A-Video: Text-to-Video Generation without Text-Video Data", "abstract": "We propose Make-A-Video -- an approach for directly translating the tremendous recent progress in Text-to-Image (T2I) generation to Text-to-Video (T2V). Our intuition is simple: learn what the world looks like and how it is described from paired text-image data, and learn how the world moves from unsupervised video footage. Make-A-Video has three advantages: (1) it accelerates training of the T2V model (it does not need to learn visual and multimodal representations from scratch), (2) it does not require paired text-video data, and (3) the generated videos inherit the vastness (diversity in aesthetic, fantastical depictions, etc.) of today's image generation models. We design a simple yet effective way to build on T2I models with novel and effective spatial-temporal modules. First, we decompose the full temporal U-Net and attention tensors and approximate them in space and time. Second, we design a spatial temporal pipeline to generate high resolution and frame rate videos with a video decoder, interpolation model and two super resolution models that can enable various applications besides T2V. 
In all aspects, spatial and temporal resolution, faithfulness to text, and quality, Make-A-Video sets the new state-of-the-art in text-to-video generation, as determined by both qualitative and quantitative measures.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Uriel Singer", "Adam Polyak", "Thomas Hayes", "Xiaoyue Yin", "Jie An", "Songyang Zhang", "Qiyuan Hu", "Harry Yang", "Oron Ashual", "Oran Gafni", "Devi Parikh", "Sonal Gupta", "Yaniv Taigman" ], "externalIds": { "DBLP": "conf/iclr/SingerPH00ZHYAG23", "ArXiv": "2209.14792", "CorpusId": 252595919 }, "url": "https://www.semanticscholar.org/paper/1e33716e8820b867d5a8aaebab44c2d3135ea4ac", "referenceCount": 51, "citationCount": 907, "influentialCitationCount": 77, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "VQGAN-CLIP: Open Domain Image Generation and Editing with Natural Language Guidance", "abstract": "Generating and editing images from open domain text prompts is a challenging task that heretofore has required expensive and specially trained models. We demonstrate a novel methodology for both tasks which is capable of producing images of high visual quality from text prompts of significant semantic complexity without any training by using a multimodal encoder to guide image generations. We demonstrate on a variety of tasks how using CLIP [37] to guide VQGAN [11] produces higher visual quality outputs than prior, less flexible approaches like DALL-E [38], GLIDE [33] and Open-Edit [24], despite not being trained for the tasks presented. Our code is available in a public repository.", "year": 2022, "venue": "European Conference on Computer Vision", "authors": [ "Katherine Crowson", "Stella Biderman", "Daniel Kornis", "Dashiell Stander", "Eric Hallahan", "Louis Castricato", "Edward Raff" ], "externalIds": { "DBLP": "journals/corr/abs-2204-08583", "ArXiv": "2204.08583", "DOI": "10.48550/arXiv.2204.08583", "CorpusId": 248239727 }, "url": "https://www.semanticscholar.org/paper/6979ce65b9f657672cd3a0b9217ead51511c1838", "referenceCount": 57, "citationCount": 321, "influentialCitationCount": 30, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Mental State Classification Using Multi-Graph Features", "abstract": "We consider the problem of extracting features from passive, multi-channel electroencephalogram (EEG) devices for downstream inference tasks related to high-level mental states such as stress and cognitive load. Our proposed feature extraction method uses recently developed spectral-based multi-graph tools and applies them to the time series of graphs implied by the statistical dependence structure (e.g., correlation) amongst the multiple sensors. We study the features in the context of two datasets each consisting of at least 30 participants and recorded using multi-channel EEG systems. We compare the classification performance of a classifier trained on the proposed features to a classifier trained on the traditional band power-based features in three settings and find that the two feature sets offer complementary predictive information. We conclude by showing that the importance of particular channels and pairs of channels for classification when using the proposed features is neuroscientifically valid.", "year": 2022, "venue": "Frontiers in Human Neuroscience", "authors": [ "Guodong Chen", "Hayden S. Helm", "Kate Lytvynets", "Weiwei Yang", "Carey E. 
Priebe" ], "externalIds": { "DBLP": "journals/corr/abs-2203-00516", "PubMedCentral": "9307990", "ArXiv": "2203.00516", "DOI": "10.3389/fnhum.2022.930291", "CorpusId": 247187688, "PubMed": "35880106" }, "url": "https://www.semanticscholar.org/paper/4fbfafb3db146e554301b0d1aeda0e8873dbfaa4", "referenceCount": 47, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Engineering", "Computer Science", "Mathematics" ] }, { "title": "Text and Code Embeddings by Contrastive Pre-Training", "abstract": "Text embeddings are useful features in many applications such as semantic search and computing text similarity. Previous work typically trains models customized for different use cases, varying in dataset choice, training objective and model architecture. In this work, we show that contrastive pre-training on unsupervised data at scale leads to high quality vector representations of text and code. The same unsupervised text embeddings that achieve new state-of-the-art results in linear-probe classification also display impressive semantic search capabilities and sometimes even perform competitively with fine-tuned models. On linear-probe classification accuracy averaging over 7 tasks, our best unsupervised model achieves a relative improvement of 4% and 1.8% over previous best unsupervised and supervised text embedding models respectively. The same text embeddings when evaluated on large-scale semantic search attains a relative improvement of 23.4%, 14.7%, and 10.6% over previous best unsupervised methods on MSMARCO, Natural Questions and TriviaQA benchmarks, respectively. Similarly to text embeddings, we train code embedding models on (text, code) pairs, obtaining a 20.8% relative improvement over prior best work on code search.", "year": 2022, "venue": "arXiv.org", "authors": [ "Arvind Neelakantan", "Tao Xu", "Raul Puri", "Alec Radford", "Jesse Michael Han", "Jerry Tworek", "Qiming Yuan", "N. Tezak", "Jong Wook Kim", "Chris Hallacy", "Johannes Heidecke", "Pranav Shyam", "Boris Power", "Tyna Eloundou Nekoul", "Girish Sastry", "Gretchen Krueger", "D. Schnurr", "F. Such", "K. Hsu", "Madeleine Thompson", "Tabarak Khan", "Toki Sherbakov", "Joanne Jang", "P. Welinder", "Lilian Weng" ], "externalIds": { "ArXiv": "2201.10005", "DBLP": "journals/corr/abs-2201-10005", "CorpusId": 246275593 }, "url": "https://www.semanticscholar.org/paper/6d7d4fca9840504f630e9bea6acaa07322a6e889", "referenceCount": 87, "citationCount": 316, "influentialCitationCount": 30, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multitask Prompted Training Enables Zero-Shot Task Generalization", "abstract": "Large language models have recently been shown to attain reasonable zero-shot generalization on a diverse set of tasks (Brown et al., 2020). It has been hypothesized that this is a consequence of implicit multitask learning in language models' pretraining (Radford et al., 2019). Can zero-shot generalization instead be directly induced by explicit multitask learning? To test this question at scale, we develop a system for easily mapping any natural language tasks into a human-readable prompted form. We convert a large set of supervised datasets, each with multiple prompts with diverse wording. These prompted datasets allow for benchmarking the ability of a model to perform completely held-out tasks. We fine-tune a pretrained encoder-decoder model (Raffel et al., 2020; Lester et al., 2021) on this multitask mixture covering a wide variety of tasks. 
The model attains strong zero-shot performance on several standard datasets, often outperforming models up to 16x its size. Further, our approach attains strong performance on a subset of tasks from the BIG-bench benchmark, outperforming models up to 6x its size. All trained models are available at https://github.com/bigscience-workshop/t-zero and all prompts are available at https://github.com/bigscience-workshop/promptsource.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Victor Sanh", "Albert Webson", "Colin Raffel", "Stephen H. Bach", "Lintang Sutawika", "Zaid Alyafeai", "Antoine Chaffin", "Arnaud Stiegler", "Teven Le Scao", "Arun Raja", "Manan Dey", "M Saiful Bari", "Canwen Xu", "Urmish Thakker", "Shanya Sharma", "Eliza Szczechla", "Taewoon Kim", "Gunjan Chhablani", "Nihal V. Nayak", "Debajyoti Datta", "Jonathan D. Chang", "Mike Tian-Jian Jiang", "Han Wang", "Matteo Manica", "Sheng Shen", "Zheng-Xin Yong", "Harshit Pandey", "Rachel Bawden", "Thomas Wang", "Trishala Neeraj", "Jos Rozen", "Abheesht Sharma", "Andrea Santilli", "Thibault Févry", "Jason Alan Fries", "Ryan Teehan", "Stella Biderman", "Leo Gao", "T. Bers", "Thomas Wolf", "Alexander M. Rush" ], "externalIds": { "ArXiv": "2110.08207", "DBLP": "conf/iclr/SanhWRBSACSRDBX22", "CorpusId": 239009562 }, "url": "https://www.semanticscholar.org/paper/17dd3555fd1ccf1141cf984347fa1b3fd6b009ca", "referenceCount": 0, "citationCount": 1475, "influentialCitationCount": 131, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Inducing a hierarchy for multi-class classification problems", "abstract": "In applications where categorical labels follow a natural hierarchy, classification methods that exploit the label structure often outperform those that do not. Un-fortunately, the majority of classification datasets do not come pre-equipped with a hierarchical structure and classical flat classifiers must be employed. In this paper, we investigate a class of methods that induce a hierarchy that can similarly improve classification performance over flat classifiers. The class of methods follows the structure of first clustering the conditional distributions and subsequently using a hierarchical classifier with the induced hierarchy. We demonstrate the effectiveness of the class of methods both for discovering a latent hierarchy and for improving accuracy in principled simulation settings and three real data applications.", "year": 2021, "venue": "arXiv.org", "authors": [ "Hayden S. Helm", "Weiwei Yang", "Sujeeth Bharadwaj", "Kate Lytvynets", "Oriana Riva", "Christopher M. White", "Ali Geisa", "C. Priebe" ], "externalIds": { "ArXiv": "2102.10263", "DBLP": "journals/corr/abs-2102-10263", "CorpusId": 231986089 }, "url": "https://www.semanticscholar.org/paper/1b219e6a8fb98d5677369079b2b3164e9e1b5f4a", "referenceCount": 42, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Variability and heritability of mouse brain structure: Microscopic MRI atlases and connectomes for diverse strains", "abstract": null, "year": 2020, "venue": "NeuroImage", "authors": [ "Nian Wang", "R. J. Anderson", "D. Ashbrook", "Vivek Gopalakrishnan", "Youngser Park", "Carey E. Priebe", "Y. Qi", "Rick Laoprasert", "J. Vogelstein", "Robert W. Williams", "G. A. 
Johnson" ], "externalIds": { "MAG": "3078197095", "DBLP": "journals/neuroimage/WangAAGPPQLVWJ20", "DOI": "10.1016/j.neuroimage.2020.117274", "CorpusId": 221142151, "PubMed": "32818613" }, "url": "https://www.semanticscholar.org/paper/2f7cccbfa3553eb42f58d1b0ef4887a9c429896b", "referenceCount": 60, "citationCount": 34, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Biology", "Computer Science" ] }, { "title": "Language Models are Few-Shot Learners", "abstract": "Recent work has demonstrated substantial gains on many NLP tasks and benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples. By contrast, humans can generally perform a new language task from only a few examples or from simple instructions - something which current NLP systems still largely struggle to do. Here we show that scaling up language models greatly improves task-agnostic, few-shot performance, sometimes even reaching competitiveness with prior state-of-the-art fine-tuning approaches. Specifically, we train GPT-3, an autoregressive language model with 175 billion parameters, 10x more than any previous non-sparse language model, and test its performance in the few-shot setting. For all tasks, GPT-3 is applied without any gradient updates or fine-tuning, with tasks and few-shot demonstrations specified purely via text interaction with the model. GPT-3 achieves strong performance on many NLP datasets, including translation, question-answering, and cloze tasks, as well as several tasks that require on-the-fly reasoning or domain adaptation, such as unscrambling words, using a novel word in a sentence, or performing 3-digit arithmetic. At the same time, we also identify some datasets where GPT-3's few-shot learning still struggles, as well as some datasets where GPT-3 faces methodological issues related to training on large web corpora. Finally, we find that GPT-3 can generate samples of news articles which human evaluators have difficulty distinguishing from articles written by humans. We discuss broader societal impacts of this finding and of GPT-3 in general.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Tom B. Brown", "Benjamin Mann", "Nick Ryder", "Melanie Subbiah", "J. Kaplan", "Prafulla Dhariwal", "Arvind Neelakantan", "Pranav Shyam", "Girish Sastry", "Amanda Askell", "Sandhini Agarwal", "Ariel Herbert-Voss", "Gretchen Krueger", "T. Henighan", "R. Child", "A. Ramesh", "Daniel M. Ziegler", "Jeff Wu", "Clemens Winter", "Christopher Hesse", "Mark Chen", "Eric Sigler", "Ma-teusz Litwin", "Scott Gray", "B. Chess", "Jack Clark", "Christopher Berner", "Sam McCandlish", "Alec Radford", "I. Sutskever", "Dario Amodei" ], "externalIds": { "ArXiv": "2005.14165", "DBLP": "conf/nips/BrownMRSKDNSSAA20", "MAG": "3030163527", "CorpusId": 218971783 }, "url": "https://www.semanticscholar.org/paper/90abbc2cf38462b954ae1b772fac9532e2ccd8b0", "referenceCount": 146, "citationCount": 30854, "influentialCitationCount": 3529, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", "abstract": "BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) has set a new state-of-the-art performance on sentence-pair regression tasks like semantic textual similarity (STS). 
However, it requires that both sentences are fed into the network, which causes a massive computational overhead: Finding the most similar pair in a collection of 10,000 sentences requires about 50 million inference computations (~65 hours) with BERT. The construction of BERT makes it unsuitable for semantic similarity search as well as for unsupervised tasks like clustering. In this publication, we present Sentence-BERT (SBERT), a modification of the pretrained BERT network that use siamese and triplet network structures to derive semantically meaningful sentence embeddings that can be compared using cosine-similarity. This reduces the effort for finding the most similar pair from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while maintaining the accuracy from BERT. We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning tasks, where it outperforms other state-of-the-art sentence embeddings methods.", "year": 2019, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Nils Reimers", "Iryna Gurevych" ], "externalIds": { "DBLP": "journals/corr/abs-1908-10084", "ACL": "D19-1410", "ArXiv": "1908.10084", "MAG": "2971193649", "DOI": "10.18653/v1/D19-1410", "CorpusId": 201646309 }, "url": "https://www.semanticscholar.org/paper/93d63ec754f29fa22572615320afe0521f7ec66d", "referenceCount": 38, "citationCount": 9373, "influentialCitationCount": 1441, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GraSPy: Graph Statistics in Python", "abstract": "We introduce GraSPy, a Python library devoted to statistical inference, machine learning, and visualization of random graphs and graph populations. This package provides flexible and easy-to-use algorithms for analyzing and understanding graphs with a scikit-learn compliant API. GraSPy can be downloaded from Python Package Index (PyPi), and is released under the Apache 2.0 open-source license. The documentation and all releases are available at this https URL.", "year": 2019, "venue": "Journal of machine learning research", "authors": [ "Jaewon Chung", "B. Pedigo", "Eric W. Bridgeford", "B. Varjavand", "Hayden S. Helm", "J. Vogelstein" ], "externalIds": { "DBLP": "journals/jmlr/ChungPBVHV19", "MAG": "2987403824", "ArXiv": "1904.05329", "CorpusId": 131774186 }, "url": "https://www.semanticscholar.org/paper/85cb7f9164310fcf51207e314e159ee8d149838f", "referenceCount": 26, "citationCount": 33, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Deep Voice: Real-time Neural Text-to-Speech", "abstract": "We present Deep Voice, a production-quality text-to-speech system constructed entirely from deep neural networks. Deep Voice lays the groundwork for truly end-to-end neural speech synthesis. The system comprises five major building blocks: a segmentation model for locating phoneme boundaries, a grapheme-to-phoneme conversion model, a phoneme duration prediction model, a fundamental frequency prediction model, and an audio synthesis model. For the segmentation model, we propose a novel way of performing phoneme boundary detection with deep neural networks using connectionist temporal classification (CTC) loss. For the audio synthesis model, we implement a variant of WaveNet that requires fewer parameters and trains faster than the original. 
By using a neural network for each component, our system is simpler and more flexible than traditional text-to-speech systems, where each component requires laborious feature engineering and extensive domain expertise. Finally, we show that inference with our system can be performed faster than real time and describe optimized WaveNet inference kernels on both CPU and GPU that achieve up to 400x speedups over existing implementations.", "year": 2017, "venue": "International Conference on Machine Learning", "authors": [ "Sercan Ö. Arik", "Mike Chrzanowski", "Adam Coates", "G. Diamos", "Andrew Gibiansky", "Yongguo Kang", "Xian Li", "John Miller", "Andrew Ng", "Jonathan Raiman", "Shubho Sengupta", "M. Shoeybi" ], "externalIds": { "MAG": "2964281804", "DBLP": "conf/icml/ArikCCDGKLMNRSS17", "ArXiv": "1702.07825", "CorpusId": 5580515 }, "url": "https://www.semanticscholar.org/paper/63880b57b95de8afd73036e55b9c4bccb7a528b9", "referenceCount": 30, "citationCount": 583, "influentialCitationCount": 38, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Mathematical Statistics : Basic Ideas and Selected Topics, Volumes I-II Package", "abstract": null, "year": 2015, "venue": "", "authors": [ "P. Bickel", "K. Doksum" ], "externalIds": { "MAG": "2889151444", "DOI": "10.1201/9781315369266", "CorpusId": 227008753 }, "url": "https://www.semanticscholar.org/paper/67d5f03c4921e75a9c0dd5bd699ad74512e52eb8", "referenceCount": 0, "citationCount": 88, "influentialCitationCount": 16, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Efficient Estimation of Word Representations in Vector Space", "abstract": "We propose two novel model architectures for computing continuous vector\nrepresentations of words from very large data sets. The quality of these\nrepresentations is measured in a word similarity task, and the results are\ncompared to the previously best performing techniques based on different types\nof neural networks. We observe large improvements in accuracy at much lower\ncomputational cost, i.e. it takes less than a day to learn high quality word\nvectors from a 1.6 billion words data set. Furthermore, we show that these\nvectors provide state-of-the-art performance on our test set for measuring\nsyntactic and semantic word similarities.", "year": 2013, "venue": "International Conference on Learning Representations", "authors": [ "Tomas Mikolov", "Kai Chen", "G. Corrado", "J. 
Dean" ], "externalIds": { "MAG": "2950577311", "DBLP": "journals/corr/abs-1301-3781", "ArXiv": "1301.3781", "CorpusId": 5959482 }, "url": "https://www.semanticscholar.org/paper/f6b51c8753a871dc94ff32152c00c01e94f90f09", "referenceCount": 36, "citationCount": 29655, "influentialCitationCount": 4069, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Modern Multidimensional Scaling: Theory and Applications", "abstract": "Fundamentals of MDS.- The Four Purposes of Multidimensional Scaling.- Constructing MDS Representations.- MDS Models and Measures of Fit.- Three Applications of MDS.- MDS and Facet Theory.- How to Obtain Proximities.- MDS Models and Solving MDS Problems.- Matrix Algebra for MDS.- A Majorization Algorithm for Solving MDS.- Metric and Nonmetric MDS.- Confirmatory MDS.- MDS Fit Measures, Their Relations, and Some Algorithms.- Classical Scaling.- Special Solutions, Degeneracies, and Local Minima.- Unfolding.- Unfolding.- Avoiding Trivial Solutions in Unfolding.- Special Unfolding Models.- MDS Geometry as a Substantive Model.- MDS as a Psychological Model.- Scalar Products and Euclidean Distances.- Euclidean Embeddings.- MDS and Related Methods.- Procrustes Procedures.- Three-Way Procrustean Models.- Three-Way MDS Models.- Modeling Asymmetric Data.- Methods Related to MDS.", "year": 1999, "venue": "", "authors": [ "I. Borg", "P. Groenen" ], "externalIds": { "MAG": "2067752346", "DOI": "10.1111/J.1745-3984.2003.TB01108.X", "CorpusId": 123350066 }, "url": "https://www.semanticscholar.org/paper/e854885e41b6754177d0892129a8215b7e467c23", "referenceCount": 4, "citationCount": 4166, "influentialCitationCount": 416, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Multidimensional scaling by optimizing goodness of fit to a nonmetric hypothesis", "abstract": null, "year": 1964, "venue": "", "authors": [ "J. Kruskal" ], "externalIds": { "MAG": "2152825437", "DOI": "10.1007/BF02289565", "CorpusId": 48165675 }, "url": "https://www.semanticscholar.org/paper/e04108dc293c9cd7cabf32ee1524eaab0d4641b3", "referenceCount": 19, "citationCount": 7143, "influentialCitationCount": 681, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "A Survey of Text Representation and Embedding Techniques in NLP", "abstract": "Natural Language Processing (NLP) is a research field where a language in consideration is processed to understand its syntactic, semantic, and sentimental aspects. The advancement in the NLP area has helped solve problems in the domains such as Neural Machine Translation, Name Entity Recognition, Sentiment Analysis, and Chatbots, to name a few. The topic of NLP broadly consists of two main parts: the representation of the input text (raw data) into numerical format (vectors or matrix) and the design of models for processing the numerical data. This paper focuses on the former part and surveys how the NLP field has evolved from rule-based, statistical to more context-sensitive learned representations. For each embedding type, we list their representation, issues they addressed, limitations, and applications. This survey covers the history of text representations from the 1970s and onwards, from regular expressions to the latest vector representations used to encode the raw text data. 
It demonstrates how the NLP field progressed from where it could comprehend just bits and pieces to all the significant aspects of the text over time.", "year": 2023, "venue": "IEEE Access", "authors": [ "Rajvardhan Patil", "Sorio Boit", "V. Gudivada", "Jagadeesh Nandigam" ], "externalIds": { "DBLP": "journals/access/PatilBGN23", "DOI": "10.1109/ACCESS.2023.3266377", "CorpusId": 258088263 }, "url": "https://www.semanticscholar.org/paper/9159408884cbe7f5f7a79d90c9f91ba5cee0d932", "referenceCount": 126, "citationCount": 39, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Procrustes methods in the statistical analysis of shape", "abstract": "Procrustes Methods in the Statistical Analysis of Shape. Journal of the Royal Statistical Society. Basic techniques such as Procrustes analysis, tan.", "year": 1991, "venue": "", "authors": [ "C. Goodall" ], "externalIds": { "MAG": "33507944", "DOI": "10.1111/J.2517-6161.1991.TB01825.X", "CorpusId": 53315995 }, "url": "https://www.semanticscholar.org/paper/b0114a99504a294a70c21bcf8c96a68a5faf3428", "referenceCount": 88, "citationCount": 1981, "influentialCitationCount": 164, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] } ] }, "DreamWaltz-G: Expressive 3D Gaussian Avatars from Skeleton-Guided 2D Diffusion": { "paper_title": "DreamWaltz-G: Expressive 3D Gaussian Avatars from Skeleton-Guided 2D Diffusion", "arxiv_id": "2409.17145v1", "keyword": "diffusion model", "authors": [ "Yukun Huang", "Jianan Wang", "Ailing Zeng", "Zheng-Jun Zha", "Lei Zhang", "Xihui Liu" ], "references": [ { "title": "Expressive Whole-Body 3D Gaussian Avatar", "abstract": "Facial expression and hand motions are necessary to express our emotions and interact with the world. Nevertheless, most of the 3D human avatars modeled from a casually captured video only support body motions without facial expressions and hand motions. In this work, we present ExAvatar, an expressive whole-body 3D human avatar learned from a short monocular video. We design ExAvatar as a combination of the whole-body parametric mesh model (SMPL-X) and 3D Gaussian Splatting (3DGS). The main challenges are 1) a limited diversity of facial expressions and poses in the video and 2) the absence of 3D observations, such as 3D scans and RGBD images. The limited diversity in the video makes animations with novel facial expressions and poses non-trivial. In addition, the absence of 3D observations could cause significant ambiguity in human parts that are not observed in the video, which can result in noticeable artifacts under novel motions. To address them, we introduce our hybrid representation of the mesh and 3D Gaussians. Our hybrid representation treats each 3D Gaussian as a vertex on the surface with pre-defined connectivity information (i.e., triangle faces) between them following the mesh topology of SMPL-X. It makes our ExAvatar animatable with novel facial expressions driven by the facial expression space of SMPL-X. 
In addition, by using connectivity-based regularizers, we significantly reduce artifacts in novel facial expressions and poses.", "year": 2024, "venue": "arXiv.org", "authors": [ "Gyeongsik Moon", "Takaaki Shiratori", "Shunsuke Saito" ], "externalIds": { "DBLP": "journals/corr/abs-2407-21686", "ArXiv": "2407.21686", "DOI": "10.48550/arXiv.2407.21686", "CorpusId": 271571347 }, "url": "https://www.semanticscholar.org/paper/b1830f1ff837af325a015db38e374eec5e76260a", "referenceCount": 54, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PhysAvatar: Learning the Physics of Dressed 3D Avatars from Visual Observations", "abstract": "Modeling and rendering photorealistic avatars is of crucial importance in many applications. Existing methods that build a 3D avatar from visual observations, however, struggle to reconstruct clothed humans. We introduce PhysAvatar, a novel framework that combines inverse rendering with inverse physics to automatically estimate the shape and appearance of a human from multi-view video data along with the physical parameters of the fabric of their clothes. For this purpose, we adopt a mesh-aligned 4D Gaussian technique for spatio-temporal mesh tracking as well as a physically based inverse renderer to estimate the intrinsic material properties. PhysAvatar integrates a physics simulator to estimate the physical parameters of the garments using gradient-based optimization in a principled manner. These novel capabilities enable PhysAvatar to create high-quality novel-view renderings of avatars dressed in loose-fitting clothes under motions and lighting conditions not seen in the training data. This marks a significant advancement towards modeling photorealistic digital humans using physically based inverse rendering with physics in the loop. Our project website is at: https://qingqing-zhao.github.io/PhysAvatar", "year": 2024, "venue": "arXiv.org", "authors": [ "Yang Zheng", "Qingqing Zhao", "Guandao Yang", "Wang Yifan", "Donglai Xiang", "Florian Dubost", "Dmitry Lagun", "T. Beeler", "Federico Tombari", "Leonidas Guibas", "Gordon Wetzstein" ], "externalIds": { "DBLP": "journals/corr/abs-2404-04421", "ArXiv": "2404.04421", "DOI": "10.48550/arXiv.2404.04421", "CorpusId": 269005823 }, "url": "https://www.semanticscholar.org/paper/286e137bd617d1c29e7760ec5273212c7d29f07a", "referenceCount": 131, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "UV Gaussians: Joint Learning of Mesh Deformation and Gaussian Textures for Human Avatar Modeling", "abstract": "Reconstructing photo-realistic drivable human avatars from multi-view image sequences has been a popular and challenging topic in the field of computer vision and graphics. While existing NeRF-based methods can achieve high-quality novel view rendering of human models, both training and inference processes are time-consuming. Recent approaches have utilized 3D Gaussians to represent the human body, enabling faster training and rendering. However, they undermine the importance of the mesh guidance and directly predict Gaussians in 3D space with coarse mesh guidance. This hinders the learning procedure of the Gaussians and tends to produce blurry textures. Therefore, we propose UV Gaussians, which models the 3D human body by jointly learning mesh deformations and 2D UV-space Gaussian textures. 
We utilize the embedding of UV map to learn Gaussian textures in 2D space, leveraging the capabilities of powerful 2D networks to extract features. Additionally, through an independent Mesh network, we optimize pose-dependent geometric deformations, thereby guiding Gaussian rendering and significantly enhancing rendering quality. We collect and process a new dataset of human motion, which includes multi-view images, scanned models, parametric model registration, and corresponding texture maps. Experimental results demonstrate that our method achieves state-of-the-art synthesis of novel view and novel pose. The code and data will be made available on the homepage https://alex-jyj.github.io/UV-Gaussians/ once the paper is accepted.", "year": 2024, "venue": "arXiv.org", "authors": [ "Yujiao Jiang", "Qingmin Liao", "Xiaoyu Li", "Li Ma", "Qi Zhang", "Chaopeng Zhang", "Zongqing Lu", "Ying Shan" ], "externalIds": { "ArXiv": "2403.11589", "DBLP": "journals/corr/abs-2403-11589", "DOI": "10.48550/arXiv.2403.11589", "CorpusId": 268531576 }, "url": "https://www.semanticscholar.org/paper/895e3dcdb5a755a06e2f17329cf70cfecb254b30", "referenceCount": 51, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Scaling Rectified Flow Transformers for High-Resolution Image Synthesis", "abstract": "Diffusion models create data from noise by inverting the forward paths of data towards noise and have emerged as a powerful generative modeling technique for high-dimensional, perceptual data such as images and videos. Rectified flow is a recent generative model formulation that connects data and noise in a straight line. Despite its better theoretical properties and conceptual simplicity, it is not yet decisively established as standard practice. In this work, we improve existing noise sampling techniques for training rectified flow models by biasing them towards perceptually relevant scales. Through a large-scale study, we demonstrate the superior performance of this approach compared to established diffusion formulations for high-resolution text-to-image synthesis. Additionally, we present a novel transformer-based architecture for text-to-image generation that uses separate weights for the two modalities and enables a bidirectional flow of information between image and text tokens, improving text comprehension, typography, and human preference ratings. We demonstrate that this architecture follows predictable scaling trends and correlates lower validation loss to improved text-to-image synthesis as measured by various metrics and human evaluations. Our largest models outperform state-of-the-art models, and we will make our experimental data, code, and model weights publicly available.", "year": 2024, "venue": "International Conference on Machine Learning", "authors": [ "Patrick Esser", "Sumith Kulal", "A. 
Blattmann", "Rahim Entezari", "Jonas Muller", "Harry Saini", "Yam Levi", "Dominik Lorenz", "Axel Sauer", "Frederic Boesel", "Dustin Podell", "Tim Dockhorn", "Zion English", "Kyle Lacey", "Alex Goodwin", "Yannik Marek", "Robin Rombach" ], "externalIds": { "ArXiv": "2403.03206", "DBLP": "conf/icml/EsserKBEMSLLSBP24", "DOI": "10.48550/arXiv.2403.03206", "CorpusId": 268247980 }, "url": "https://www.semanticscholar.org/paper/41a66997ce0a366bba3becf7c3f37c9aebb13fbd", "referenceCount": 75, "citationCount": 188, "influentialCitationCount": 27, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GaMeS: Mesh-Based Adapting and Modification of Gaussian Splatting", "abstract": "Recently, a range of neural network-based methods for image rendering have been introduced. One such widely-researched neural radiance field (NeRF) relies on a neural network to represent 3D scenes, allowing for realistic view synthesis from a small number of 2D images. However, most NeRF models are constrained by long training and inference times. In comparison, Gaussian Splatting (GS) is a novel, state-of-the-art technique for rendering points in a 3D scene by approximating their contribution to image pixels through Gaussian distributions, warranting fast training and swift, real-time rendering. A drawback of GS is the absence of a well-defined approach for its conditioning due to the necessity to condition several hundred thousand Gaussian components. To solve this, we introduce the Gaussian Mesh Splatting (GaMeS) model, which allows modification of Gaussian components in a similar way as meshes. We parameterize each Gaussian component by the vertices of the mesh face. Furthermore, our model needs mesh initialization on input or estimated mesh during training. We also define Gaussian splats solely based on their location on the mesh, allowing for automatic adjustments in position, scale, and rotation during animation. As a result, we obtain a real-time rendering of editable GS.", "year": 2024, "venue": "arXiv.org", "authors": [ "Joanna Waczy'nska", "Piotr Borycki", "S. Tadeja", "Jacek Tabor", "P. Spurek" ], "externalIds": { "ArXiv": "2402.01459", "DBLP": "journals/corr/abs-2402-01459", "DOI": "10.48550/arXiv.2402.01459", "CorpusId": 267406241 }, "url": "https://www.semanticscholar.org/paper/a667fb494417045b5d205e8ef497728eae6fc23c", "referenceCount": 28, "citationCount": 15, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GAvatar: Animatable 3D Gaussian Avatars with Implicit Mesh Learning", "abstract": "Gaussian splatting has emerged as a powerful 3D representation that harnesses the advantages of both explicit (mesh) and implicit (NeRF) 3D representations. In this paper, we seek to leverage Gaussian splatting to generate realistic animatable avatars from textual descriptions, addressing the limitations (e.g., flexibility and efficiency) imposed by mesh or NeRF-based representations. However, a naive application of Gaussian splatting cannot generate high-quality animatable avatars and suffers from learning instability; it also cannot capture fine avatar geometries and often leads to degenerate body parts. To tackle these problems, we first propose a primitive-based 3D Gaussian representation where Gaussians are defined inside pose-driven primitives to facilitate animation. Second, to stabilize and amortize the learning of millions of Gaussians, we propose to use neural implicit fields to predict the Gaussian attributes (e.g., colors). 
Finally, to capture fine avatar geometries and extract detailed meshes, we propose a novel SDF-based implicit mesh learning approach for 3D Gaussians that regularizes the underlying geometries and extracts highly detailed textured meshes. Our proposed method, GAvatar, enables the large-scale generation of diverse animatable avatars using only text prompts. GAvatar significantly surpasses existing methods in terms of both appearance and geometry quality, and achieves extremely fast rendering (100 fps) at 1K resolution.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Ye Yuan", "Xueting Li", "Yangyi Huang", "Shalini De Mello", "Koki Nagano", "Jan Kautz", "Umar Iqbal" ], "externalIds": { "DBLP": "journals/corr/abs-2312-11461", "ArXiv": "2312.11461", "DOI": "10.1109/CVPR52733.2024.00091", "CorpusId": 266359217 }, "url": "https://www.semanticscholar.org/paper/b564d3673bbe1258b77dab5b6b4144bb40fdd399", "referenceCount": 51, "citationCount": 21, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "3DGS-Avatar: Animatable Avatars via Deformable 3D Gaussian Splatting", "abstract": "We introduce an approach that creates animatable human avatars from monocular videos using 3D Gaussian Splatting (3DGS). Existing methods based on neural radiance fields (NeRFs) achieve high-quality novel-view/novel-pose image synthesis but often require days of training, and are extremely slow at inference time. Recently, the community has explored fast grid structures for efficient training of clothed avatars. Albeit being extremely fast at training, these methods can barely achieve an interactive rendering frame rate with around 15 FPS. In this paper, we use 3D Gaussian Splatting and learn a non-rigid deformation network to reconstruct animatable clothed human avatars that can be trained within 30 minutes and rendered at real-time frame rates (50+ FPS). Given the explicit nature of our representation, we further introduce as-isometric-as-possible regularizations on both the Gaussian mean vectors and the covariance matrices, enhancing the generalization of our model on highly articulated unseen poses. Experimental results show that our method achieves comparable and even better performance compared to state-of-the-art approaches on animatable avatar creation from a monocular input, while being 400x and 250x faster in training and inference, respectively. Please see our project page at https://neuralbodies.github.io/3DGS-Avatar.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Zhiyin Qian", "Shaofei Wang", "Marko Mihajlovic", "Andreas Geiger", "Siyu Tang" ], "externalIds": { "DBLP": "journals/corr/abs-2312-09228", "ArXiv": "2312.09228", "DOI": "10.1109/CVPR52733.2024.00480", "CorpusId": 266210317 }, "url": "https://www.semanticscholar.org/paper/fa35524739d5c60d94befc3f8e77488b4dd810db", "referenceCount": 80, "citationCount": 43, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GaussianAvatar: Towards Realistic Human Avatar Modeling from a Single Video via Animatable 3D Gaussians", "abstract": "We present GaussianAvatar, an efficient approach to creating realistic human avatars with dynamic 3D appearances from a single video. We start by introducing animatable 3D Gaussians to explicitly represent humans in various poses and clothing styles.
Such an explicit and animatable representation can fuse 3D appearances more efficiently and consistently from 2D observations. Our representation is further augmented with dynamic properties to support pose-dependent appearance modeling, where a dynamic appearance network along with an optimizable feature tensor is designed to learn the motion-to-appearance mapping. Moreover, by leveraging the differentiable motion condition, our method enables a joint optimization of motions and appearances during avatar modeling, which helps to tackle the long-standing issue of inaccurate motion estimation in monocular settings. The efficacy of GaussianAvatar is validated on both the public dataset and our collected dataset, demonstrating its superior performances in terms of appearance quality and rendering efficiency. The code and dataset are available at https://github.com/aipixel/GaussianAvatar.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Liangxiao Hu", "Hongwen Zhang", "Yuxiang Zhang", "Boyao Zhou", "Boning Liu", "Shengping Zhang", "Liqiang Nie" ], "externalIds": { "DBLP": "journals/corr/abs-2312-02134", "ArXiv": "2312.02134", "DOI": "10.1109/CVPR52733.2024.00067", "CorpusId": 265608952 }, "url": "https://www.semanticscholar.org/paper/64cce98e9a091a28008deb767b00018735d8bac9", "referenceCount": 73, "citationCount": 42, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "HumanGaussian: Text-Driven 3D Human Generation with Gaussian Splatting", "abstract": "Realistic 3D human generation from text prompts is a desirable yet challenging task. Existing methods optimize 3D representations like mesh or neural fields via score distillation sampling (SDS), which suffers from inadequate fine details or excessive training time. In this paper, we propose an efficient yet effective framework, HumanGaussian, that generates high-quality 3D humans with fine-grained geometry and realistic appearance. Our key insight is that 3D Gaussian Splatting is an efficient renderer with periodic Gaussian shrinkage or growing, where such adaptive density control can be naturally guided by intrinsic human structures. Specifically, 1) we first propose a Structure-Aware SDS that simultaneously optimizes human appearance and geometry. The multi-modal score function from both RGB and depth space is leveraged to distill the Gaussian densification and pruning process. 2) Moreover, we devise an Annealed Negative Prompt Guidance by decomposing SDS into a noisier generative score and a cleaner classifier score, which well addresses the over-saturation issue. The floating artifacts are further eliminated based on Gaussian size in a prune-only phase to enhance generation smoothness.
Extensive experiments demonstrate the superior efficiency and competitive quality of our framework, rendering vivid 3D humans under diverse scenarios.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Xian Liu", "Xiaohang Zhan", "Jiaxiang Tang", "Ying Shan", "Gang Zeng", "Dahua Lin", "Xihui Liu", "Ziwei Liu" ], "externalIds": { "ArXiv": "2311.17061", "DBLP": "journals/corr/abs-2311-17061", "DOI": "10.1109/CVPR52733.2024.00635", "CorpusId": 265466220 }, "url": "https://www.semanticscholar.org/paper/7665642af9e682e012bec045102a4d009421067c", "referenceCount": 102, "citationCount": 45, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SuGaR: Surface-Aligned Gaussian Splatting for Efficient 3D Mesh Reconstruction and High-Quality Mesh Rendering", "abstract": "We propose a method to allow precise and extremely fast mesh extraction from 3D Gaussian Splatting [15]. Gaussian Splatting has recently become very popular as it yields realistic rendering while being significantly faster to train than NeRFs. It is however challenging to extract a mesh from the millions of tiny 3D Gaussians as these Gaussians tend to be unorganized after optimization and no method has been proposed so far. Our first key contribution is a regularization term that encourages the Gaussians to align well with the surface of the scene. We then introduce a method that exploits this alignment to extract a mesh from the Gaussians using Poisson reconstruction, which is fast, scalable, and preserves details, in contrast to the Marching Cubes algorithm usually applied to extract meshes from Neural SDFs. Finally, we introduce an optional refinement strategy that binds Gaussians to the surface of the mesh, and jointly optimizes these Gaussians and the mesh through Gaussian splatting rendering. This enables easy editing, sculpting, animating, and relighting of the Gaussians by manipulating the mesh instead of the Gaussians themselves. Retrieving such an editable mesh for realistic rendering is done within minutes with our method, compared to hours with the state-of-the-art method on SDFs, while providing a better rendering quality.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Antoine Gu'edon", "Vincent Lepetit" ], "externalIds": { "ArXiv": "2311.12775", "DBLP": "journals/corr/abs-2311-12775", "DOI": "10.1109/CVPR52733.2024.00512", "CorpusId": 265308825 }, "url": "https://www.semanticscholar.org/paper/e3f80d950e6f841bd7eea4c24d4e1e5aa2bd85c7", "referenceCount": 46, "citationCount": 138, "influentialCitationCount": 32, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LucidDreamer: Towards High-Fidelity Text-to-3D Generation via Interval Score Matching", "abstract": "The recent advancements in text-to-3D generation mark a significant milestone in generative models, unlocking new possibilities for creating imaginative 3D assets across various real-world scenarios. While recent advancements in text-to-3D generation have shown promise, they often fall short in rendering detailed and high-quality 3D models. This problem is especially prevalent as many methods base themselves on Score Distillation Sampling (SDS). This paper identifies a notable deficiency in SDS, that it brings inconsistent and low-quality updating direction for the 3D model, causing the over-smoothing effect. To address this, we propose a novel approach called Interval Score Matching (ISM).
ISM employs deterministic diffusing trajectories and utilizes interval-based score matching to counteract over-smoothing. Furthermore, we incorporate 3D Gaussian Splatting into our text-to-3D generation pipeline. Extensive experiments show that our model largely outperforms the state-of-the-art in quality and training efficiency. Our code is available at: EnVision-Research/LucidDreamer", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Yixun Liang", "Xin Yang", "Jiantao Lin", "Haodong Li", "Xiaogang Xu", "Yingcong Chen" ], "externalIds": { "DBLP": "journals/corr/abs-2311-11284", "ArXiv": "2311.11284", "DOI": "10.1109/CVPR52733.2024.00623", "CorpusId": 265295106 }, "url": "https://www.semanticscholar.org/paper/6f709278506813d04a074e6fa20188cce9bb927b", "referenceCount": 50, "citationCount": 88, "influentialCitationCount": 23, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Drivable 3D Gaussian Avatars", "abstract": "We present Drivable 3D Gaussian Avatars (D3GA), the first 3D controllable model for human bodies rendered with Gaussian splats. Current photorealistic drivable avatars require either accurate 3D registrations during training, dense input images during testing, or both. The ones based on neural radiance fields also tend to be prohibitively slow for telepresence applications. This work uses the recently presented 3D Gaussian Splatting (3DGS) technique to render realistic humans at real-time framerates, using dense calibrated multi-view videos as input. To deform those primitives, we depart from the commonly used point deformation method of linear blend skinning (LBS) and use a classic volumetric deformation method: cage deformations. Given their smaller size, we drive these deformations with joint angles and keypoints, which are more suitable for communication applications. Our experiments on nine subjects with varied body shapes, clothes, and motions obtain higher-quality results than state-of-the-art methods when using the same training and test data.", "year": 2023, "venue": "arXiv.org", "authors": [ "Wojciech Zielonka", "Timur M. Bagautdinov", "Shunsuke Saito", "Michael Zollhofer", "Justus Thies", "Javier Romero" ], "externalIds": { "ArXiv": "2311.08581", "DBLP": "journals/corr/abs-2311-08581", "DOI": "10.48550/arXiv.2311.08581", "CorpusId": 265213240 }, "url": "https://www.semanticscholar.org/paper/cad7ec01f4acb2ea4260b8bb7f8de920f538eaff", "referenceCount": 66, "citationCount": 53, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Text-to-3D with Classifier Score Distillation", "abstract": "Text-to-3D generation has made remarkable progress recently, particularly with methods based on Score Distillation Sampling (SDS) that leverages pre-trained 2D diffusion models. While the usage of classifier-free guidance is well acknowledged to be crucial for successful optimization, it is considered an auxiliary trick rather than the most essential component. In this paper, we re-evaluate the role of classifier-free guidance in score distillation and discover a surprising finding: the guidance alone is enough for effective text-to-3D generation tasks. We name this method Classifier Score Distillation (CSD), which can be interpreted as using an implicit classification model for generation. This new perspective reveals new insights for understanding existing techniques. 
We validate the effectiveness of CSD across a variety of text-to-3D tasks including shape generation, texture synthesis, and shape editing, achieving results superior to those of state-of-the-art methods. Our project page is https://xinyu-andy.github.io/Classifier-Score-Distillation", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Xin Yu", "Yuanchen Guo", "Yangguang Li", "Ding Liang", "Song-Hai Zhang", "Xiaojuan Qi" ], "externalIds": { "DBLP": "conf/iclr/0004GLLZ024", "ArXiv": "2310.19415", "DOI": "10.48550/arXiv.2310.19415", "CorpusId": 264825424 }, "url": "https://www.semanticscholar.org/paper/4e21879b564cc2e803b16edf0dda9f1edb91b497", "referenceCount": 40, "citationCount": 49, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Noise-Free Score Distillation", "abstract": "Score Distillation Sampling (SDS) has emerged as the de facto approach for text-to-content generation in non-image domains. In this paper, we reexamine the SDS process and introduce a straightforward interpretation that demystifies the necessity for large Classifier-Free Guidance (CFG) scales, rooted in the distillation of an undesired noise term. Building upon our interpretation, we propose a novel Noise-Free Score Distillation (NFSD) process, which requires minimal modifications to the original SDS framework. Through this streamlined design, we achieve more effective distillation of pre-trained text-to-image diffusion models while using a nominal CFG scale. This strategic choice allows us to prevent the over-smoothing of results, ensuring that the generated data is both realistic and complies with the desired prompt. To demonstrate the efficacy of NFSD, we provide qualitative examples that compare NFSD and SDS, as well as several other methods.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Oren Katzir", "Or Patashnik", "D. Cohen-Or", "D. Lischinski" ], "externalIds": { "DBLP": "conf/iclr/KatzirPCL24", "ArXiv": "2310.17590", "DOI": "10.48550/arXiv.2310.17590", "CorpusId": 264490431 }, "url": "https://www.semanticscholar.org/paper/85a70c0a048cba4f53dcf332ee73f6032a2e53bc", "referenceCount": 33, "citationCount": 45, "influentialCitationCount": 9, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "HyperHuman: Hyper-Realistic Human Generation with Latent Structural Diffusion", "abstract": "Despite significant advances in large-scale text-to-image models, achieving hyper-realistic human image generation remains a desirable yet unsolved task. Existing models like Stable Diffusion and DALL-E 2 tend to generate human images with incoherent parts or unnatural poses. To tackle these challenges, our key insight is that human image is inherently structural over multiple granularities, from the coarse-level body skeleton to fine-grained spatial geometry. Therefore, capturing such correlations between the explicit appearance and latent structure in one model is essential to generate coherent and natural human images. To this end, we propose a unified framework, HyperHuman, that generates in-the-wild human images of high realism and diverse layouts. Specifically, 1) we first build a large-scale human-centric dataset, named HumanVerse, which consists of 340M images with comprehensive annotations like human pose, depth, and surface normal. 
2) Next, we propose a Latent Structural Diffusion Model that simultaneously denoises the depth and surface normal along with the synthesized RGB image. Our model enforces the joint learning of image appearance, spatial relationship, and geometry in a unified network, where each branch in the model complements to each other with both structural awareness and textural richness. 3) Finally, to further boost the visual quality, we propose a Structure-Guided Refiner to compose the predicted conditions for more detailed generation of higher resolution. Extensive experiments demonstrate that our framework yields the state-of-the-art performance, generating hyper-realistic human images under diverse scenarios. Project Page: https://snap-research.github.io/HyperHuman/", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Xian Liu", "Jian Ren", "Aliaksandr Siarohin", "Ivan Skorokhodov", "Yanyu Li", "Dahua Lin", "Xihui Liu", "Ziwei Liu", "S. Tulyakov" ], "externalIds": { "ArXiv": "2310.08579", "DBLP": "conf/iclr/LiuRSSLLL0T24", "DOI": "10.48550/arXiv.2310.08579", "CorpusId": 263909070 }, "url": "https://www.semanticscholar.org/paper/61b160196ccf662f3da7a5ff6e6c8b5d3c35af1a", "referenceCount": 72, "citationCount": 30, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "HumanNorm: Learning Normal Diffusion Model for High-quality and Realistic 3D Human Generation", "abstract": "Recent text-to-3D methods employing diffusion models have made significant advancements in 3D human generation. However, these approaches face challenges due to the limitations of text-to-image diffusion models, which lack an understanding of 3D structures. Consequently, these methods struggle to achieve high-quality human generation, resulting in smooth geometry and cartoon-like appearances. In this paper, we propose HumanNorm, a novel approach for high-quality and realistic 3D human generation. The main idea is to enhance the model's 2D perception of 3D geometry by learning a normal-adapted diffusion model and a normal-aligned diffusion model. The normal-adapted diffusion model can generate high-fidelity normal maps corresponding to user prompts with view-dependent and body-aware text. The normal-aligned diffusion model learns to generate color images aligned with the normal maps, thereby transforming physical geometry details into realistic appearance. Leveraging the proposed normal diffusion model, we devise a progressive geometry generation strategy and a multi-step Score Distillation Sampling (SDS) loss to enhance the performance of 3D human generation. Comprehensive experiments substantiate HumanNorm's ability to generate 3D humans with intricate geometry and realistic appearances. HumanNorm outperforms existing text-to-3D methods in both geometry and texture quality. 
The project page of HumanNorm is https://humannorm.github.io/.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Xin Huang", "Ruizhi Shao", "Qi Zhang", "Hongwen Zhang", "Yingfa Feng", "Yebin Liu", "Qing Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2310-01406", "ArXiv": "2310.01406", "DOI": "10.1109/CVPR52733.2024.00437", "CorpusId": 263605906 }, "url": "https://www.semanticscholar.org/paper/04d9cc34fdd128ae94819a5a0bcbc99cc16e1f38", "referenceCount": 63, "citationCount": 40, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DreamGaussian: Generative Gaussian Splatting for Efficient 3D Content Creation", "abstract": "Recent advances in 3D content creation mostly leverage optimization-based 3D generation via score distillation sampling (SDS). Though promising results have been exhibited, these methods often suffer from slow per-sample optimization, limiting their practical usage. In this paper, we propose DreamGaussian, a novel 3D content generation framework that achieves both efficiency and quality simultaneously. Our key insight is to design a generative 3D Gaussian Splatting model with companioned mesh extraction and texture refinement in UV space. In contrast to the occupancy pruning used in Neural Radiance Fields, we demonstrate that the progressive densification of 3D Gaussians converges significantly faster for 3D generative tasks. To further enhance the texture quality and facilitate downstream applications, we introduce an efficient algorithm to convert 3D Gaussians into textured meshes and apply a fine-tuning stage to refine the details. Extensive experiments demonstrate the superior efficiency and competitive generation quality of our proposed approach. Notably, DreamGaussian produces high-quality textured meshes in just 2 minutes from a single-view image, achieving approximately 10 times acceleration compared to existing methods.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Jiaxiang Tang", "Jiawei Ren", "Hang Zhou", "Ziwei Liu", "Gang Zeng" ], "externalIds": { "DBLP": "conf/iclr/TangRZ0Z24", "ArXiv": "2309.16653", "DOI": "10.48550/arXiv.2309.16653", "CorpusId": 263131552 }, "url": "https://www.semanticscholar.org/paper/cc1a674bb164d09a060cf5b26fe518c02fae0ddc", "referenceCount": 76, "citationCount": 349, "influentialCitationCount": 49, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deformable 3D Gaussians for High-Fidelity Monocular Dynamic Scene Reconstruction", "abstract": "Implicit neural representation has paved the way for new approaches to dynamic scene reconstruction. Nonetheless, cutting-edge dynamic neural rendering methods rely heavily on these implicit representations, which frequently struggle to capture the intricate details of objects in the scene. Furthermore, implicit methods have difficulty achieving real-time rendering in general dynamic scenes, limiting their use in a variety of tasks. To address the issues, we propose a deformable 3D Gaussians splatting method that reconstructs scenes using 3D Gaussians and learns them in canonical space with a deformation field to model monocular dynamic scenes. We also introduce an annealing smoothing training mechanism with no extra overhead, which can mitigate the impact of inaccurate poses on the smoothness of time interpolation tasks in real-world scenes. 
Through a differential Gaussian rasterizer, the deformable 3D Gaussians not only achieve higher rendering quality but also real-time rendering speed. Experiments show that our method outperforms existing methods significantly in terms of both rendering quality and speed, making it well-suited for tasks such as novel-view synthesis, time interpolation, and real-time rendering. Our code is available at https://github.com/ingra14m/Deformable-3D-Gaussians.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Ziyi Yang", "Xinyu Gao", "Wenming Zhou", "Shaohui Jiao", "Yuqing Zhang", "Xiaogang Jin" ], "externalIds": { "DBLP": "journals/corr/abs-2309-13101", "ArXiv": "2309.13101", "DOI": "10.1109/CVPR52733.2024.01922", "CorpusId": 262466218 }, "url": "https://www.semanticscholar.org/paper/1ce81d64eefe5915c6ef9719915efa5f4079a6c1", "referenceCount": 62, "citationCount": 182, "influentialCitationCount": 21, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "TADA! Text to Animatable Digital Avatars", "abstract": "We introduce TADA, a simple-yet-effective approach that takes textual descriptions and produces expressive 3D avatars with high-quality geometry and lifelike textures, that can be animated and rendered with traditional graphics pipelines. Existing text-based character generation methods are limited in terms of geometry and texture quality, and cannot be realistically animated due to the misalignment between the geometry and the texture, particularly in the face region. To address these limitations, TADA leverages the synergy of a 2D diffusion model and a parametric body model. Specifically, we derive a high-resolution upsampled version of SMPL-X with a displacement layer and a texture map, and use hierarchical rendering with score distillation sampling (SDS) to create high-quality, detailed, holistic 3D avatars from text. To ensure alignment between the geometry and texture, we render normals and RGB images of the generated character and exploit their latent embeddings during the SDS optimization process. We further drive the character’s face with multiple expressions during optimization, ensuring that its semantics remain consistent with the original SMPL-X model. Both qualitative and quantitative evaluations show that TADA significantly surpasses existing approaches. TADA enables large-scale creation of digital characters ready for animation and rendering, while also enabling text-guided editing. The code is public for research purposes at tada.is.tue.mpg.de", "year": 2023, "venue": "International Conference on 3D Vision", "authors": [ "Tingting Liao", "Hongwei Yi", "Yuliang Xiu", "Jiaxaing Tang", "Yangyi Huang", "Justus Thies", "Michael J. Black" ], "externalIds": { "DBLP": "journals/corr/abs-2308-10899", "ArXiv": "2308.10899", "DOI": "10.1109/3DV62453.2024.00150", "CorpusId": 261064940 }, "url": "https://www.semanticscholar.org/paper/303f466fb823112f79a9f36637c7084dd8363fc5", "referenceCount": 92, "citationCount": 65, "influentialCitationCount": 18, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "AvatarVerse: High-quality & Stable 3D Avatar Creation from Text and Pose", "abstract": "Creating expressive, diverse and high-quality 3D avatars from highly customized text descriptions and pose guidance is a challenging task, due to the intricacy of modeling and texturing in 3D that ensure details and various styles (realistic, fictional, etc). 
We present AvatarVerse, a stable pipeline for generating expressive high-quality 3D avatars from nothing but text descriptions and pose guidance. In specific, we introduce a 2D diffusion model conditioned on DensePose signal to establish 3D pose control of avatars through 2D images, which enhances view consistency from partially observed scenarios. It addresses the infamous Janus Problem and significantly stabilizes the generation process. Moreover, we propose a progressive high-resolution 3D synthesis strategy, which obtains substantial improvement over the quality of the created 3D avatars. To this end, the proposed AvatarVerse pipeline achieves zero-shot 3D modeling of 3D avatars that are not only more expressive, but also in higher quality and fidelity than previous works. Rigorous qualitative evaluations and user studies showcase AvatarVerse's superiority in synthesizing high-fidelity 3D avatars, leading to a new standard in high-quality and stable 3D avatar creation. Our project page is: https://avatarverse3d.github.io/ .", "year": 2023, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Huichao Zhang", "Bo Chen", "Hao Yang", "Liao Qu", "Xu Wang", "Li Chen", "Chao Long", "Feida Zhu", "Kang Du", "Minghang Zheng" ], "externalIds": { "ArXiv": "2308.03610", "DBLP": "journals/corr/abs-2308-03610", "DOI": "10.48550/arXiv.2308.03610", "CorpusId": 260681445 }, "url": "https://www.semanticscholar.org/paper/d3fd513594cd2e4cce10b50eb7ea16760b63a2b8", "referenceCount": 42, "citationCount": 39, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SMPL: A Skinned Multi-Person Linear Model", "abstract": "We present a learned model of human body shape and pose-dependent shape variation that is more accurate than previous models and is compatible with existing graphics pipelines. Our Skinned Multi-Person Linear model (SMPL) is a skinned vertex-based model that accurately represents a wide variety of body shapes in natural human poses. The parameters of the model are learned from data including the rest pose template, blend weights, pose-dependent blend shapes, identity-dependent blend shapes, and a regressor from vertices to joint locations. Unlike previous models, the pose-dependent blend shapes are a linear function of the elements of the pose rotation matrices. This simple formulation enables training the entire model from a relatively large number of aligned 3D meshes of different people in different poses. We quantitatively evaluate variants of SMPL using linear or dual-quaternion blend skinning and show that both are more accurate than a BlendSCAPE model trained on the same data. We also extend SMPL to realistically model dynamic soft-tissue deformations. Because it is based on blend skinning, SMPL is compatible with existing rendering engines and we make it available for research purposes.", "year": 2023, "venue": "", "authors": [ "M. Loper", "Naureen Mahmood", "J. Romero", "Gerard Pons-Moll", "Michael J. Black" ], "externalIds": { "DOI": "10.1145/3596711.3596800", "CorpusId": 5328073 }, "url": "https://www.semanticscholar.org/paper/32d3048a4fe4becc7c4638afd05f2354b631cfca", "referenceCount": 43, "citationCount": 2980, "influentialCitationCount": 532, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "3D Gaussian Splatting for Real-Time Radiance Field Rendering", "abstract": "Radiance Field methods have recently revolutionized novel-view synthesis of scenes captured with multiple photos or videos.
However, achieving high visual quality still requires neural networks that are costly to train and render, while recent faster methods inevitably trade off speed for quality. For unbounded and complete scenes (rather than isolated objects) and 1080p resolution rendering, no current method can achieve real-time display rates. We introduce three key elements that allow us to achieve state-of-the-art visual quality while maintaining competitive training times and importantly allow high-quality real-time (≥ 30 fps) novel-view synthesis at 1080p resolution. First, starting from sparse points produced during camera calibration, we represent the scene with 3D Gaussians that preserve desirable properties of continuous volumetric radiance fields for scene optimization while avoiding unnecessary computation in empty space; Second, we perform interleaved optimization/density control of the 3D Gaussians, notably optimizing anisotropic covariance to achieve an accurate representation of the scene; Third, we develop a fast visibility-aware rendering algorithm that supports anisotropic splatting and both accelerates training and allows realtime rendering. We demonstrate state-of-the-art visual quality and real-time rendering on several established datasets.", "year": 2023, "venue": "ACM Transactions on Graphics", "authors": [ "B. Kerbl", "Georgios Kopanas", "Thomas Leimkuehler", "G. Drettakis" ], "externalIds": { "DBLP": "journals/tog/KerblKLD23", "ArXiv": "2308.04079", "DOI": "10.1145/3592433", "CorpusId": 259267917 }, "url": "https://www.semanticscholar.org/paper/2cc1d857e86d5152ba7fe6a8355c2a0150cc280a", "referenceCount": 60, "citationCount": 1458, "influentialCitationCount": 579, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SDXL: Improving Latent Diffusion Models for High-Resolution Image Synthesis", "abstract": "We present SDXL, a latent diffusion model for text-to-image synthesis. Compared to previous versions of Stable Diffusion, SDXL leverages a three times larger UNet backbone: The increase of model parameters is mainly due to more attention blocks and a larger cross-attention context as SDXL uses a second text encoder. We design multiple novel conditioning schemes and train SDXL on multiple aspect ratios. We also introduce a refinement model which is used to improve the visual fidelity of samples generated by SDXL using a post-hoc image-to-image technique. We demonstrate that SDXL shows drastically improved performance compared the previous versions of Stable Diffusion and achieves results competitive with those of black-box state-of-the-art image generators. In the spirit of promoting open research and fostering transparency in large model training and evaluation, we provide access to code and model weights at https://github.com/Stability-AI/generative-models", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Dustin Podell", "Zion English", "Kyle Lacey", "A. 
Blattmann", "Tim Dockhorn", "Jonas Muller", "Joe Penna", "Robin Rombach" ], "externalIds": { "DBLP": "conf/iclr/PodellELBDMPR24", "ArXiv": "2307.01952", "DOI": "10.48550/arXiv.2307.01952", "CorpusId": 259341735 }, "url": "https://www.semanticscholar.org/paper/d7890d1906d95c4ae4c430b350455156d6d8aed9", "referenceCount": 56, "citationCount": 999, "influentialCitationCount": 244, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Motion-X: A Large-scale 3D Expressive Whole-body Human Motion Dataset", "abstract": "In this paper, we present Motion-X, a large-scale 3D expressive whole-body motion dataset. Existing motion datasets predominantly contain body-only poses, lacking facial expressions, hand gestures, and fine-grained pose descriptions. Moreover, they are primarily collected from limited laboratory scenes with textual descriptions manually labeled, which greatly limits their scalability. To overcome these limitations, we develop a whole-body motion and text annotation pipeline, which can automatically annotate motion from either single- or multi-view videos and provide comprehensive semantic labels for each video and fine-grained whole-body pose descriptions for each frame. This pipeline is of high precision, cost-effective, and scalable for further research. Based on it, we construct Motion-X, which comprises 15.6M precise 3D whole-body pose annotations (i.e., SMPL-X) covering 81.1K motion sequences from massive scenes. Besides, Motion-X provides 15.6M frame-level whole-body pose descriptions and 81.1K sequence-level semantic labels. Comprehensive experiments demonstrate the accuracy of the annotation pipeline and the significant benefit of Motion-X in enhancing expressive, diverse, and natural motion generation, as well as 3D whole-body human mesh recovery.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Jing-de Lin", "Ailing Zeng", "Shunlin Lu", "Yuan-Yuan Cai", "Ruimao Zhang", "Haoqian Wang", "L. Zhang" ], "externalIds": { "ArXiv": "2307.00818", "DBLP": "conf/nips/LinZLCZWZ23", "DOI": "10.48550/arXiv.2307.00818", "CorpusId": 259316966 }, "url": "https://www.semanticscholar.org/paper/9f3a2c29413339c2ce01121ba701b02b239eef8c", "referenceCount": 90, "citationCount": 54, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DreamTime: An Improved Optimization Strategy for Diffusion-Guided 3D Generation", "abstract": "Text-to-image diffusion models pre-trained on billions of image-text pairs have recently enabled 3D content creation by optimizing a randomly initialized differentiable 3D representation with score distillation. However, the optimization process suffers slow convergence and the resultant 3D models often exhibit two limitations: (a) quality concerns such as missing attributes and distorted shape and texture; (b) extremely low diversity comparing to text-guided image synthesis. In this paper, we show that the conflict between the 3D optimization process and uniform timestep sampling in score distillation is the main reason for these limitations. To resolve this conflict, we propose to prioritize timestep sampling with monotonically non-increasing functions, which aligns the 3D optimization process with the sampling process of diffusion model. 
Extensive experiments show that our simple redesign significantly improves 3D content creation with faster convergence, better quality and diversity.", "year": 2023, "venue": "", "authors": [ "Yukun Huang", "Jianan Wang", "Yukai Shi", "Xianbiao Qi", "Zhengjun Zha", "Lei Zhang" ], "externalIds": { "ArXiv": "2306.12422", "CorpusId": 259212145 }, "url": "https://www.semanticscholar.org/paper/8dfe271d2186d5746d034b3cce12131f4d3f45f7", "referenceCount": 39, "citationCount": 56, "influentialCitationCount": 8, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DreamHuman: Animatable 3D Avatars from Text", "abstract": "We present DreamHuman, a method to generate realistic animatable 3D human avatar models solely from textual descriptions. Recent text-to-3D methods have made considerable strides in generation, but are still lacking in important aspects. Control and often spatial resolution remain limited, existing methods produce fixed rather than animated 3D human models, and anthropometric consistency for complex structures like people remains a challenge. DreamHuman connects large text-to-image synthesis models, neural radiance fields, and statistical human body models in a novel modeling and optimization framework. This makes it possible to generate dynamic 3D human avatars with high-quality textures and learned, instance-specific, surface deformations. We demonstrate that our method is capable to generate a wide variety of animatable, realistic 3D human models from text. Our 3D models have diverse appearance, clothing, skin tones and body shapes, and significantly outperform both generic text-to-3D approaches and previous text-based 3D avatar generators in visual fidelity. For more results and animations please check our website at https://dream-human.github.io.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Nikos Kolotouros", "Thiemo Alldieck", "Andrei Zanfir", "Eduard Gabriel Bazavan", "Mihai Fieraru", "C. Sminchisescu" ], "externalIds": { "DBLP": "conf/nips/KolotourosAZBFS23", "ArXiv": "2306.09329", "DOI": "10.48550/arXiv.2306.09329", "CorpusId": 259171750 }, "url": "https://www.semanticscholar.org/paper/1799398201d38f527cd0edcd23024b053984c4ee", "referenceCount": 63, "citationCount": 63, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Complete 3D Human Reconstruction from a Single Incomplete Image", "abstract": "This paper presents a method to reconstruct a complete human geometry and texture from an image of a person with only partial body observed, e.g., a torso. The core challenge arises from the occlusion: there exists no pixel to reconstruct where many existing single-view human reconstruction methods are not designed to handle such invisible parts, leading to missing data in 3D. To address this challenge, we introduce a novel coarse-to-fine human reconstruction framework. For coarse reconstruction, explicit volumetric features are learned to generate a complete human geometry with 3D convolutional neural networks conditioned by a 3D body model and the style features from visible parts. An implicit network combines the learned 3D features with the high-quality surface normals enhanced from multiviews to produce fine local details, e.g., high-frequency wrinkles. Finally, we perform progressive texture inpainting to reconstruct a complete appearance of the person in a view-consistent way, which is not possible without the reconstruction of a complete geometry. 
In experiments, we demonstrate that our method can reconstruct high-quality 3D humans, which is robust to occlusion.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Junying Wang", "Jae Shin Yoon", "Tuanfeng Y. Wang", "Krishna Kumar Singh", "U. Neumann" ], "externalIds": { "DBLP": "conf/cvpr/WangYWSN23", "DOI": "10.1109/CVPR52729.2023.00845", "CorpusId": 259339401 }, "url": "https://www.semanticscholar.org/paper/b459724e17751de1c9e74ad1525d9d08e1aabe3b", "referenceCount": 63, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "HIFA: High-fidelity Text-to-3D Generation with Advanced Diffusion Guidance", "abstract": "The advancements in automatic text-to-3D generation have been remarkable. Most existing methods use pre-trained text-to-image diffusion models to optimize 3D representations like Neural Radiance Fields (NeRFs) via latent-space denoising score matching. Yet, these methods often result in artifacts and inconsistencies across different views due to their suboptimal optimization approaches and limited understanding of 3D geometry. Moreover, the inherent constraints of NeRFs in rendering crisp geometry and stable textures usually lead to a two-stage optimization to attain high-resolution details. This work proposes holistic sampling and smoothing approaches to achieve high-quality text-to-3D generation, all in a single-stage optimization. We compute denoising scores in the text-to-image diffusion model's latent and image spaces. Instead of randomly sampling timesteps (also referred to as noise levels in denoising score matching), we introduce a novel timestep annealing approach that progressively reduces the sampled timestep throughout optimization. To generate high-quality renderings in a single-stage optimization, we propose regularization for the variance of z-coordinates along NeRF rays. To address texture flickering issues in NeRFs, we introduce a kernel smoothing technique that refines importance sampling weights coarse-to-fine, ensuring accurate and thorough sampling in high-density regions. Extensive experiments demonstrate the superiority of our method over previous approaches, enabling the generation of highly detailed and view-consistent 3D assets through a single-stage training process.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Junzhe Zhu", "Peiye Zhuang", "Oluwasanmi Koyejo" ], "externalIds": { "ArXiv": "2305.18766", "DBLP": "conf/iclr/ZhuZK24", "CorpusId": 258967476 }, "url": "https://www.semanticscholar.org/paper/daf3b117f789b2b95223e58592979fb57627515e", "referenceCount": 47, "citationCount": 44, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ProlificDreamer: High-Fidelity and Diverse Text-to-3D Generation with Variational Score Distillation", "abstract": "Score distillation sampling (SDS) has shown great promise in text-to-3D generation by distilling pretrained large-scale text-to-image diffusion models, but suffers from over-saturation, over-smoothing, and low-diversity problems. In this work, we propose to model the 3D parameter as a random variable instead of a constant as in SDS and present variational score distillation (VSD), a principled particle-based variational framework to explain and address the aforementioned issues in text-to-3D generation. 
We show that SDS is a special case of VSD and leads to poor samples with both small and large CFG weights. In comparison, VSD works well with various CFG weights as ancestral sampling from diffusion models and simultaneously improves the diversity and sample quality with a common CFG weight (i.e., $7.5$). We further present various improvements in the design space for text-to-3D such as distillation time schedule and density initialization, which are orthogonal to the distillation algorithm yet not well explored. Our overall approach, dubbed ProlificDreamer, can generate high rendering resolution (i.e., $512\\times512$) and high-fidelity NeRF with rich structure and complex effects (e.g., smoke and drops). Further, initialized from NeRF, meshes fine-tuned by VSD are meticulously detailed and photo-realistic. Project page and codes: https://ml.cs.tsinghua.edu.cn/prolificdreamer/", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Zhengyi Wang", "Cheng Lu", "Yikai Wang", "Fan Bao", "Chongxuan Li", "Hang Su", "Jun Zhu" ], "externalIds": { "ArXiv": "2305.16213", "DBLP": "conf/nips/Wang00BL0023", "DOI": "10.48550/arXiv.2305.16213", "CorpusId": 258887357 }, "url": "https://www.semanticscholar.org/paper/c5e9fd131cde68c218d0ea69cd617a67c7f35d42", "referenceCount": 57, "citationCount": 524, "influentialCitationCount": 106, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DreamWaltz: Make a Scene with Complex 3D Animatable Avatars", "abstract": "We present DreamWaltz, a novel framework for generating and animating complex 3D avatars given text guidance and parametric human body prior. While recent methods have shown encouraging results for text-to-3D generation of common objects, creating high-quality and animatable 3D avatars remains challenging. To create high-quality 3D avatars, DreamWaltz proposes 3D-consistent occlusion-aware Score Distillation Sampling (SDS) to optimize implicit neural representations with canonical poses. It provides view-aligned supervision via 3D-aware skeleton conditioning which enables complex avatar generation without artifacts and multiple faces. For animation, our method learns an animatable and generalizable avatar representation which could map arbitrary poses to the canonical pose representation. Extensive evaluations demonstrate that DreamWaltz is an effective and robust approach for creating 3D avatars that can take on complex shapes and appearances as well as novel poses for animation. The proposed framework further enables the creation of complex scenes with diverse compositions, including avatar-avatar, avatar-object and avatar-scene interactions. See https://dreamwaltz3d.github.io/ for more vivid 3D avatar and animation results.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Yukun Huang", "Jianan Wang", "Ailing Zeng", "He Cao", "Xianbiao Qi", "Yukai Shi", "Zhengjun Zha", "Lei Zhang" ], "externalIds": { "ArXiv": "2305.12529", "DBLP": "journals/corr/abs-2305-12529", "DOI": "10.48550/arXiv.2305.12529", "CorpusId": 258833547 }, "url": "https://www.semanticscholar.org/paper/7316596b1f02f288e3b76546d90646524e35fd40", "referenceCount": 51, "citationCount": 50, "influentialCitationCount": 12, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "HumanSD: A Native Skeleton-Guided Diffusion Model for Human Image Generation", "abstract": "Controllable human image generation (HIG) has numerous real-life applications. 
State-of-the-art solutions, such as ControlNet and T2I-Adapter, introduce an additional learnable branch on top of the frozen pre-trained stable diffusion (SD) model, which can enforce various conditions, including skeleton guidance of HIG. While such a plug-and-play approach is appealing, the inevitable and uncertain conflicts between the original images produced from the frozen SD branch and the given condition incur significant challenges for the learnable branch, which essentially conducts image feature editing for condition enforcement. In this work, we propose a native skeleton-guided diffusion model for controllable HIG called HumanSD. Instead of performing image editing with dual-branch diffusion, we fine-tune the original SD model using a novel heatmap-guided denoising loss. This strategy effectively and efficiently strengthens the given skeleton condition during model training while mitigating the catastrophic forgetting effects. HumanSD is fine-tuned on the assembly of three large-scale human-centric datasets with text-image-pose information, two of which are established in this work. Experimental results show that HumanSD outperforms ControlNet in terms of pose control and image quality, particularly when the given skeleton guidance is sophisticated. Code and data are available at: https://idea-research.github.io/HumanSD/.", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Xu Ju", "Ailing Zeng", "Chenchen Zhao", "Jianan Wang", "Lei Zhang", "Qian Xu" ], "externalIds": { "ArXiv": "2304.04269", "DBLP": "journals/corr/abs-2304-04269", "DOI": "10.1109/ICCV51070.2023.01465", "CorpusId": 258048904 }, "url": "https://www.semanticscholar.org/paper/1ea2140567bbed461c19ff02d0dd193c6709f4da", "referenceCount": 59, "citationCount": 50, "influentialCitationCount": 8, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MonoHuman: Animatable Human Neural Field from Monocular Video", "abstract": "Animating virtual avatars with free-view control is crucial for various applications like virtual reality and digital entertainment. Previous studies have attempted to utilize the representation power of the neural radiance field (NeRF) to reconstruct the human body from monocular videos. Recent works propose to graft a deformation network into the NeRF to further model the dynamics of the human neural field for animating vivid human motions. However, such pipelines either rely on pose-dependent representations or fall short of motion coherency due to frame-independent optimization, making it difficult to generalize to unseen pose sequences realistically. In this paper, we propose a novel framework MonoHuman, which robustly renders view-consistent and high-fidelity avatars under arbitrary novel poses. Our key insight is to model the deformation field with bi-directional constraints and explicitly leverage the off-the-peg keyframe information to reason the feature correlations for coherent results. Specifically, we first propose a Shared Bidirectional Deformation module, which creates a pose-independent generalizable deformation field by disentangling backward and forward deformation correspondences into shared skeletal motion weight and separate non-rigid motions. Then, we devise a Forward Correspondence Search module, which queries the correspondence feature of keyframes to guide the rendering network. The rendered results are thus multi-view consistent with high fidelity, even under challenging novel pose settings.
Extensive experiments demonstrate the superiority of our proposed MonoHuman over state-of-the-art methods.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Zhengming Yu", "W. Cheng", "Xian Liu", "Wayne Wu", "Kwan-Yee Lin" ], "externalIds": { "ArXiv": "2304.02001", "DBLP": "journals/corr/abs-2304-02001", "DOI": "10.1109/CVPR52729.2023.01625", "CorpusId": 257921835 }, "url": "https://www.semanticscholar.org/paper/b5fb909d436856ba7c4d5e15bfdb83a847e7ff8a", "referenceCount": 66, "citationCount": 50, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DreamAvatar: Text-and-Shape Guided 3D Human Avatar Generation via Diffusion Models", "abstract": "We present DreamAvatar, a text-and-shape guided framework for generating high-quality 3D human avatars with controllable poses. While encouraging results have been reported by recent methods on text-guided 3D common object generation, generating high-quality human avatars remains an open challenge due to the complexity of the human body's shape, pose, and appearance. We propose DreamAvatar to tackle this challenge, which utilizes a trainable NeRF for predicting density and color for 3D points and pretrained text-to-image diffusion models for providing 2D self-supervision. Specifically, we leverage the SMPL model to provide shape and pose guidance for the generation. We introduce a dual-observation-space design that involves the joint optimization of a canonical space and a posed space that are related by a learnable deformation field. This facilitates the generation of more complete textures and geometry faithful to the target pose. We also jointly optimize the losses computed from the full body and from the zoomed-in 3D head to alleviate the common multi-face “Janus” problem and improve facial details in the generated avatars. Extensive evaluations demonstrate that DreamAvatar significantly outperforms existing methods, establishing a new state-of-the-art for text-and-shape guided 3D human avatar generation.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Yukang Cao", "Yan-Pei Cao", "Kai Han", "Ying Shan", "Kwan-Yee Kenneth Wong" ], "externalIds": { "DBLP": "journals/corr/abs-2304-00916", "ArXiv": "2304.00916", "DOI": "10.1109/CVPR52733.2024.00097", "CorpusId": 257912580 }, "url": "https://www.semanticscholar.org/paper/0fa1501c7378a0dca2ac913fce9dcdcc2b1958a7", "referenceCount": 72, "citationCount": 101, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "AvatarCraft: Transforming Text into Neural Human Avatars with Parameterized Shape and Pose Control", "abstract": "Neural implicit fields are powerful for representing 3D scenes and generating high-quality novel views, but it remains challenging to use such implicit representations for creating a 3D human avatar with a specific identity and artistic style that can be easily animated. Our proposed method, AvatarCraft, addresses this challenge by using diffusion models to guide the learning of geometry and texture for a neural avatar based on a single text prompt. We carefully design the optimization framework of neural implicit fields, including a coarse-to-fine multi-bounding box training strategy, shape regularization, and diffusion-based constraints, to produce high-quality geometry and texture.
Additionally, we make the human avatar animatable by deforming the neural implicit field with an explicit warping field that maps the target human mesh to a template human mesh, both represented using parametric human models. This simplifies animation and reshaping of the generated avatar by controlling pose and shape parameters. Extensive experiments on various text descriptions show that AvatarCraft is effective and robust in creating human avatars and rendering novel views, poses, and shapes. Our project page is: https://avatar-craft.github.io/.", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Ruixia Jiang", "Can Wang", "Jingbo Zhang", "Menglei Chai", "Mingming He", "Dongdong Chen", "Jing Liao" ], "externalIds": { "DBLP": "journals/corr/abs-2303-17606", "ArXiv": "2303.17606", "DOI": "10.1109/ICCV51070.2023.01322", "CorpusId": 257834153 }, "url": "https://www.semanticscholar.org/paper/836f0d803332853bb12a89495ea30f0e91c97bf6", "referenceCount": 64, "citationCount": 59, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fantasia3D: Disentangling Geometry and Appearance for High-quality Text-to-3D Content Creation", "abstract": "Automatic 3D content creation has achieved rapid progress recently due to the availability of pre-trained, large language models and image diffusion models, forming the emerging topic of text-to-3D content creation. Existing text-to-3D methods commonly use implicit scene representations, which couple the geometry and appearance via volume rendering and are suboptimal in terms of recovering finer geometries and achieving photorealistic rendering; consequently, they are less effective for generating high-quality 3D assets. In this work, we propose a new method of Fantasia3D for high-quality text-to-3D content creation. Key to Fantasia3D is the disentangled modeling and learning of geometry and appearance. For geometry learning, we rely on a hybrid scene representation, and propose to encode surface normal extracted from the representation as the input of the image diffusion model. For appearance modeling, we introduce the spatially varying bidirectional reflectance distribution function (BRDF) into the text-to-3D task, and learn the surface material for photorealistic rendering of the generated surface. Our disentangled framework is more compatible with popular graphics engines, supporting relighting, editing, and physical simulation of the generated 3D assets. We conduct thorough experiments that show the advantages of our method over existing ones under different text-to-3D task settings. Project page and source codes: https://fantasia3d.github.io/.", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Rui Chen", "Y. Chen", "Ningxin Jiao", "K. Jia" ], "externalIds": { "DBLP": "journals/corr/abs-2303-13873", "ArXiv": "2303.13873", "DOI": "10.1109/ICCV51070.2023.02033", "CorpusId": 257757213 }, "url": "https://www.semanticscholar.org/paper/0cbb518c364067200476a51e5ce7476a4f582770", "referenceCount": 43, "citationCount": 399, "influentialCitationCount": 75, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Composer: Creative and Controllable Image Synthesis with Composable Conditions", "abstract": "Recent large-scale generative models learned on big data are capable of synthesizing incredible images yet suffer from limited controllability. 
This work offers a new generation paradigm that allows flexible control of the output image, such as spatial layout and palette, while maintaining the synthesis quality and model creativity. With compositionality as the core idea, we first decompose an image into representative factors, and then train a diffusion model with all these factors as the conditions to recompose the input. At the inference stage, the rich intermediate representations work as composable elements, leading to a huge design space (i.e., exponentially proportional to the number of decomposed factors) for customizable content creation. It is noteworthy that our approach, which we call Composer, supports various levels of conditions, such as text description as the global information, depth map and sketch as the local guidance, color histogram for low-level details, etc. Besides improving controllability, we confirm that Composer serves as a general framework and facilitates a wide range of classical generative tasks without retraining. Code and models will be made available.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Lianghua Huang", "Di Chen", "Yu Liu", "Yujun Shen", "Deli Zhao", "Jingren Zhou" ], "externalIds": { "ArXiv": "2302.09778", "DBLP": "conf/icml/HuangC0SZZ23", "DOI": "10.48550/arXiv.2302.09778", "CorpusId": 257038979 }, "url": "https://www.semanticscholar.org/paper/26e5b933b8f60bd749d428b5ff813b2abcd765d8", "referenceCount": 51, "citationCount": 207, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PersonNeRF : Personalized Reconstruction from Photo Collections", "abstract": "We present PersonNeRF, a method that takes a collection of photos of a subject (e.g. Roger Federer) captured across multiple years with arbitrary body poses and appearances, and enables rendering the subject with arbitrary novel combinations of viewpoint, body pose, and appearance. PersonNeRF builds a customized neural volumetric 3D model of the subject that is able to render an entire space spanned by camera viewpoint, body pose, and appearance. A central challenge in this task is dealing with sparse observations; a given body pose is likely only observed by a single viewpoint with a single appearance, and a given appearance is only observed under a handful of different body poses. We address this issue by recovering a canonical T-pose neural volumetric representation of the subject that allows for changing appearance across different observations, but uses a shared pose-dependent motion field across all observations. We demonstrate that this approach, along with regularization of the recovered volumetric geometry to encourage smoothness, is able to recover a model that renders compelling images from novel combinations of viewpoint, pose, and appearance from these challenging unstructured photo collections, outperforming prior work for free-viewpoint human rendering.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Chung-Yi Weng", "Pratul P. Srinivasan", "B.
Curless", "Ira Kemelmacher-Shlizerman" ], "externalIds": { "DBLP": "journals/corr/abs-2302-08504", "ArXiv": "2302.08504", "DOI": "10.1109/CVPR52729.2023.00058", "CorpusId": 256900741 }, "url": "https://www.semanticscholar.org/paper/df1ab83959b3dc49a2f4bb93dbe5f44e1c0db2f6", "referenceCount": 57, "citationCount": 13, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Adding Conditional Control to Text-to-Image Diffusion Models", "abstract": "We present ControlNet, a neural network architecture to add spatial conditioning controls to large, pretrained text-to-image diffusion models. ControlNet locks the production-ready large diffusion models, and reuses their deep and robust encoding layers pretrained with billions of images as a strong backbone to learn a diverse set of conditional controls. The neural architecture is connected with \"zero convolutions\" (zero-initialized convolution layers) that progressively grow the parameters from zero and ensure that no harmful noise could affect the finetuning. We test various conditioning controls, e.g., edges, depth, segmentation, human pose, etc., with Stable Diffusion, using single or multiple conditions, with or without prompts. We show that the training of ControlNets is robust with small (<50k) and large (>1m) datasets. Extensive results show that ControlNet may facilitate wider applications to control image diffusion models.", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Lvmin Zhang", "Anyi Rao", "Maneesh Agrawala" ], "externalIds": { "DBLP": "journals/corr/abs-2302-05543", "ArXiv": "2302.05543", "DOI": "10.1109/ICCV51070.2023.00355", "CorpusId": 256827727 }, "url": "https://www.semanticscholar.org/paper/efbe97d20c4ffe356e8826c01dc550bacc405add", "referenceCount": 119, "citationCount": 2416, "influentialCitationCount": 489, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Scalable Diffusion Models with Transformers", "abstract": "We explore a new class of diffusion models based on the transformer architecture. We train latent diffusion models of images, replacing the commonly-used U-Net backbone with a transformer that operates on latent patches. We analyze the scalability of our Diffusion Transformers (DiTs) through the lens of forward pass complexity as measured by Gflops. We find that DiTs with higher Gflops—through increased transformer depth/width or increased number of input tokens—consistently have lower FID. In addition to possessing good scalability properties, our largest DiT-XL/2 models outperform all prior diffusion models on the class-conditional ImageNet 512×512 and 256×256 benchmarks, achieving a state-of-the-art FID of 2.27 on the latter.", "year": 2022, "venue": "IEEE International Conference on Computer Vision", "authors": [ "William S. Peebles", "Saining Xie" ], "externalIds": { "DBLP": "journals/corr/abs-2212-09748", "ArXiv": "2212.09748", "DOI": "10.1109/ICCV51070.2023.00387", "CorpusId": 254854389 }, "url": "https://www.semanticscholar.org/paper/736973165f98105fec3729b7db414ae4d80fcbeb", "referenceCount": 69, "citationCount": 773, "influentialCitationCount": 131, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Objaverse: A Universe of Annotated 3D Objects", "abstract": "Massive data corpora like WebText, Wikipedia, Conceptual Captions, WebImageText, and LAION have propelled recent dramatic progress in AI. 
Large neural models trained on such datasets produce impressive results and top many of today's benchmarks. A notable omission within this family of large-scale datasets is 3D data. Despite considerable interest and potential applications in 3D vision, datasets of high-fidelity 3D models continue to be mid-sized with limited diversity of object categories. Addressing this gap, we present Objaverse 1.0, a large dataset of objects with 800K+ (and growing) 3D models with descriptive captions, tags, and animations. Objaverse improves upon present day 3D repositories in terms of scale, number of categories, and in the visual diversity of instances within a category. We demonstrate the large potential of Objaverse via four diverse applications: training generative 3D models, improving tail category segmentation on the LVIS benchmark, training open-vocabulary object-navigation models for Embodied AI, and creating a new benchmark for robustness analysis of vision models. Objaverse can open new directions for research and enable new applications across the field of AI.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Matt Deitke", "Dustin Schwenk", "Jordi Salvador", "Luca Weihs", "Oscar Michel", "Eli VanderBilt", "Ludwig Schmidt", "Kiana Ehsani", "Aniruddha Kembhavi", "Ali Farhadi" ], "externalIds": { "ArXiv": "2212.08051", "DBLP": "conf/cvpr/DeitkeSSWMVSEKF23", "DOI": "10.1109/CVPR52729.2023.01263", "CorpusId": 254685588 }, "url": "https://www.semanticscholar.org/paper/1b31dbf44e68b698120552366df03e6e35a1e428", "referenceCount": 88, "citationCount": 497, "influentialCitationCount": 106, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ECON: Explicit Clothed humans Optimized via Normal integration", "abstract": "The combination of deep learning, artist-curated scans, and Implicit Functions (IF), is enabling the creation of detailed, clothed, 3D humans from images. However, existing methods are far from perfect. IF-based methods recover free-form geometry, but produce disembodied limbs or degenerate shapes for novel poses or clothes. To increase robustness for these cases, existing work uses an explicit parametric body model to constrain surface reconstruction, but this limits the recovery of free-form surfaces such as loose clothing that deviates from the body. What we want is a method that combines the best properties of implicit representation and explicit body regularization. To this end, we make two key observations: (1) current networks are better at inferring detailed 2D maps than full-3D surfaces, and (2) a parametric model can be seen as a “canvas” for stitching together detailed surface patches. Based on these, our method, ECON, has three main steps: (1) It infers detailed 2D normal maps for the front and back side of a clothed person. (2) From these, it recovers 2.5D front and back surfaces, called d-BiNI, that are equally detailed, yet incomplete, and registers these w.r.t. each other with the help of a SMPL-X body mesh recovered from the image. (3) It “inpaints” the missing geometry between d-BiNI surfaces. If the face and hands are noisy, they can optionally be replaced with the ones of SMPL-X. As a result, ECON infers high-fidelity 3D humans even in loose clothes and challenging poses. This goes beyond previous methods, according to the quantitative evaluation on the CAPE and Renderpeople datasets. Perceptual studies also show that ECON's perceived realism is better by a large margin.
Code and models are available for research purposes at econ.is.tue.mpg.de", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Yuliang Xiu", "Jinlong Yang", "Xu Cao", "Dimitrios Tzionas", "Michael J. Black" ], "externalIds": { "ArXiv": "2212.07422", "DBLP": "conf/cvpr/XiuYCTB23", "DOI": "10.1109/CVPR52729.2023.00057", "CorpusId": 257687178 }, "url": "https://www.semanticscholar.org/paper/868f6accaa51416a7ba662386e6fbd9913ff99dc", "referenceCount": 109, "citationCount": 102, "influentialCitationCount": 22, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Generating Holistic 3D Human Motion from Speech", "abstract": "This work addresses the problem of generating 3D holistic body motions from human speech. Given a speech recording, we synthesize sequences of 3D body poses, hand gestures, and facial expressions that are realistic and diverse. To achieve this, we first build a high-quality dataset of 3D holistic body meshes with synchronous speech. We then define a novel speech-to-motion generation framework in which the face, body, and hands are modeled separately. The separated modeling stems from the fact that face articulation strongly correlates with human speech, while body poses and hand gestures are less correlated. Specifically, we employ an autoencoder for face motions, and a compositional vector-quantized variational autoencoder (VQ-VAE) for the body and hand motions. The compositional VQ-VAE is key to generating diverse results. Additionally, we propose a cross-conditional autoregressive model that generates body poses and hand gestures, leading to coherent and realistic motions. Extensive experiments and user studies demonstrate that our proposed approach achieves state-of-the-art performance both qualitatively and quantitatively. Our dataset and code are released for research purposes at https://talkshow.is.tue.mpg.de/.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Hongwei Yi", "Hualin Liang", "Yifei Liu", "Qiong Cao", "Yandong Wen", "Timo Bolkart", "Dacheng Tao", "Michael J. Black" ], "externalIds": { "DBLP": "conf/cvpr/YiLLCWBTB23", "ArXiv": "2212.04420", "DOI": "10.1109/CVPR52729.2023.00053", "CorpusId": 254409031 }, "url": "https://www.semanticscholar.org/paper/8228394c8e7f168dfd5a88b6d4f92188c1ac5527", "referenceCount": 79, "citationCount": 89, "influentialCitationCount": 18, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Score Jacobian Chaining: Lifting Pretrained 2D Diffusion Models for 3D Generation", "abstract": "A diffusion model learns to predict a vector field of gradients. We propose to apply chain rule on the learned gradients, and back-propagate the score of a diffusion model through the Jacobian of a differentiable renderer, which we instantiate to be a voxel radiance field. This setup aggregates 2D scores at multiple camera viewpoints into a 3D score, and re-purposes a pretrained 2D model for 3D data generation. We identify a technical challenge of distribution mismatch that arises in this application, and propose a novel estimation mechanism to resolve it. We run our algorithm on several off-the-shelf diffusion image generative models, including the recently released Stable Diffusion trained on the large-scale LAION 5B dataset.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Haochen Wang", "Xiaodan Du", "Jiahao Li", "Raymond A.
Yeh", "Gregory Shakhnarovich" ], "externalIds": { "DBLP": "journals/corr/abs-2212-00774", "ArXiv": "2212.00774", "DOI": "10.1109/CVPR52729.2023.01214", "CorpusId": 254125253 }, "url": "https://www.semanticscholar.org/paper/fc011ed5ee986332523a62d2783adee1179dc1ed", "referenceCount": 74, "citationCount": 404, "influentialCitationCount": 44, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Magic3D: High-Resolution Text-to-3D Content Creation", "abstract": "DreamFusion [31] has recently demonstrated the utility of a pretrained text-to-image diffusion model to optimize Neural Radiance Fields (NeRF) [23], achieving remarkable text-to-3D synthesis results. However, the method has two inherent limitations: (a) extremely slow optimization of NeRF and (b) low-resolution image space supervision on NeRF, leading to low-quality 3D models with a long processing time. In this paper, we address these limitations by utilizing a two-stage optimization framework. First, we obtain a coarse model using a low-resolution diffusion prior and accelerate with a sparse 3D hash grid structure. Using the coarse representation as the initialization, we further optimize a textured 3D mesh model with an efficient differentiable renderer interacting with a high-resolution latent diffusion model. Our method, dubbed Magic3D, can create high quality 3D mesh models in 40 minutes, which is 2× faster than DreamFusion (reportedly taking 1.5 hours on average), while also achieving higher resolution. User studies show 61.7% raters to prefer our approach over DreamFusion. Together with the image-conditioned generation capabilities, we provide users with new ways to control 3D synthesis, opening up new avenues to various creative applications.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Chen-Hsuan Lin", "Jun Gao", "Luming Tang", "Towaki Takikawa", "Xiaohui Zeng", "Xun Huang", "Karsten Kreis", "S. Fidler", "Ming-Yu Liu", "Tsung-Yi Lin" ], "externalIds": { "DBLP": "journals/corr/abs-2211-10440", "ArXiv": "2211.10440", "DOI": "10.1109/CVPR52729.2023.00037", "CorpusId": 253708074 }, "url": "https://www.semanticscholar.org/paper/bdf4af8311637c681904e71cf50f96fd0026f578", "referenceCount": 56, "citationCount": 816, "influentialCitationCount": 113, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Latent-NeRF for Shape-Guided Generation of 3D Shapes and Textures", "abstract": "Text-guided image generation has progressed rapidly in recent years, inspiring major breakthroughs in text-guided shape generation. Recently, it has been shown that using score distillation, one can successfully text-guide a NeRF model to generate a 3D object. We adapt the score distillation to the publicly available, and computationally efficient, Latent Diffusion Models, which apply the entire diffusion process in a compact latent space of a pretrained autoencoder. As NeRFs operate in image space, a naive solution for guiding them with latent score distillation would require encoding to the latent space at each guidance step. Instead, we propose to bring the NeRF to the latent space, resulting in a Latent-NeRF. Analyzing our Latent-NeRF, we show that while Text-to-3D models can generate impressive results, they are inherently unconstrained and may lack the ability to guide or enforce a specific 3D structure. 
To assist and direct the 3D generation, we propose to guide our Latent-NeRF using a Sketch-Shape: an abstract geometry that defines the coarse structure of the desired object. Then, we present means to integrate such a constraint directly into a Latent-NeRF. This unique combination of text and shape guidance allows for increased control over the generation process. We also show that latent score distillation can be successfully applied directly on 3D meshes. This allows for generating high-quality textures on a given geometry. Our experiments validate the power of our different forms of guidance and the efficiency of using latent rendering.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "G. Metzer", "Elad Richardson", "Or Patashnik", "R. Giryes", "D. Cohen-Or" ], "externalIds": { "DBLP": "journals/corr/abs-2211-07600", "ArXiv": "2211.07600", "DOI": "10.1109/CVPR52729.2023.01218", "CorpusId": 253510536 }, "url": "https://www.semanticscholar.org/paper/793939b83e10903f58d8edbb7534963df627a1fe", "referenceCount": 57, "citationCount": 344, "influentialCitationCount": 59, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LAION-5B: An open large-scale dataset for training next generation image-text models", "abstract": "Groundbreaking language-vision architectures like CLIP and DALL-E proved the utility of training on large amounts of noisy image-text data, without relying on expensive accurate labels used in standard vision unimodal supervised learning. The resulting models showed capabilities of strong text-guided image generation and transfer to downstream tasks, while performing remarkably at zero-shot classification with noteworthy out-of-distribution robustness. Since then, large-scale language-vision models like ALIGN, BASIC, GLIDE, Flamingo and Imagen made further improvements. Studying the training and capabilities of such models requires datasets containing billions of image-text pairs. Until now, no datasets of this size have been made openly available for the broader research community. To address this problem and democratize research on large-scale multi-modal models, we present LAION-5B - a dataset consisting of 5.85 billion CLIP-filtered image-text pairs, of which 2.32B contain English language. We show successful replication and fine-tuning of foundational models like CLIP, GLIDE and Stable Diffusion using the dataset, and discuss further experiments enabled with an openly available dataset of this scale. Additionally we provide several nearest neighbor indices, an improved web-interface for dataset exploration and subset generation, and detection scores for watermark, NSFW, and toxic content detection. Announcement page https://laion.ai/laion-5b-a-new-era-of-open-large-scale-multi-modal-datasets/", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Christoph Schuhmann", "Romain Beaumont", "Richard Vencu", "Cade Gordon", "Ross Wightman", "Mehdi Cherti", "Theo Coombes", "Aarush Katta", "Clayton Mullis", "Mitchell Wortsman", "P. Schramowski", "Srivatsa Kundurthy", "Katherine Crowson", "Ludwig Schmidt", "R. Kaczmarczyk", "J. 
Jitsev" ], "externalIds": { "DBLP": "conf/nips/SchuhmannBVGWCC22", "ArXiv": "2210.08402", "DOI": "10.48550/arXiv.2210.08402", "CorpusId": 252917726 }, "url": "https://www.semanticscholar.org/paper/e5c8960eb2ec034ffbd353ef39fd1cb541d3c7c9", "referenceCount": 109, "citationCount": 2214, "influentialCitationCount": 256, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DreamFusion: Text-to-3D using 2D Diffusion", "abstract": "Recent breakthroughs in text-to-image synthesis have been driven by diffusion models trained on billions of image-text pairs. Adapting this approach to 3D synthesis would require large-scale datasets of labeled 3D data and efficient architectures for denoising 3D data, neither of which currently exist. In this work, we circumvent these limitations by using a pretrained 2D text-to-image diffusion model to perform text-to-3D synthesis. We introduce a loss based on probability density distillation that enables the use of a 2D diffusion model as a prior for optimization of a parametric image generator. Using this loss in a DeepDream-like procedure, we optimize a randomly-initialized 3D model (a Neural Radiance Field, or NeRF) via gradient descent such that its 2D renderings from random angles achieve a low loss. The resulting 3D model of the given text can be viewed from any angle, relit by arbitrary illumination, or composited into any 3D environment. Our approach requires no 3D training data and no modifications to the image diffusion model, demonstrating the effectiveness of pretrained image diffusion models as priors.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Ben Poole", "Ajay Jain", "J. Barron", "B. Mildenhall" ], "externalIds": { "DBLP": "conf/iclr/PooleJBM23", "ArXiv": "2209.14988", "DOI": "10.48550/arXiv.2209.14988", "CorpusId": 252596091 }, "url": "https://www.semanticscholar.org/paper/4c94d04afa4309ec2f06bdd0fe3781f91461b362", "referenceCount": 76, "citationCount": 1589, "influentialCitationCount": 404, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Human Performance Modeling and Rendering via Neural Animated Mesh", "abstract": "We have recently seen tremendous progress in the neural advances for photo-real human modeling and rendering. However, it's still challenging to integrate them into an existing mesh-based pipeline for downstream applications. In this paper, we present a comprehensive neural approach for high-quality reconstruction, compression, and rendering of human performances from dense multi-view videos. Our core intuition is to bridge the traditional animated mesh workflow with a new class of highly efficient neural techniques. We first introduce a neural surface reconstructor for high-quality surface generation in minutes. It marries the implicit volumetric rendering of the truncated signed distance field (TSDF) with multi-resolution hash encoding. We further propose a hybrid neural tracker to generate animated meshes, which combines explicit non-rigid tracking with implicit dynamic deformation in a self-supervised framework. The former provides the coarse warping back into the canonical space, while the latter implicit one further predicts the displacements using the 4D hash encoding as in our reconstructor. Then, we discuss the rendering schemes using the obtained animated meshes, ranging from dynamic texturing to lumigraph rendering under various bandwidth settings. 
To strike an intricate balance between quality and bandwidth, we propose a hierarchical solution by first rendering 6 virtual views covering the performer and then conducting occlusion-aware neural texture blending. We demonstrate the efficacy of our approach in a variety of mesh-based applications and photo-realistic free-view experiences on various platforms, i.e., inserting virtual human performances into real environments through mobile AR or immersively watching talent shows with VR headsets.", "year": 2022, "venue": "ACM Transactions on Graphics", "authors": [ "Fuqiang Zhao", "Yuheng Jiang", "Kaixin Yao", "Jiakai Zhang", "Liao Wang", "Haizhao Dai", "Yuhui Zhong", "Yingliang Zhang", "Minye Wu", "Lan Xu", "Jingyi Yu" ], "externalIds": { "DBLP": "journals/corr/abs-2209-08468", "ArXiv": "2209.08468", "DOI": "10.1145/3550454.3555451", "CorpusId": 252368352 }, "url": "https://www.semanticscholar.org/paper/e785dbcfbf9249e74fc94b6d853d1a04bb2cd662", "referenceCount": 89, "citationCount": 61, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Photorealistic Text-to-Image Diffusion Models with Deep Language Understanding", "abstract": "We present Imagen, a text-to-image diffusion model with an unprecedented degree of photorealism and a deep level of language understanding. Imagen builds on the power of large transformer language models in understanding text and hinges on the strength of diffusion models in high-fidelity image generation. Our key discovery is that generic large language models (e.g. T5), pretrained on text-only corpora, are surprisingly effective at encoding text for image synthesis: increasing the size of the language model in Imagen boosts both sample fidelity and image-text alignment much more than increasing the size of the image diffusion model. Imagen achieves a new state-of-the-art FID score of 7.27 on the COCO dataset, without ever training on COCO, and human raters find Imagen samples to be on par with the COCO data itself in image-text alignment. To assess text-to-image models in greater depth, we introduce DrawBench, a comprehensive and challenging benchmark for text-to-image models. With DrawBench, we compare Imagen with recent methods including VQ-GAN+CLIP, Latent Diffusion Models, and DALL-E 2, and find that human raters prefer Imagen over other models in side-by-side comparisons, both in terms of sample quality and image-text alignment. See https://imagen.research.google/ for an overview of the results.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Chitwan Saharia", "William Chan", "Saurabh Saxena", "Lala Li", "Jay Whang", "Emily L. Denton", "Seyed Kamyar Seyed Ghasemipour", "Burcu Karagol Ayan", "S. S. Mahdavi", "Raphael Gontijo Lopes", "Tim Salimans", "Jonathan Ho", "David J. Fleet", "Mohammad Norouzi" ], "externalIds": { "DBLP": "journals/corr/abs-2205-11487", "ArXiv": "2205.11487", "DOI": "10.48550/arXiv.2205.11487", "CorpusId": 248986576 }, "url": "https://www.semanticscholar.org/paper/9695824d7a01fad57ba9c01d7d76a519d78d65e7", "referenceCount": 108, "citationCount": 4291, "influentialCitationCount": 361, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Hierarchical Text-Conditional Image Generation with CLIP Latents", "abstract": "Contrastive models like CLIP have been shown to learn robust representations of images that capture both semantics and style. 
To leverage these representations for image generation, we propose a two-stage model: a prior that generates a CLIP image embedding given a text caption, and a decoder that generates an image conditioned on the image embedding. We show that explicitly generating image representations improves image diversity with minimal loss in photorealism and caption similarity. Our decoders conditioned on image representations can also produce variations of an image that preserve both its semantics and style, while varying the non-essential details absent from the image representation. Moreover, the joint embedding space of CLIP enables language-guided image manipulations in a zero-shot fashion. We use diffusion models for the decoder and experiment with both autoregressive and diffusion models for the prior, finding that the latter are computationally more efficient and produce higher-quality samples.", "year": 2022, "venue": "arXiv.org", "authors": [ "A. Ramesh", "Prafulla Dhariwal", "Alex Nichol", "Casey Chu", "Mark Chen" ], "externalIds": { "ArXiv": "2204.06125", "DBLP": "journals/corr/abs-2204-06125", "DOI": "10.48550/arXiv.2204.06125", "CorpusId": 248097655 }, "url": "https://www.semanticscholar.org/paper/c57293882b2561e1ba03017902df9fc2f289dea2", "referenceCount": 66, "citationCount": 5171, "influentialCitationCount": 421, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "CLIP-Mesh: Generating textured meshes from text using pretrained image-text models", "abstract": "We present a technique for zero-shot generation of a 3D model using only a target text prompt. Without any 3D supervision our method deforms the control shape of a limit subdivided surface along with its texture map and normal map to obtain a 3D asset that corresponds to the input text prompt and can be easily deployed into games or modeling applications. We rely only on a pre-trained CLIP model that compares the input text prompt with differentiably rendered images of our 3D model. While previous works have focused on stylization or required training of generative models we perform optimization on mesh parameters directly to generate shape, texture or both. To constrain the optimization to produce plausible meshes and textures we introduce a number of techniques using image augmentations and the use of a pretrained prior that generates CLIP image embeddings given a text embedding.", "year": 2022, "venue": "ACM SIGGRAPH Conference and Exhibition on Computer Graphics and Interactive Techniques in Asia", "authors": [ "N. Khalid", "Tianhao Xie", "Eugene Belilovsky", "T. Popa" ], "externalIds": { "DBLP": "conf/siggrapha/KhalidXBP22", "ArXiv": "2203.13333", "DOI": "10.1145/3550469.3555392", "CorpusId": 252089441 }, "url": "https://www.semanticscholar.org/paper/8941e477b2f39eb92712f04400412da60d349ec1", "referenceCount": 26, "citationCount": 247, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "NeuMan: Neural Human Radiance Field from a Single Video", "abstract": "Photorealistic rendering and reposing of humans is important for enabling augmented reality experiences. We propose a novel framework to reconstruct the human and the scene that can be rendered with novel human poses and views from just a single in-the-wild video. Given a video captured by a moving camera, we train two NeRF models: a human NeRF model and a scene NeRF model. To train these models, we rely on existing methods to estimate the rough geometry of the human and the scene. 
Those rough geometry estimates allow us to create a warping field from the observation space to the canonical pose-independent space, where we train the human model in. Our method is able to learn subject specific details, including cloth wrinkles and accessories, from just a 10 seconds video clip, and to provide high quality renderings of the human under novel poses, from novel views, together with the background.", "year": 2022, "venue": "European Conference on Computer Vision", "authors": [ "Wei Jiang", "K. M. Yi", "Golnoosh Samei", "Oncel Tuzel", "Anurag Ranjan" ], "externalIds": { "DBLP": "journals/corr/abs-2203-12575", "ArXiv": "2203.12575", "DOI": "10.48550/arXiv.2203.12575", "CorpusId": 247618711 }, "url": "https://www.semanticscholar.org/paper/17df7e87a5d25e6e83d773e6f686eb0a85f6827e", "referenceCount": 54, "citationCount": 163, "influentialCitationCount": 18, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DeciWatch: A Simple Baseline for 10x Efficient 2D and 3D Pose Estimation", "abstract": "This paper proposes a simple baseline framework for video-based 2D/3D human pose estimation that can achieve 10 times efficiency improvement over existing works without any performance degradation, named DeciWatch. Unlike current solutions that estimate each frame in a video, DeciWatch introduces a simple yet effective sample-denoise-recover framework that only watches sparsely sampled frames, taking advantage of the continuity of human motions and the lightweight pose representation. Specifically, DeciWatch uniformly samples less than 10% video frames for detailed estimation, denoises the estimated 2D/3D poses with an efficient Transformer architecture, and then accurately recovers the rest of the frames using another Transformer-based network. Comprehensive experimental results on three video-based human pose estimation and body mesh recovery tasks with four datasets validate the efficiency and effectiveness of DeciWatch. Code is available at https://github.com/cure-lab/DeciWatch.", "year": 2022, "venue": "European Conference on Computer Vision", "authors": [ "Ailing Zeng", "Xu Ju", "Lei Yang", "Ruiyuan Gao", "Xizhou Zhu", "Bo Dai", "Qianglin Xu" ], "externalIds": { "DBLP": "journals/corr/abs-2203-08713", "ArXiv": "2203.08713", "DOI": "10.48550/arXiv.2203.08713", "CorpusId": 247475883 }, "url": "https://www.semanticscholar.org/paper/32da58aa252700339b118b1e4f03bd721d2d5b55", "referenceCount": 65, "citationCount": 40, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Instant neural graphics primitives with a multiresolution hash encoding", "abstract": "Neural graphics primitives, parameterized by fully connected neural networks, can be costly to train and evaluate. We reduce this cost with a versatile new input encoding that permits the use of a smaller network without sacrificing quality, thus significantly reducing the number of floating point and memory access operations: a small neural network is augmented by a multiresolution hash table of trainable feature vectors whose values are optimized through stochastic gradient descent. The multiresolution structure allows the network to disambiguate hash collisions, making for a simple architecture that is trivial to parallelize on modern GPUs. We leverage this parallelism by implementing the whole system using fully-fused CUDA kernels with a focus on minimizing wasted bandwidth and compute operations. 
We achieve a combined speedup of several orders of magnitude, enabling training of high-quality neural graphics primitives in a matter of seconds, and rendering in tens of milliseconds at a resolution of 1920×1080.", "year": 2022, "venue": "ACM Transactions on Graphics", "authors": [ "T. Müller", "Alex Evans", "Christoph Schied", "A. Keller" ], "externalIds": { "ArXiv": "2201.05989", "DBLP": "journals/tog/MullerESK22", "DOI": "10.1145/3528223.3530127", "CorpusId": 246016186 }, "url": "https://www.semanticscholar.org/paper/60e69982ef2920596c6f31d6fd3ca5e9591f3db6", "referenceCount": 56, "citationCount": 2787, "influentialCitationCount": 538, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "HumanNeRF: Free-viewpoint Rendering of Moving People from Monocular Video", "abstract": "We introduce a free-viewpoint rendering method - HumanNeRF - that works on a given monocular video of a human performing complex body motions, e.g. a video from YouTube. Our method enables pausing the video at any frame and rendering the subject from arbitrary new camera viewpoints or even a full 360-degree camera path for that particular frame and body pose. This task is particularly challenging, as it requires synthesizing photorealistic details of the body, as seen from various camera angles that may not exist in the input video, as well as synthesizing fine details such as cloth folds and facial appearance. Our method optimizes for a volumetric representation of the person in a canonical T-pose, in concert with a motion field that maps the estimated canonical representation to every frame of the video via backward warps. The motion field is decomposed into skeletal rigid and non-rigid motions, produced by deep networks. We show significant performance improvements over prior work, and compelling examples of free-viewpoint renderings from monocular video of moving humans in challenging uncontrolled capture scenarios.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Chung-Yi Weng", "B. Curless", "Pratul P. Srinivasan", "J. Barron", "Ira Kemelmacher-Shlizerman" ], "externalIds": { "DBLP": "conf/cvpr/WengCSBK22", "ArXiv": "2201.04127", "DOI": "10.1109/CVPR52688.2022.01573", "CorpusId": 245853751 }, "url": "https://www.semanticscholar.org/paper/f763a59644e27a2215095943224f2564e670a504", "referenceCount": 77, "citationCount": 361, "influentialCitationCount": 63, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "High-Resolution Image Synthesis with Latent Diffusion Models", "abstract": "By decomposing the image formation process into a sequential application of denoising autoencoders, diffusion models (DMs) achieve state-of-the-art synthesis results on image data and beyond. Additionally, their formulation allows for a guiding mechanism to control the image generation process without retraining. However, since these models typically operate directly in pixel space, optimization of powerful DMs often consumes hundreds of GPU days and inference is expensive due to sequential evaluations. To enable DM training on limited computational resources while retaining their quality and flexibility, we apply them in the latent space of powerful pretrained autoencoders. In contrast to previous work, training diffusion models on such a representation allows for the first time to reach a near-optimal point between complexity reduction and detail preservation, greatly boosting visual fidelity. 
By introducing cross-attention layers into the model architecture, we turn diffusion models into powerful and flexible generators for general conditioning inputs such as text or bounding boxes and high-resolution synthesis becomes possible in a convolutional manner. Our latent diffusion models (LDMs) achieve new state of the art scores for image inpainting and class-conditional image synthesis and highly competitive performance on various tasks, including unconditional image generation, text-to-image synthesis, and super-resolution, while significantly reducing computational requirements compared to pixel-based DMs.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Robin Rombach", "A. Blattmann", "Dominik Lorenz", "Patrick Esser", "B. Ommer" ], "externalIds": { "ArXiv": "2112.10752", "DBLP": "journals/corr/abs-2112-10752", "DOI": "10.1109/CVPR52688.2022.01042", "CorpusId": 245335280 }, "url": "https://www.semanticscholar.org/paper/c10075b3746a9f3dd5811970e93c8ca3ad39b39d", "referenceCount": 110, "citationCount": 9847, "influentialCitationCount": 2744, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GLIDE: Towards Photorealistic Image Generation and Editing with Text-Guided Diffusion Models", "abstract": "Diffusion models have recently been shown to generate high-quality synthetic images, especially when paired with a guidance technique to trade off diversity for fidelity. We explore diffusion models for the problem of text-conditional image synthesis and compare two different guidance strategies: CLIP guidance and classifier-free guidance. We find that the latter is preferred by human evaluators for both photorealism and caption similarity, and often produces photorealistic samples. Samples from a 3.5 billion parameter text-conditional diffusion model using classifier-free guidance are favored by human evaluators to those from DALL-E, even when the latter uses expensive CLIP reranking. Additionally, we find that our models can be fine-tuned to perform image inpainting, enabling powerful text-driven image editing. We train a smaller model on a filtered dataset and release the code and weights at https://github.com/openai/glide-text2im.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Alex Nichol", "Prafulla Dhariwal", "A. Ramesh", "Pranav Shyam", "Pamela Mishkin", "Bob McGrew", "I. Sutskever", "Mark Chen" ], "externalIds": { "ArXiv": "2112.10741", "DBLP": "journals/corr/abs-2112-10741", "CorpusId": 245335086 }, "url": "https://www.semanticscholar.org/paper/7002ae048e4b8c9133a55428441e8066070995cb", "referenceCount": 51, "citationCount": 2645, "influentialCitationCount": 244, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ICON: Implicit Clothed humans Obtained from Normals", "abstract": "Current methods for learning realistic and animatable 3D clothed avatars need either posed 3D scans or 2D images with carefully controlled user poses. In contrast, our goal is to learn an avatar from only 2D images of people in unconstrained poses. Given a set of images, our method estimates a detailed 3D surface from each image and then combines these into an animatable avatar. Implicit functions are well suited to the first task, as they can capture details like hair and clothes. Current methods, however, are not robust to varied human poses and often produce 3D surfaces with broken or disembodied limbs, missing details, or non-human shapes. 
The problem is that these methods use global feature encoders that are sensitive to global pose. To address this, we propose ICON (“Implicit Clothed humans Obtained from Normals”), which, instead, uses local features. ICON has two main modules, both of which exploit the SMPL(-X) body model. First, ICON infers detailed clothed-human normals (front/back) conditioned on the SMPL(-X) normals. Second, a visibility-aware implicit surface regressor produces an iso-surface of a human occupancy field. Importantly, at inference time, a feedback loop alternates between refining the SMPL(-X) mesh using the inferred clothed normals and then refining the normals. Given multiple reconstructed frames of a subject in varied poses, we use a modified version of SCANimate to produce an animatable avatar from them. Evaluation on the AGORA and CAPE datasets shows that ICON outperforms the state of the art in reconstruction, even with heavily limited training data. Additionally, it is much more robust to out-of-distribution samples, e.g., in-the-wild poses/images and out-of-frame cropping. ICON takes a step towards robust 3D clothed human reconstruction from in-the-wild images. This enables avatar creation directly from video with personalized pose-dependent cloth deformation. Models and code are available for research at https://icon.is.tue.mpg.de.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Yuliang Xiu", "Jinlong Yang", "Dimitrios Tzionas", "Michael J. Black" ], "externalIds": { "ArXiv": "2112.09127", "DBLP": "journals/corr/abs-2112-09127", "DOI": "10.1109/CVPR52688.2022.01294", "CorpusId": 245219054 }, "url": "https://www.semanticscholar.org/paper/4ffd87551ab02eca22bd6d5ad945c8d036e80b1d", "referenceCount": 76, "citationCount": 232, "influentialCitationCount": 39, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Zero-Shot Text-Guided Object Generation with Dream Fields", "abstract": "We combine neural rendering with multi-modal image and text representations to synthesize diverse 3D objects solely from natural language descriptions. Our method, Dream Fields, can generate the geometry and color of a wide range of objects without 3D supervision. Due to the scarcity of diverse, captioned 3D data, prior methods only generate objects from a handful of categories, such as ShapeNet. Instead, we guide generation with image-text models pre-trained on large datasets of captioned images from the web. Our method optimizes a Neural Radiance Field from many camera views so that rendered images score highly with a target caption according to a pre-trained CLIP model. To improve fidelity and visual quality, we introduce simple geometric priors, including sparsity-inducing transmittance regularization, scene bounds, and new MLP architectures. In experiments, Dream Fields produce realistic, multi-view consistent object geometry and color from a variety of natural language captions.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Ajay Jain", "B. Mildenhall", "J. Barron", "P.
Abbeel", "Ben Poole" ], "externalIds": { "DBLP": "journals/corr/abs-2112-01455", "ArXiv": "2112.01455", "DOI": "10.1109/CVPR52688.2022.00094", "CorpusId": 244799255 }, "url": "https://www.semanticscholar.org/paper/03e1c3b5fdad9b21bbed3d13af7e8d6c73cbcfa6", "referenceCount": 66, "citationCount": 480, "influentialCitationCount": 32, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields", "abstract": "Though neural radiance fields (NeRF) have demon-strated impressive view synthesis results on objects and small bounded regions of space, they struggle on “un-bounded” scenes, where the camera may point in any di-rection and content may exist at any distance. In this set-ting, existing NeRF-like models often produce blurry or low-resolution renderings (due to the unbalanced detail and scale of nearby and distant objects), are slow to train, and may exhibit artifacts due to the inherent ambiguity of the task of reconstructing a large scene from a small set of images. We present an extension of mip-NeRF (a NeRF variant that addresses sampling and aliasing) that uses a non-linear scene parameterization, online distillation, and a novel distortion-based regularizer to overcome the chal-lenges presented by unbounded scenes. Our model, which we dub “mip-NeRF 360” as we target scenes in which the camera rotates 360 degrees around a point, reduces mean-squared error by 57% compared to mip-NeRF, and is able to produce realistic synthesized views and detailed depth maps for highly intricate, unbounded real-world scenes.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "J. Barron", "B. Mildenhall", "Dor Verbin", "Pratul P. Srinivasan", "Peter Hedman" ], "externalIds": { "ArXiv": "2111.12077", "DBLP": "conf/cvpr/BarronMVSH22", "DOI": "10.1109/CVPR52688.2022.00539", "CorpusId": 244488448 }, "url": "https://www.semanticscholar.org/paper/ec90ffa017a2cc6a51342509ce42b81b478aefb3", "referenceCount": 53, "citationCount": 1116, "influentialCitationCount": 179, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Marching Tetrahedra: a Hybrid Representation for High-Resolution 3D Shape Synthesis", "abstract": "We introduce DMTet, a deep 3D conditional generative model that can synthesize high-resolution 3D shapes using simple user guides such as coarse voxels. It marries the merits of implicit and explicit 3D representations by leveraging a novel hybrid 3D representation. Compared to the current implicit approaches, which are trained to regress the signed distance values, DMTet directly optimizes for the reconstructed surface, which enables us to synthesize finer geometric details with fewer artifacts. Unlike deep 3D generative models that directly generate explicit representations such as meshes, our model can synthesize shapes with arbitrary topology. The core of DMTet includes a deformable tetrahedral grid that encodes a discretized signed distance function and a differentiable marching tetrahedra layer that converts the implicit signed distance representation to the explicit surface mesh representation. This combination allows joint optimization of the surface geometry and topology as well as generation of the hierarchy of subdivisions using reconstruction and adversarial losses defined explicitly on the surface mesh. 
Our approach significantly outperforms existing work on conditional shape synthesis from coarse voxel inputs, trained on a dataset of complex 3D animal shapes. Project page: https://nv-tlabs.github.io/DMTet/.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Tianchang Shen", "Jun Gao", "K. Yin", "Ming-Yu Liu", "S. Fidler" ], "externalIds": { "ArXiv": "2111.04276", "DBLP": "journals/corr/abs-2111-04276", "CorpusId": 243848115 }, "url": "https://www.semanticscholar.org/paper/8e970913466a81207230a09f6516bab944563cc0", "referenceCount": 72, "citationCount": 349, "influentialCitationCount": 41, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "imGHUM: Implicit Generative Models of 3D Human Shape and Articulated Pose", "abstract": "We present imGHUM, the first holistic generative model of 3D human shape and articulated pose, represented as a signed distance function. In contrast to prior work, we model the full human body implicitly as a function zero-level-set and without the use of an explicit template mesh. We propose a novel network architecture and a learning paradigm, which make it possible to learn a detailed implicit generative model of human pose, shape, and semantics, on par with state-of-the-art mesh-based models. Our model features desired detail for human models, such as articulated pose including hand motion and facial expressions, a broad spectrum of shape variations, and can be queried at arbitrary resolutions and spatial locations. Additionally, our model has attached spatial semantics making it straightforward to establish correspondences between different shape instances, thus enabling applications that are difficult to tackle using classical implicit representations. In extensive experiments, we demonstrate the model accuracy and its applicability to current research problems.", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Thiemo Alldieck", "Hongyi Xu", "C. Sminchisescu" ], "externalIds": { "DBLP": "journals/corr/abs-2108-10842", "ArXiv": "2108.10842", "DOI": "10.1109/ICCV48922.2021.00541", "CorpusId": 237278058 }, "url": "https://www.semanticscholar.org/paper/05f907e437a14d0db9b7479662d0cd587cd54634", "referenceCount": 56, "citationCount": 101, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "NeuS: Learning Neural Implicit Surfaces by Volume Rendering for Multi-view Reconstruction", "abstract": "We present a novel neural surface reconstruction method, called NeuS, for reconstructing objects and scenes with high fidelity from 2D image inputs. Existing neural surface reconstruction approaches, such as DVR and IDR, require foreground mask as supervision, easily get trapped in local minima, and therefore struggle with the reconstruction of objects with severe self-occlusion or thin structures. Meanwhile, recent neural methods for novel view synthesis, such as NeRF and its variants, use volume rendering to produce a neural scene representation with robustness of optimization, even for highly complex objects. However, extracting high-quality surfaces from this learned implicit representation is difficult because there are not sufficient surface constraints in the representation. In NeuS, we propose to represent a surface as the zero-level set of a signed distance function (SDF) and develop a new volume rendering method to train a neural SDF representation. 
We observe that the conventional volume rendering method causes inherent geometric errors (i.e. bias) for surface reconstruction, and therefore propose a new formulation that is free of bias in the first order of approximation, thus leading to more accurate surface reconstruction even without the mask supervision. Experiments on the DTU dataset and the BlendedMVS dataset show that NeuS outperforms the state-of-the-arts in high-quality surface reconstruction, especially for objects and scenes with complex structures and self-occlusion.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Peng Wang", "Lingjie Liu", "Yuan Liu", "C. Theobalt", "T. Komura", "Wenping Wang" ], "externalIds": { "ArXiv": "2106.10689", "DBLP": "conf/nips/WangLLTKW21", "CorpusId": 235490453 }, "url": "https://www.semanticscholar.org/paper/cf5647cb2613f5f697729eab567383006dcd4913", "referenceCount": 53, "citationCount": 1299, "influentialCitationCount": 318, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Diffusion Models Beat GANs on Image Synthesis", "abstract": "We show that diffusion models can achieve image sample quality superior to the current state-of-the-art generative models. We achieve this on unconditional image synthesis by finding a better architecture through a series of ablations. For conditional image synthesis, we further improve sample quality with classifier guidance: a simple, compute-efficient method for trading off diversity for fidelity using gradients from a classifier. We achieve an FID of 2.97 on ImageNet 128$\\times$128, 4.59 on ImageNet 256$\\times$256, and 7.72 on ImageNet 512$\\times$512, and we match BigGAN-deep even with as few as 25 forward passes per sample, all while maintaining better coverage of the distribution. Finally, we find that classifier guidance combines well with upsampling diffusion models, further improving FID to 3.94 on ImageNet 256$\\times$256 and 3.85 on ImageNet 512$\\times$512. We release our code at https://github.com/openai/guided-diffusion", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Prafulla Dhariwal", "Alex Nichol" ], "externalIds": { "ArXiv": "2105.05233", "DBLP": "journals/corr/abs-2105-05233", "CorpusId": 234357997 }, "url": "https://www.semanticscholar.org/paper/64ea8f180d0682e6c18d1eb688afdb2027c02794", "referenceCount": 81, "citationCount": 5177, "influentialCitationCount": 661, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Learning Transferable Visual Models From Natural Language Supervision", "abstract": "State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. 
We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at https://github.com/OpenAI/CLIP.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Alec Radford", "Jong Wook Kim", "Chris Hallacy", "A. Ramesh", "Gabriel Goh", "Sandhini Agarwal", "Girish Sastry", "Amanda Askell", "Pamela Mishkin", "Jack Clark", "Gretchen Krueger", "I. Sutskever" ], "externalIds": { "ArXiv": "2103.00020", "DBLP": "conf/icml/RadfordKHRGASAM21", "CorpusId": 231591445 }, "url": "https://www.semanticscholar.org/paper/6f870f7f02a8c59c3e23f407f3ef00dd1dcf8fc4", "referenceCount": 220, "citationCount": 18886, "influentialCitationCount": 5013, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Improved Denoising Diffusion Probabilistic Models", "abstract": "Denoising diffusion probabilistic models (DDPM) are a class of generative models which have recently been shown to produce excellent samples. We show that with a few simple modifications, DDPMs can also achieve competitive log-likelihoods while maintaining high sample quality. Additionally, we find that learning variances of the reverse diffusion process allows sampling with an order of magnitude fewer forward passes with a negligible difference in sample quality, which is important for the practical deployment of these models. We additionally use precision and recall to compare how well DDPMs and GANs cover the target distribution. Finally, we show that the sample quality and likelihood of these models scale smoothly with model capacity and training compute, making them easily scalable. We release our code at https://github.com/openai/improved-diffusion", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Alex Nichol", "Prafulla Dhariwal" ], "externalIds": { "ArXiv": "2102.09672", "DBLP": "conf/icml/NicholD21", "CorpusId": 231979499 }, "url": "https://www.semanticscholar.org/paper/de18baa4964804cf471d85a5a090498242d2e79f", "referenceCount": 47, "citationCount": 2547, "influentialCitationCount": 282, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Conceptual 12M: Pushing Web-Scale Image-Text Pre-Training To Recognize Long-Tail Visual Concepts", "abstract": "The availability of large-scale image captioning and visual question answering datasets has contributed significantly to recent successes in vision-and-language pretraining. However, these datasets are often collected with overrestrictive requirements inherited from their original target tasks (e.g., image caption generation), which limit the resulting dataset scale and diversity. We take a step further in pushing the limits of vision-and-language pretraining data by relaxing the data collection pipeline used in Conceptual Captions 3M (CC3M) [54] and introduce the Conceptual 12M (CC12M), a dataset with 12 million image-text pairs specifically meant to be used for visionand-language pre-training. 
We perform an analysis of this dataset and benchmark its effectiveness against CC3M on multiple downstream tasks with an emphasis on long-tail visual recognition. Our results clearly illustrate the benefit of scaling up pre-training data for vision-and-language tasks, as indicated by the new state-of-the-art results on both the nocaps and Conceptual Captions benchmarks.1", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Soravit Changpinyo", "P. Sharma", "Nan Ding", "Radu Soricut" ], "externalIds": { "DBLP": "conf/cvpr/ChangpinyoSDS21", "ArXiv": "2102.08981", "DOI": "10.1109/CVPR46437.2021.00356", "CorpusId": 231951742 }, "url": "https://www.semanticscholar.org/paper/394be105b87e9bfe72c20efe6338de10604e1a11", "referenceCount": 100, "citationCount": 848, "influentialCitationCount": 122, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Denoising Diffusion Implicit Models", "abstract": "Denoising diffusion probabilistic models (DDPMs) have achieved high quality image generation without adversarial training, yet they require simulating a Markov chain for many steps to produce a sample. To accelerate sampling, we present denoising diffusion implicit models (DDIMs), a more efficient class of iterative implicit probabilistic models with the same training procedure as DDPMs. In DDPMs, the generative process is defined as the reverse of a Markovian diffusion process. We construct a class of non-Markovian diffusion processes that lead to the same training objective, but whose reverse process can be much faster to sample from. We empirically demonstrate that DDIMs can produce high quality samples $10 \\times$ to $50 \\times$ faster in terms of wall-clock time compared to DDPMs, allow us to trade off computation for sample quality, and can perform semantically meaningful image interpolation directly in the latent space.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Jiaming Song", "Chenlin Meng", "Stefano Ermon" ], "externalIds": { "ArXiv": "2010.02502", "DBLP": "journals/corr/abs-2010-02502", "MAG": "3092442149", "CorpusId": 222140788 }, "url": "https://www.semanticscholar.org/paper/014576b866078524286802b1d0e18628520aa886", "referenceCount": 47, "citationCount": 4422, "influentialCitationCount": 877, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Denoising Diffusion Probabilistic Models", "abstract": "We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional CIFAR10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art FID score of 3.17. On 256x256 LSUN, we obtain sample quality similar to ProgressiveGAN. Our implementation is available at this https URL", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Jonathan Ho", "Ajay Jain", "P. 
Abbeel" ], "externalIds": { "ArXiv": "2006.11239", "MAG": "3100572490", "DBLP": "journals/corr/abs-2006-11239", "CorpusId": 219955663 }, "url": "https://www.semanticscholar.org/paper/5c126ae3421f05768d8edd97ecd44b1364e2c99a", "referenceCount": 73, "citationCount": 10778, "influentialCitationCount": 2337, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "PIFu: Pixel-Aligned Implicit Function for High-Resolution Clothed Human Digitization", "abstract": "We introduce Pixel-aligned Implicit Function (PIFu), an implicit representation that locally aligns pixels of 2D images with the global context of their corresponding 3D object. Using PIFu, we propose an end-to-end deep learning method for digitizing highly detailed clothed humans that can infer both 3D surface and texture from a single image, and optionally, multiple input images. Highly intricate shapes, such as hairstyles, clothing, as well as their variations and deformations can be digitized in a unified way. Compared to existing representations used for 3D deep learning, PIFu produces high-resolution surfaces including largely unseen regions such as the back of a person. In particular, it is memory efficient unlike the voxel representation, can handle arbitrary topology, and the resulting surface is spatially aligned with the input image. Furthermore, while previous techniques are designed to process either a single image or multiple views, PIFu extends naturally to arbitrary number of views. We demonstrate high-resolution and robust reconstructions on real world images from the DeepFashion dataset, which contains a variety of challenging clothing types. Our method achieves state-of-the-art performance on a public benchmark and outperforms the prior work for clothed human digitization from a single image.", "year": 2019, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Shunsuke Saito", "Zeng Huang", "Ryota Natsume", "S. Morishima", "Angjoo Kanazawa", "Hao Li" ], "externalIds": { "MAG": "2981978060", "DBLP": "journals/corr/abs-1905-05172", "ArXiv": "1905.05172", "DOI": "10.1109/ICCV.2019.00239", "CorpusId": 152282359 }, "url": "https://www.semanticscholar.org/paper/343da6d4cff7ce8c04270487a1f7a037ea0572d6", "referenceCount": 77, "citationCount": 1130, "influentialCitationCount": 196, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Expressive Body Capture: 3D Hands, Face, and Body From a Single Image", "abstract": "To facilitate the analysis of human actions, interactions and emotions, we compute a 3D model of human body pose, hand pose, and facial expression from a single monocular image. To achieve this, we use thousands of 3D scans to train a new, unified, 3D model of the human body, SMPL-X, that extends SMPL with fully articulated hands and an expressive face. Learning to regress the parameters of SMPL-X directly from images is challenging without paired images and 3D ground truth. Consequently, we follow the approach of SMPLify, which estimates 2D features and then optimizes model parameters to fit the features. 
We improve on SMPLify in several significant ways: (1) we detect 2D features corresponding to the face, hands, and feet and fit the full SMPL-X model to these; (2) we train a new neural network pose prior using a large MoCap dataset; (3) we define a new interpenetration penalty that is both fast and accurate; (4) we automatically detect gender and the appropriate body models (male, female, or neutral); (5) our PyTorch implementation achieves a speedup of more than 8x over Chumpy. We use the new method, SMPLify-X, to fit SMPL-X to both controlled images and images in the wild. We evaluate 3D accuracy on a new curated dataset comprising 100 images with pseudo ground-truth. This is a step towards automatic expressive human capture from monocular RGB data. The models, code, and data are available for research purposes at https://smpl-x.is.tue.mpg.de.", "year": 2019, "venue": "Computer Vision and Pattern Recognition", "authors": [ "G. Pavlakos", "Vasileios Choutas", "N. Ghorbani", "Timo Bolkart", "Ahmed A. A. Osman", "Dimitrios Tzionas", "Michael J. Black" ], "externalIds": { "MAG": "2978956737", "DBLP": "conf/cvpr/PavlakosCGBOTB19", "ArXiv": "1904.05866", "DOI": "10.1109/CVPR.2019.01123", "CorpusId": 109932872 }, "url": "https://www.semanticscholar.org/paper/4be4707aba8d622a0553aa159dc92ae7f9af9c5e", "referenceCount": 81, "citationCount": 1384, "influentialCitationCount": 267, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "AMASS: Archive of Motion Capture As Surface Shapes", "abstract": "Large datasets are the cornerstone of recent advances in computer vision using deep learning. In contrast, existing human motion capture (mocap) datasets are small and the motions limited, hampering progress on learning models of human motion. While there are many different datasets available, they each use a different parameterization of the body, making it difficult to integrate them into a single meta dataset. To address this, we introduce AMASS, a large and varied database of human motion that unifies 15 different optical marker-based mocap datasets by representing them within a common framework and parameterization. We achieve this using a new method, MoSh++, that converts mocap data into realistic 3D human meshes represented by a rigged body model. Here we use SMPL [Loper et al., 2015], which is widely used and provides a standard skeletal representation as well as a fully rigged surface mesh. The method works for arbitrary marker sets, while recovering soft-tissue dynamics and realistic hand motion. We evaluate MoSh++ and tune its hyperparameters using a new dataset of 4D body scans that are jointly recorded with markerbased mocap. The consistent representation of AMASS makes it readily useful for animation, visualization, and generating training data for deep learning. Our dataset is significantly richer than previous human motion collections, having more than 40 hours of motion data, spanning over 300 subjects, more than 11000 motions, and will be publicly available to the research community.", "year": 2019, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Naureen Mahmood", "N. Ghorbani", "N. Troje", "Gerard Pons-Moll", "Michael J. 
Black" ], "externalIds": { "MAG": "2928521819", "DBLP": "journals/corr/abs-1904-03278", "ArXiv": "1904.03278", "DOI": "10.1109/ICCV.2019.00554", "CorpusId": 102351100 }, "url": "https://www.semanticscholar.org/paper/690c817ab5be8017ff713fa1028669debde205af", "referenceCount": 46, "citationCount": 970, "influentialCitationCount": 154, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Conceptual Captions: A Cleaned, Hypernymed, Image Alt-text Dataset For Automatic Image Captioning", "abstract": "We present a new dataset of image caption annotations, Conceptual Captions, which contains an order of magnitude more images than the MS-COCO dataset (Lin et al., 2014) and represents a wider variety of both images and image caption styles. We achieve this by extracting and filtering image caption annotations from billions of webpages. We also present quantitative evaluations of a number of image captioning models and show that a model architecture based on Inception-ResNetv2 (Szegedy et al., 2016) for image-feature extraction and Transformer (Vaswani et al., 2017) for sequence modeling achieves the best performance when trained on the Conceptual Captions dataset.", "year": 2018, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Piyush Sharma", "Nan Ding", "Sebastian Goodman", "Radu Soricut" ], "externalIds": { "MAG": "2886641317", "ACL": "P18-1238", "DBLP": "conf/acl/SoricutDSG18", "DOI": "10.18653/v1/P18-1238", "CorpusId": 51876975 }, "url": "https://www.semanticscholar.org/paper/b4df354db88a70183a64dbc9e56cf14e7669a6c0", "referenceCount": 33, "citationCount": 2080, "influentialCitationCount": 334, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DensePose: Dense Human Pose Estimation in the Wild", "abstract": "In this work we establish dense correspondences between an RGB image and a surface-based representation of the human body, a task we refer to as dense human pose estimation. We gather dense correspondences for 50K persons appearing in the COCO dataset by introducing an efficient annotation pipeline. We then use our dataset to train CNN-based systems that deliver dense correspondence 'in the wild', namely in the presence of background, occlusions and scale variations. We improve our training set's effectiveness by training an inpainting network that can fill in missing ground truth values and report improvements with respect to the best results that would be achievable in the past. We experiment with fully-convolutional networks and region-based models and observe a superiority of the latter. We further improve accuracy through cascading, obtaining a system that delivers highly-accurate results at multiple frames per second on a single gpu. Supplementary materials, data, code, and videos are provided on the project page http://densepose.org.", "year": 2018, "venue": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition", "authors": [ "R. Güler", "N. 
Neverova", "Iasonas Kokkinos" ], "externalIds": { "MAG": "2785694322", "ArXiv": "1802.00434", "DBLP": "journals/corr/abs-1802-00434", "DOI": "10.1109/CVPR.2018.00762", "CorpusId": 13637778 }, "url": "https://www.semanticscholar.org/paper/8c94385d45f5896e748e43171eeaaa259009faab", "referenceCount": 51, "citationCount": 1260, "influentialCitationCount": 142, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Building efficient, accurate character skins from examples", "abstract": "Good character animation requires convincing skin deformations including subtleties and details like muscle bulges. Such effects are typically created in commercial animation packages which provide very general and powerful tools. While these systems are convenient and flexible for artists, the generality often leads to characters that are slow to compute or that require a substantial amount of memory and thus cannot be used in interactive systems. Instead, interactive systems restrict artists to a specific character deformation model which is fast and memory efficient but is notoriously difficult to author and can suffer from many deformation artifacts. This paper presents an automated framework that allows character artists to use the full complement of tools in high-end systems to create characters for interactive systems. Our method starts with an arbitrarily rigged character in an animation system. A set of examples is exported, consisting of skeleton configurations paired with the deformed geometry as static meshes. Using these examples, we fit the parameters of a deformation model that best approximates the original data yet remains fast to compute and compact in memory.", "year": 2003, "venue": "ACM Transactions on Graphics", "authors": [ "Alex Mohr", "Michael Gleicher" ], "externalIds": { "MAG": "2999456009", "DBLP": "journals/tog/MohrG03", "DOI": "10.1145/1201775.882308", "CorpusId": 13962529 }, "url": "https://www.semanticscholar.org/paper/fdada4e2e659a72fd855f578150c1837e1a377d2", "referenceCount": 19, "citationCount": 332, "influentialCitationCount": 23, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Occlusion Culling Algorithms: A Comprehensive Survey", "abstract": null, "year": 2002, "venue": "J. Intell. Robotic Syst.", "authors": [ "Ioannis S. Pantazopoulos", "S. 
Tzafestas" ], "externalIds": { "MAG": "1539604522", "DBLP": "journals/jirs/PantazopoulosT02", "DOI": "10.1023/A:1021175220384", "CorpusId": 46139505 }, "url": "https://www.semanticscholar.org/paper/c8a679622d3ac764aec5a928b9b84910133a1d78", "referenceCount": 52, "citationCount": 31, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "CCM: Real-Time Controllable Visual Content Creation Using Text-to-Image Consistency Models", "abstract": null, "year": 2024, "venue": "International Conference on Machine Learning", "authors": [ "Jie Xiao", "Kai Zhu", "Han Zhang", "Zhiheng Liu", "Yujun Shen", "Zhantao Yang", "Ruili Feng", "Yu Liu", "Xueyang Fu", "Zheng-Jun Zha" ], "externalIds": { "DBLP": "conf/icml/000200LSYF0FZ24", "CorpusId": 272330582 }, "url": "https://www.semanticscholar.org/paper/83fe755e953a8cb86aee9350713d30bf16410cc2", "referenceCount": 0, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learn to Dance with AIST++: Music Conditioned 3D Dance Generation", "abstract": "In this paper, we present a transformer-based learning framework for 3D dance generation conditioned on music. We carefully design our network architecture and empirically study the keys for obtaining qualitatively pleasing results. The critical components include a deep cross-modal transformer, which well learns the correlation be-tween the music and dance motion; and the full-attention with future-N supervision mechanism which is essential in producing long-range non-freezing motion. In addition, we propose a new dataset of paired 3D motion and music called AIST++, which we reconstruct from the AIST multi-view dance videos. This dataset contains 1.1M frames of 3D dance motion in 1408 sequences, covering 10 gen-res of dance choreographies and accompanied with multi-view camera parameters. To our knowledge it is the largest dataset of this kind. Rich experiments on AIST++ demonstrate our method produces much better results than the state-of-the-art methods both qualitatively and quantitatively. Please watch the video project page and dataset at https://google.github.io/aichoreographer.", "year": 2021, "venue": "arXiv.org", "authors": [ "Ruilong Li", "Sha Yang", "D. A. Ross", "Angjoo Kanazawa" ], "externalIds": { "DBLP": "journals/corr/abs-2101-08779", "CorpusId": 231662465 }, "url": "https://www.semanticscholar.org/paper/5d2289dea222330dcab41b3fbec12a6de9c91365", "referenceCount": 99, "citationCount": 106, "influentialCitationCount": 19, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Supplementary Material to: Recovering Accurate 3D Human Pose in The Wild Using IMUs and a Moving Camera", "abstract": "This document contains additional experiments to the paper ”Recovering Accurate 3D Human Pose in The Wild Using IMUs and a Moving Camera”[1]. These experiments validate different aspects of VIP, our proposed method for combining IMU-based tracking with a single hand-held camera, and provide further details to the proposed 3DPW. In Section 1, we validate that the explicit modeling of IMU heading errors is an important ingredient of the proposed method. In Section 2 we evaluate tracking accuracy of VIP for an additional IMU sensor setup. In order to demonstrate the challenges of our newly recorded dataset in comparison to existing datasets, we evaluate three monocular 3D pose estimation methods in Section 3.", "year": 2018, "venue": "", "authors": [ "T. V. 
Marcard", "Roberto Henschel", "Michael J. Black", "B. Rosenhahn", "Gerard Pons-Moll" ], "externalIds": { "CorpusId": 51883209 }, "url": "https://www.semanticscholar.org/paper/c19ed5102ecd953d5c78d5a0b87eaa51658e07d8", "referenceCount": 5, "citationCount": 807, "influentialCitationCount": 129, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "“NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis,”", "abstract": null, "year": null, "venue": "Communications of the ACM", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": ") received the BE and PhD degrees from the University of Science and Technology of China, Hefei, China, in 2004 and 2009, respectively. He", "abstract": null, "year": null, "venue": "is currently", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Considering that existing pre-trained 2D diffusion models struggle to generate intricate hands or con-trol facial expressions", "abstract": null, "year": null, "venue": "3D meshes of SMPL-X body parts", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“Avatar-CLIP: Zero-Shot Text-Driven Generation and Animation of 3D Avatars,”", "abstract": null, "year": null, "venue": "ACM Transactions on Graphics (TOG)", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Generative Object Insertion in Gaussian Splatting with a Multi-View Diffusion Model": { "paper_title": "Generative Object Insertion in Gaussian Splatting with a Multi-View Diffusion Model", "arxiv_id": "2409.16938v1", "keyword": "diffusion model", "authors": [ "Hongliang Zhong", "Can Wang", "Jingbo Zhang", "Jing Liao" ], "references": [ { "title": "Style-NeRF2NeRF: 3D Style Transfer From Style-Aligned Multi-View Images", "abstract": "We propose a simple yet effective pipeline for stylizing a 3D scene, harnessing the power of 2D image diffusion models. Given a NeRF model reconstructed from a set of multi-view images, we perform 3D style transfer by refining the source NeRF model using stylized images generated by a style-aligned image-to-image diffusion model. Given a target style prompt, we first generate perceptually similar multi-view images by leveraging a depth-conditioned diffusion model with an attention-sharing mechanism. Next, based on the stylized multi-view images, we propose to guide the style transfer process with the sliced Wasserstein loss based on the feature maps extracted from a pre-trained CNN model. Our pipeline consists of decoupled steps, allowing users to test various prompt ideas and preview the stylized 3D result before proceeding to the NeRF fine-tuning stage. We demonstrate that our method can transfer diverse artistic styles to real-world 3D scenes with competitive quality. 
Result videos are also available on our project page: https://haruolabs.github.io/style-n2n/", "year": 2024, "venue": "arXiv.org", "authors": [ "Haruo Fujiwara", "Yusuke Mukuta", "Tatsuya Harada" ], "externalIds": { "DBLP": "journals/corr/abs-2406-13393", "ArXiv": "2406.13393", "DOI": "10.48550/arXiv.2406.13393", "CorpusId": 270620858 }, "url": "https://www.semanticscholar.org/paper/91ec893a955cbf9f08b3fc23a7816f33f80d9b8c", "referenceCount": 61, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MVIP-NeRF: Multi-View 3D Inpainting on NeRF Scenes via Diffusion Prior", "abstract": "Despite the emergence of successful NeRF inpainting methods built upon explicit RGB and depth 2D inpainting supervisions, these methods are inherently constrained by the capabilities of their underlying 2D inpainters. This is due to two key reasons: (i) independently inpainting constituent images results in view-inconsistent imagery, and (ii) 2D inpainters struggle to ensure high-quality geometry completion and alignment with inpainted RGB images. To overcome these limitations, we propose a novel approach called MVIP-NeRF that harnesses the potential of diffusion priors for NeRF inpainting, addressing both appearance and geometry aspects. MVIP-NeRF performs joint inpainting across multiple views to reach a consistent solution, which is achieved via an iterative optimization process based on Score Distillation Sampling (SDS). Apart from recovering the rendered RGB images, we also extract normal maps as a geometric representation and define a normal SDS loss that motivates accurate geometry inpaint- ing and alignment with the appearance. Additionally, we formulate a multi-view SDS score function to distill generative priors simultaneously from different view images, ensuring consistent visual completion when dealing with large view variations. Our experimental results show better appearance and geometry recovery than previous NeRF in painting methods.", "year": 2024, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Honghua Chen", "Chen Change Loy", "Xingang Pan" ], "externalIds": { "DBLP": "journals/corr/abs-2405-02859", "ArXiv": "2405.02859", "DOI": "10.1109/CVPR52733.2024.00511", "CorpusId": 269605099 }, "url": "https://www.semanticscholar.org/paper/f7bea787bfe2b02b90d7f6f6bd8805f67a5e06aa", "referenceCount": 43, "citationCount": 1, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "InFusion: Inpainting 3D Gaussians via Learning Depth Completion from Diffusion Prior", "abstract": "3D Gaussians have recently emerged as an efficient representation for novel view synthesis. This work studies its editability with a particular focus on the inpainting task, which aims to supplement an incomplete set of 3D Gaussians with additional points for visually harmonious rendering. Compared to 2D inpainting, the crux of inpainting 3D Gaussians is to figure out the rendering-relevant properties of the introduced points, whose optimization largely benefits from their initial 3D positions. To this end, we propose to guide the point initialization with an image-conditioned depth completion model, which learns to directly restore the depth map based on the observed image. Such a design allows our model to fill in depth values at an aligned scale with the original depth, and also to harness strong generalizability from largescale diffusion prior. 
Thanks to the more accurate depth completion, our approach, dubbed InFusion, surpasses existing alternatives with sufficiently better fidelity and efficiency under various complex scenarios. We further demonstrate the effectiveness of InFusion with several practical applications, such as inpainting with user-specific texture or with novel object insertion.", "year": 2024, "venue": "arXiv.org", "authors": [ "Zhiheng Liu", "Ouyang Hao", "Qiuyu Wang", "Ka Leong Cheng", "Jie Xiao", "Kai Zhu", "Nan Xue", "Yu Liu", "Yujun Shen", "Yang Cao" ], "externalIds": { "ArXiv": "2404.11613", "DBLP": "journals/corr/abs-2404-11613", "DOI": "10.48550/arXiv.2404.11613", "CorpusId": 269187667 }, "url": "https://www.semanticscholar.org/paper/379fb2f4a6eaf8209ef3608e00c239d313807b35", "referenceCount": 118, "citationCount": 5, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DATENeRF: Depth-Aware Text-based Editing of NeRFs", "abstract": "Recent advancements in diffusion models have shown remarkable proficiency in editing 2D images based on text prompts. However, extending these techniques to edit scenes in Neural Radiance Fields (NeRF) is complex, as editing individual 2D frames can result in inconsistencies across multiple views. Our crucial insight is that a NeRF scene's geometry can serve as a bridge to integrate these 2D edits. Utilizing this geometry, we employ a depth-conditioned ControlNet to enhance the coherence of each 2D image modification. Moreover, we introduce an inpainting approach that leverages the depth information of NeRF scenes to distribute 2D edits across different images, ensuring robustness against errors and resampling challenges. Our results reveal that this methodology achieves more consistent, lifelike, and detailed edits than existing leading methods for text-driven NeRF scene editing.", "year": 2024, "venue": "arXiv.org", "authors": [ "Sara Rojas", "Julien Philip", "Kai Zhang", "Sai Bi", "Fujun Luan", "Bernard Ghanem", "Kalyan Sunkavalli" ], "externalIds": { "DBLP": "journals/corr/abs-2404-04526", "ArXiv": "2404.04526", "DOI": "10.48550/arXiv.2404.04526", "CorpusId": 269005490 }, "url": "https://www.semanticscholar.org/paper/2cd2e2d0c689bdc0cdf511025d4c5c8b727e10f1", "referenceCount": 57, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Texture-GS: Disentangling the Geometry and Texture for 3D Gaussian Splatting Editing", "abstract": "3D Gaussian splatting, emerging as a groundbreaking approach, has drawn increasing attention for its capabilities of high-fidelity reconstruction and real-time rendering. However, it couples the appearance and geometry of the scene within the Gaussian attributes, which hinders the flexibility of editing operations, such as texture swapping. To address this issue, we propose a novel approach, namely Texture-GS, to disentangle the appearance from the geometry by representing it as a 2D texture mapped onto the 3D surface, thereby facilitating appearance editing. Technically, the disentanglement is achieved by our proposed texture mapping module, which consists of a UV mapping MLP to learn the UV coordinates for the 3D Gaussian centers, a local Taylor expansion of the MLP to efficiently approximate the UV coordinates for the ray-Gaussian intersections, and a learnable texture to capture the fine-grained appearance. 
Extensive experiments on the DTU dataset demonstrate that our method not only facilitates high-fidelity appearance editing but also achieves real-time rendering on consumer-level devices, e.g. a single RTX 2080 Ti GPU.", "year": 2024, "venue": "arXiv.org", "authors": [ "Tianhan Xu", "Wenbo Hu", "Yu-Kun Lai", "Ying Shan", "Songiie Zhang" ], "externalIds": { "ArXiv": "2403.10050", "DBLP": "journals/corr/abs-2403-10050", "DOI": "10.48550/arXiv.2403.10050", "CorpusId": 268510017 }, "url": "https://www.semanticscholar.org/paper/b16378d058d3c8ec639ce0cbf2eded59125f752e", "referenceCount": 31, "citationCount": 5, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ReplaceAnything3D: Text-Guided 3D Scene Editing with Compositional Neural Radiance Fields", "abstract": "We introduce ReplaceAnything3D model (RAM3D), a novel text-guided 3D scene editing method that enables the replacement of specific objects within a scene. Given multi-view images of a scene, a text prompt describing the object to replace, and a text prompt describing the new object, our Erase-and-Replace approach can effectively swap objects in the scene with newly generated content while maintaining 3D consistency across multiple viewpoints. We demonstrate the versatility of ReplaceAnything3D by applying it to various realistic 3D scenes, showcasing results of modified foreground objects that are well-integrated with the rest of the scene without affecting its overall integrity.", "year": 2024, "venue": "arXiv.org", "authors": [ "Edward Bartrum", "Thu Nguyen-Phuoc", "Christopher Xie", "Zhengqin Li", "Numair Khan", "A. Avetisyan", "Douglas Lanman", "Lei Xiao" ], "externalIds": { "ArXiv": "2401.17895", "DBLP": "journals/corr/abs-2401-17895", "DOI": "10.48550/arXiv.2401.17895", "CorpusId": 267334707 }, "url": "https://www.semanticscholar.org/paper/529479ee539c8cbd7f85f0702a4290ad1e2956a9", "referenceCount": 53, "citationCount": 3, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RGBD Objects in the Wild: Scaling Real-World 3D Object Learning from RGB-D Videos", "abstract": "We introduce a new RGB-D object dataset captured in the wild called WildRGB-D. Unlike most existing real-world object-centric datasets which only come with RGB capturing, the direct capture of the depth channel allows better 3D annotations and broader downstream applications. WildRGB-D comprises large-scale category-level RGB-D object videos, which are taken using an iPhone to go around the objects in 360 degrees. It contains around 8500 recorded objects and nearly 20000 RGB-D videos across 46 common object categories. These videos are taken with diverse cluttered backgrounds with three setups to cover as many real-world scenarios as possible: (i) a single object in one video; (ii) multiple objects in one video; and (iii) an object with a static hand in one video. The dataset is annotated with object masks, real-world scale camera poses, and reconstructed aggregated point clouds from RGBD videos. We benchmark four tasks with WildRGB-D including novel view synthesis, camera pose estimation, object 6d pose estimation, and object surface reconstruction. Our experiments show that the large-scale capture of RGB-D objects provides a large potential to advance 3D object learning. 
Our project page is https://wildrgbd.github.io/.", "year": 2024, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Hongchi Xia", "Yang Fu", "Sifei Liu", "Xiaolong Wang" ], "externalIds": { "ArXiv": "2401.12592", "DBLP": "journals/corr/abs-2401-12592", "DOI": "10.1109/CVPR52733.2024.02112", "CorpusId": 267095041 }, "url": "https://www.semanticscholar.org/paper/4a6a666bbd2a1416b20cbf201bc884e0949cd704", "referenceCount": 85, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "NeRFEditor: Differentiable Style Decomposition for 3D Scene Editing", "abstract": "We present NeRFEditor, an efficient learning framework for 3D scene editing, which takes a video as input and outputs a high-quality, identity-preserving stylized 3D scene. Our goal is to bridge the gap between 2D and 3D editing, catering to a wide array of creative modifications such as reference-guided alterations, text-based prompts, and user interactions. We achieve this by encouraging a pre-trained StyleGAN model and a NeRF model to learn mutually consistent renderings. Specifically, we use NeRF to generate numerous (image, camera pose)-pairs to train an adjustor module, which adapts the StyleGAN latent code for generating high-fidelity stylized images from any given viewing angle. To extrapolate edits to novel views, i.e., those not seen by StyleGAN pre-training, while maintaining 360° consistency, we propose a second self-supervised module that maps these views into the hidden space of StyleGAN. Together these two modules produce sufficient guidance for NeRF to learn consistent stylization effects across the full range of views. Experiments show that NeRFEditor outperforms prior work on benchmark and real-world scenes with better editability, fidelity, and identity preservation.", "year": 2024, "venue": "IEEE Workshop/Winter Conference on Applications of Computer Vision", "authors": [ "Chunyi Sun", "Yanbin Liu", "Junlin Han", "Stephen Gould" ], "externalIds": { "DBLP": "conf/wacv/SunLHG24", "DOI": "10.1109/WACV57701.2024.00714", "CorpusId": 268753073 }, "url": "https://www.semanticscholar.org/paper/df1d36fc64162e7365748289b4fde8982b2c1f25", "referenceCount": 41, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Mesh-Guided Neural Implicit Field Editing", "abstract": "Neural implicit fields have emerged as a powerful 3D representation for reconstructing and rendering photo-realistic views, yet they possess limited editability. Conversely, explicit 3D representations, such as polygonal meshes, offer ease of editing but may not be as suitable for rendering high-quality novel views. To harness the strengths of both representations, we propose a new approach that employs a mesh as a guiding mechanism in editing the neural radiance field. We first introduce a differentiable method using marching tetrahedra for polygonal mesh extraction from the neural implicit field and then design a differentiable color extractor to assign colors obtained from the volume renderings to this extracted mesh. This differentiable colored mesh allows gradient back-propagation from the explicit mesh to the implicit fields, empowering users to easily manipulate the geometry and color of neural implicit fields. To enhance user control from coarse-grained to fine-grained levels, we introduce an octree-based structure into its optimization. 
This structure prioritizes the edited regions and the surface part, making our method achieve fine-grained edits to the neural implicit field and accommodate various user modifications, including object additions, component removals, specific area deformations, and adjustments to local and global colors. Through extensive experiments involving diverse scenes and editing operations, we have demonstrated the capabilities and effectiveness of our method. Our project page is: \\url{https://cassiepython.github.io/MNeuEdit/}", "year": 2023, "venue": "arXiv.org", "authors": [ "Can Wang", "Mingming He", "Menglei Chai", "Dongdong Chen", "Jing Liao" ], "externalIds": { "ArXiv": "2312.02157", "DBLP": "journals/corr/abs-2312-02157", "DOI": "10.48550/arXiv.2312.02157", "CorpusId": 265609951 }, "url": "https://www.semanticscholar.org/paper/82f2ea4bef81e072c4fe4a946740408d75ecd01f", "referenceCount": 42, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "HumanGaussian: Text-Driven 3D Human Generation with Gaussian Splatting", "abstract": "Realistic 3D human generation from text prompts is a desirable yet challenging task. Existing methods optimize 3D representations like mesh or neural fields via score distillation sampling (SDS), which suffers from inadequate fine details or excessive training time. In this paper, we propose an efficient yet effective framework, HumanGaussian, that generates high-quality 3D humans with fine-grained geometry and realistic appearance. Our key insight is that 3D Gaussian Splatting is an efficient renderer with periodic Gaussian shrinkage or growing, where such adaptive density control can be naturally guided by intrinsic human structures. Specifically, 1) we first propose a Structure-Aware SDS that simultaneously optimizes human appearance and geometry. The multi-modal score function from both RGB and depth space is leveraged to distill the Gaussian densification and pruning process. 2) Moreover, we devise an Annealed Negative Prompt Guidance by decomposing SDS into a noisier generative score and a cleaner classifier score, which well addresses the over-saturation issue. The floating artifacts are further eliminated based on Gaussian size in a prune-only phase to enhance generation smoothness. Extensive experiments demonstrate the superior efficiency and competitive quality of our framework, rendering vivid 3D humans under diverse scenarios.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Xian Liu", "Xiaohang Zhan", "Jiaxiang Tang", "Ying Shan", "Gang Zeng", "Dahua Lin", "Xihui Liu", "Ziwei Liu" ], "externalIds": { "ArXiv": "2311.17061", "DBLP": "journals/corr/abs-2311-17061", "DOI": "10.1109/CVPR52733.2024.00635", "CorpusId": 265466220 }, "url": "https://www.semanticscholar.org/paper/7665642af9e682e012bec045102a4d009421067c", "referenceCount": 102, "citationCount": 45, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multi-Scale 3D Gaussian Splatting for Anti-Aliased Rendering", "abstract": "3D Gaussians have recently emerged as a highly efficient representation for 3D reconstruction and rendering. Despite its high rendering quality and speed at high resolutions, they both deteriorate drastically when rendered at lower resolutions or from far away camera position. 
During low resolution or far away rendering, the pixel size of the image can fall below the Nyquist frequency compared to the screen size of each splatted 3D Gaussian and leads to aliasing effect. The rendering is also drastically slowed down by the sequential alpha blending of more splatted Gaussians per pixel. To address these issues, we propose a multi-scale 3D Gaussian splatting algorithm, which maintains Gaussians at different scales to represent the same scene. Higher-resolution images are rendered with more small Gaussians, and lower-resolution images are rendered with fewer larger Gaussians. With similar training time, our algorithm can achieve 13%-66% PSNR and 160%-2400% rendering speed improvement at 4 × -128 × scale rendering on Mip-NeRF360 dataset compared to the single scale 3D Gaussian splatting. More results and code are released on our project page.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Zhiwen Yan", "Weng Fei Low", "Yu Chen", "Gim Hee Lee" ], "externalIds": { "DBLP": "journals/corr/abs-2311-17089", "ArXiv": "2311.17089", "DOI": "10.1109/CVPR52733.2024.01977", "CorpusId": 265498759 }, "url": "https://www.semanticscholar.org/paper/6fc4869263ea4e9dde902d7cd9899436d9826dcc", "referenceCount": 21, "citationCount": 35, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Stable Video Diffusion: Scaling Latent Video Diffusion Models to Large Datasets", "abstract": "We present Stable Video Diffusion - a latent video diffusion model for high-resolution, state-of-the-art text-to-video and image-to-video generation. Recently, latent diffusion models trained for 2D image synthesis have been turned into generative video models by inserting temporal layers and finetuning them on small, high-quality video datasets. However, training methods in the literature vary widely, and the field has yet to agree on a unified strategy for curating video data. In this paper, we identify and evaluate three different stages for successful training of video LDMs: text-to-image pretraining, video pretraining, and high-quality video finetuning. Furthermore, we demonstrate the necessity of a well-curated pretraining dataset for generating high-quality videos and present a systematic curation process to train a strong base model, including captioning and filtering strategies. We then explore the impact of finetuning our base model on high-quality data and train a text-to-video model that is competitive with closed-source video generation. We also show that our base model provides a powerful motion representation for downstream tasks such as image-to-video generation and adaptability to camera motion-specific LoRA modules. Finally, we demonstrate that our model provides a strong multi-view 3D-prior and can serve as a base to finetune a multi-view diffusion model that jointly generates multiple views of objects in a feedforward fashion, outperforming image-based methods at a fraction of their compute budget. We release code and model weights at https://github.com/Stability-AI/generative-models .", "year": 2023, "venue": "arXiv.org", "authors": [ "A. 
Blattmann", "Tim Dockhorn", "Sumith Kulal", "Daniel Mendelevitch", "Maciej Kilian", "Dominik Lorenz" ], "externalIds": { "ArXiv": "2311.15127", "DBLP": "journals/corr/abs-2311-15127", "DOI": "10.48550/arXiv.2311.15127", "CorpusId": 265312551 }, "url": "https://www.semanticscholar.org/paper/1206b05eae5a06ba662ae79fb291b50e359c4f42", "referenceCount": 104, "citationCount": 384, "influentialCitationCount": 79, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GaussianEditor: Swift and Controllable 3D Editing with Gaussian Splatting", "abstract": "3D editing plays a crucial role in many areas such as gaming and virtual reality. Traditional 3D editing methods, which rely on representations like meshes and point clouds, often fall short in realistically depicting complex scenes. On the other hand, methods based on implicit 3D representations, like Neural Radiance Field (NeRF), render complex scenes effectively but suffer from slow processing speeds and limited control over specific scene areas. In response to these challenges, our paper presents GaussianEditor, the first 3D editing algorithm based on Gaussian Splatting (GS), a novel 3D representation. GaussianEditor enhances precision and control in editing through our proposed Gaussian semantic tracing, which traces the editing target throughout the training process. Additionally, we propose Hierarchical Gaussian splatting (HGS) to achieve stabilized and fine results under stochastic generative guidance from 2D diffusion models. We also develop editing strategies for efficient object removal and integration, a challenging task for existing methods. Our comprehensive experiments demonstrate GaussianEditor's superior control, effective, and efficient performance, marking a significant advancement in 3D editing.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Yiwen Chen", "Zilong Chen", "Chi Zhang", "Feng Wang", "Xiaofeng Yang", "Yikai Wang", "Zhongang Cai", "Lei Yang", "Huaping Liu", "Guosheng Lin" ], "externalIds": { "ArXiv": "2311.14521", "DBLP": "journals/corr/abs-2311-14521", "DOI": "10.1109/CVPR52733.2024.02029", "CorpusId": 265445359 }, "url": "https://www.semanticscholar.org/paper/bbc6531afdfe41fe8664002a80d9d73a07a080d2", "referenceCount": 60, "citationCount": 84, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Text-to-3D with Classifier Score Distillation", "abstract": "Text-to-3D generation has made remarkable progress recently, particularly with methods based on Score Distillation Sampling (SDS) that leverages pre-trained 2D diffusion models. While the usage of classifier-free guidance is well acknowledged to be crucial for successful optimization, it is considered an auxiliary trick rather than the most essential component. In this paper, we re-evaluate the role of classifier-free guidance in score distillation and discover a surprising finding: the guidance alone is enough for effective text-to-3D generation tasks. We name this method Classifier Score Distillation (CSD), which can be interpreted as using an implicit classification model for generation. This new perspective reveals new insights for understanding existing techniques. We validate the effectiveness of CSD across a variety of text-to-3D tasks including shape generation, texture synthesis, and shape editing, achieving results superior to those of state-of-the-art methods. 
Our project page is https://xinyu-andy.github.io/Classifier-Score-Distillation", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Xin Yu", "Yuanchen Guo", "Yangguang Li", "Ding Liang", "Song-Hai Zhang", "Xiaojuan Qi" ], "externalIds": { "DBLP": "conf/iclr/0004GLLZ024", "ArXiv": "2310.19415", "DOI": "10.48550/arXiv.2310.19415", "CorpusId": 264825424 }, "url": "https://www.semanticscholar.org/paper/4e21879b564cc2e803b16edf0dda9f1edb91b497", "referenceCount": 40, "citationCount": 49, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ControlStyle: Text-Driven Stylized Image Generation Using Diffusion Priors", "abstract": "Recently, the multimedia community has witnessed the rise of diffusion models trained on large-scale multi-modal data for visual content creation, particularly in the field of text-to-image generation. In this paper, we propose a new task for \"stylizing'' text-to-image models, namely text-driven stylized image generation, that further enhances editability in content creation. Given input text prompt and style image, this task aims to produce stylized images which are both semantically relevant to input text prompt and meanwhile aligned with the style image in style. To achieve this, we present a new diffusion model (ControlStyle) via upgrading a pre-trained text-to-image model with a trainable modulation network enabling more conditions of text prompts and style images. Moreover, diffusion style and content regularizations are simultaneously introduced to facilitate the learning of this modulation network with these diffusion priors, pursuing high-quality stylized text-to-image generation. Extensive experiments demonstrate the effectiveness of our ControlStyle in producing more visually pleasing and artistic results, surpassing a simple combination of text-to-image model and conventional style transfer techniques.", "year": 2023, "venue": "ACM Multimedia", "authors": [ "Jingwen Chen", "Yingwei Pan", "Ting Yao", "Tao Mei" ], "externalIds": { "DBLP": "conf/mm/ChenPY023", "ArXiv": "2311.05463", "DOI": "10.1145/3581783.3612524", "CorpusId": 264492716 }, "url": "https://www.semanticscholar.org/paper/f382ec8ba41d843d484690111f2d80cda9a42db1", "referenceCount": 44, "citationCount": 20, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Wonder3D: Single Image to 3D Using Cross-Domain Diffusion", "abstract": "In this work, we introduce Wonder3D, a novel method for efficiently generating high-fidelity textured meshes from single-view images. Recent methods based on Score Distillation Sampling (SDS) have shown the potential to recover 3D geometry from 2D diffusion priors, but they typically suffer from time-consuming per-shape optimization and inconsistent geometry. In contrast, certain works di-rectly produce 3D information via fast network inferences, but their results are often of low quality and lack geometric details. To holistically improve the quality, consistency, and efficiency of single-view reconstruction tasks, we pro-pose a cross-domain diffusion model that generates multi-view normal maps and the corresponding color images. To ensure the consistency of generation, we employ a multi-view cross-domain attention mechanism that facilitates information exchange across views and modalities. 
Lastly, we introduce a geometry-aware normal fusion algorithm that extracts high-quality surfaces from the multi-view 2D representations in only 2~3 minutes. Our extensive evaluations demonstrate that our method achieves high-quality reconstruction results, robust generalization, and good efficiency compared to prior works.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Xiaoxiao Long", "Yuanchen Guo", "Cheng Lin", "Yuan Liu", "Zhiyang Dou", "Lingjie Liu", "Yuexin Ma", "Song-Hai Zhang", "Marc Habermann", "Christian Theobalt", "Wenping Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2310-15008", "ArXiv": "2310.15008", "DOI": "10.1109/CVPR52733.2024.00951", "CorpusId": 264436465 }, "url": "https://www.semanticscholar.org/paper/d2c5565a039f464b778e0f2263da418ef42e98b0", "referenceCount": 84, "citationCount": 199, "influentialCitationCount": 35, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DreamEditor: Text-Driven 3D Scene Editing with Neural Fields", "abstract": "Neural fields have achieved impressive advancements in view synthesis and scene reconstruction. However, editing these neural fields remains challenging due to the implicit encoding of geometry and texture information. In this paper, we propose DreamEditor, a novel framework that enables users to perform controlled editing of neural fields using text prompts. By representing scenes as mesh-based neural fields, DreamEditor allows localized editing within specific regions. DreamEditor utilizes the text encoder of a pretrained text-to-Image diffusion model to automatically identify the regions to be edited based on the semantics of the text prompts. Subsequently, DreamEditor optimizes the editing region and aligns its geometry and texture with the text prompts through score distillation sampling [Poole et al. 2022]. Extensive experiments have demonstrated that DreamEditor can accurately edit neural fields of real-world scenes according to the given text prompts while ensuring consistency in irrelevant areas. DreamEditor generates highly realistic textures and geometry, significantly surpassing previous works in both quantitative and qualitative evaluations.", "year": 2023, "venue": "ACM SIGGRAPH Conference and Exhibition on Computer Graphics and Interactive Techniques in Asia", "authors": [ "Jingyu Zhuang", "Chen Wang", "Lingjie Liu", "Liang Lin", "Guanbin Li" ], "externalIds": { "DBLP": "conf/siggrapha/ZhuangWLLL23", "ArXiv": "2306.13455", "DOI": "10.1145/3610548.3618190", "CorpusId": 259243782 }, "url": "https://www.semanticscholar.org/paper/029f3e2c215edac138be26ade67b3d70b8f74dd7", "referenceCount": 58, "citationCount": 80, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Local 3D Editing via 3D Distillation of CLIP Knowledge", "abstract": "3D content manipulation is an important computer vision task with many real-world applications (e.g., product design, cartoon generation, and 3D Avatar editing). Recently proposed 3D GANs can generate diverse photorealistic 3D-aware contents using Neural Radiance fields (NeRF). However, manipulation of NeRF still remains a challenging problem since the visual quality tends to degrade after manipulation and suboptimal control handles such as 2D semantic maps are used for manipulations. While text-guided manipulations have shown potential in 3D editing, such approaches often lack locality. 
To overcome these problems, we propose Local Editing NeRF (LENeRF), which only requires text inputs for fine-grained and localized manipulation. Specifically, we present three add-on modules of LENeRF, the Latent Residual Mapper, the Attention Field Network, and the Deformation Network, which are jointly used for local manipulations of 3D features by estimating a 3D attention field. The 3D attention field is learned in an unsupervised way, by distilling the zero-shot mask generation capability of CLIP to the 3D space with multi-view guidance. We conduct diverse experiments and thorough evaluations both quantitatively and qualitatively. We will make our code publicly available.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "J. Hyung", "S. Hwang", "Daejin Kim", "Hyunji Lee", "J. Choo" ], "externalIds": { "DBLP": "conf/cvpr/HyungHKLC23", "ArXiv": "2306.12570", "DOI": "10.1109/CVPR52729.2023.01219", "CorpusId": 259224838 }, "url": "https://www.semanticscholar.org/paper/9ee81e0cbe3672a8f9d72ae8692b88e38316f605", "referenceCount": 61, "citationCount": 20, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "HIFA: High-fidelity Text-to-3D Generation with Advanced Diffusion Guidance", "abstract": "The advancements in automatic text-to-3D generation have been remarkable. Most existing methods use pre-trained text-to-image diffusion models to optimize 3D representations like Neural Radiance Fields (NeRFs) via latent-space denoising score matching. Yet, these methods often result in artifacts and inconsistencies across different views due to their suboptimal optimization approaches and limited understanding of 3D geometry. Moreover, the inherent constraints of NeRFs in rendering crisp geometry and stable textures usually lead to a two-stage optimization to attain high-resolution details. This work proposes holistic sampling and smoothing approaches to achieve high-quality text-to-3D generation, all in a single-stage optimization. We compute denoising scores in the text-to-image diffusion model's latent and image spaces. Instead of randomly sampling timesteps (also referred to as noise levels in denoising score matching), we introduce a novel timestep annealing approach that progressively reduces the sampled timestep throughout optimization. To generate high-quality renderings in a single-stage optimization, we propose regularization for the variance of z-coordinates along NeRF rays. To address texture flickering issues in NeRFs, we introduce a kernel smoothing technique that refines importance sampling weights coarse-to-fine, ensuring accurate and thorough sampling in high-density regions. 
Extensive experiments demonstrate the superiority of our method over previous approaches, enabling the generation of highly detailed and view-consistent 3D assets through a single-stage training process.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Junzhe Zhu", "Peiye Zhuang", "Oluwasanmi Koyejo" ], "externalIds": { "ArXiv": "2305.18766", "DBLP": "conf/iclr/ZhuZK24", "CorpusId": 258967476 }, "url": "https://www.semanticscholar.org/paper/daf3b117f789b2b95223e58592979fb57627515e", "referenceCount": 47, "citationCount": 44, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ProlificDreamer: High-Fidelity and Diverse Text-to-3D Generation with Variational Score Distillation", "abstract": "Score distillation sampling (SDS) has shown great promise in text-to-3D generation by distilling pretrained large-scale text-to-image diffusion models, but suffers from over-saturation, over-smoothing, and low-diversity problems. In this work, we propose to model the 3D parameter as a random variable instead of a constant as in SDS and present variational score distillation (VSD), a principled particle-based variational framework to explain and address the aforementioned issues in text-to-3D generation. We show that SDS is a special case of VSD and leads to poor samples with both small and large CFG weights. In comparison, VSD works well with various CFG weights as ancestral sampling from diffusion models and simultaneously improves the diversity and sample quality with a common CFG weight (i.e., $7.5$). We further present various improvements in the design space for text-to-3D such as distillation time schedule and density initialization, which are orthogonal to the distillation algorithm yet not well explored. Our overall approach, dubbed ProlificDreamer, can generate high rendering resolution (i.e., $512\\times512$) and high-fidelity NeRF with rich structure and complex effects (e.g., smoke and drops). Further, initialized from NeRF, meshes fine-tuned by VSD are meticulously detailed and photo-realistic. Project page and codes: https://ml.cs.tsinghua.edu.cn/prolificdreamer/", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Zhengyi Wang", "Cheng Lu", "Yikai Wang", "Fan Bao", "Chongxuan Li", "Hang Su", "Jun Zhu" ], "externalIds": { "ArXiv": "2305.16213", "DBLP": "conf/nips/Wang00BL0023", "DOI": "10.48550/arXiv.2305.16213", "CorpusId": 258887357 }, "url": "https://www.semanticscholar.org/paper/c5e9fd131cde68c218d0ea69cd617a67c7f35d42", "referenceCount": 57, "citationCount": 524, "influentialCitationCount": 106, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Segment and Track Anything", "abstract": "This report presents a framework called Segment And Track Anything (SAMTrack) that allows users to precisely and effectively segment and track any object in a video. Additionally, SAM-Track employs multimodal interaction methods that enable users to select multiple objects in videos for tracking, corresponding to their specific requirements. These interaction methods comprise click, stroke, and text, each possessing unique benefits and capable of being employed in combination. As a result, SAM-Track can be used across an array of fields, ranging from drone technology, autonomous driving, medical imaging, augmented reality, to biological analysis. 
SAM-Track amalgamates Segment Anything Model (SAM), an interactive key-frame segmentation model, with our proposed AOT-based tracking model (DeAOT), which secured 1st place in four tracks of the VOT 2022 challenge, to facilitate object tracking in video. In addition, SAM-Track incorporates Grounding-DINO, which enables the framework to support text-based interaction. We have demonstrated the remarkable capabilities of SAM-Track on DAVIS-2016 Val (92.0%), DAVIS-2017 Test (79.2%)and its practicability in diverse applications. The project page is available at: https://github.com/z-x-yang/Segment-and-Track-Anything.", "year": 2023, "venue": "arXiv.org", "authors": [ "Yangming Cheng", "Liulei Li", "Yuanyou Xu", "Xiaodi Li", "Zongxin Yang", "Wenguan Wang", "Yi Yang" ], "externalIds": { "ArXiv": "2305.06558", "DBLP": "journals/corr/abs-2305-06558", "DOI": "10.48550/arXiv.2305.06558", "CorpusId": 258615204 }, "url": "https://www.semanticscholar.org/paper/bbdc4118df106d4ba7af9d7d94d7f0a1144c11e2", "referenceCount": 22, "citationCount": 132, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Instruct-NeRF2NeRF: Editing 3D Scenes with Instructions", "abstract": "We propose a method for editing NeRF scenes with text-instructions. Given a NeRF of a scene and the collection of images used to reconstruct it, our method uses an image-conditioned diffusion model (InstructPix2Pix) to iteratively edit the input images while optimizing the underlying scene, resulting in an optimized 3D scene that respects the edit instruction. We demonstrate that our proposed method is able to edit large-scale, real-world scenes, and is able to accomplish more realistic, targeted edits than prior work. Result videos can be found on the project website: https://instruct-nerf2nerf.github.io.", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Ayaan Haque", "Matthew Tancik", "Alexei A. Efros", "Aleksander Holynski", "Angjoo Kanazawa" ], "externalIds": { "DBLP": "conf/iccv/HaqueTEHK23", "ArXiv": "2303.12789", "DOI": "10.1109/ICCV51070.2023.01808", "CorpusId": 257663414 }, "url": "https://www.semanticscholar.org/paper/26c22380282a00166273038bc5ba785d845d61ad", "referenceCount": 61, "citationCount": 246, "influentialCitationCount": 64, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SKED: Sketch-guided Text-based 3D Editing", "abstract": "Text-to-image diffusion models are gradually introduced into computer graphics, recently enabling the development of Text-to-3D pipelines in an open domain. However, for interactive editing purposes, local manipulations of content through a simplistic textual interface can be arduous. Incorporating user guided sketches with Text-to-image pipelines offers users more intuitive control. Still, as state-of-the-art Text-to-3D pipelines rely on optimizing Neural Radiance Fields (NeRF) through gradients from arbitrary rendering views, conditioning on sketches is not straightforward. In this paper, we present SKED, a technique for editing 3D shapes represented by NeRFs. Our technique utilizes as few as two guiding sketches from different views to alter an existing neural field. The edited region respects the prompt semantics through a pre-trained diffusion model. To ensure the generated output adheres to the provided sketches, we propose novel loss functions to generate the desired edits while preserving the density and radiance of the base instance. 
We demonstrate the effectiveness of our proposed method through several qualitative and quantitative experiments. https://sked-paper.github.io/", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Aryan Mikaeili", "Or Perel", "D. Cohen-Or", "Ali Mahdavi-Amiri" ], "externalIds": { "DBLP": "conf/iccv/MikaeiliPSCM23", "ArXiv": "2303.10735", "DOI": "10.1109/ICCV51070.2023.01343", "CorpusId": 257632144 }, "url": "https://www.semanticscholar.org/paper/6ebec1ece44daa090158ff2531d6fabb94a4e683", "referenceCount": 89, "citationCount": 43, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Grounding DINO: Marrying DINO with Grounded Pre-Training for Open-Set Object Detection", "abstract": "In this paper, we present an open-set object detector, called Grounding DINO, by marrying Transformer-based detector DINO with grounded pre-training, which can detect arbitrary objects with human inputs such as category names or referring expressions. The key solution of open-set object detection is introducing language to a closed-set detector for open-set concept generalization. To effectively fuse language and vision modalities, we conceptually divide a closed-set detector into three phases and propose a tight fusion solution, which includes a feature enhancer, a language-guided query selection, and a cross-modality decoder for cross-modality fusion. While previous works mainly evaluate open-set object detection on novel categories, we propose to also perform evaluations on referring expression comprehension for objects specified with attributes. Grounding DINO performs remarkably well on all three settings, including benchmarks on COCO, LVIS, ODinW, and RefCOCO/+/g. Grounding DINO achieves a $52.5$ AP on the COCO detection zero-shot transfer benchmark, i.e., without any training data from COCO. It sets a new record on the ODinW zero-shot benchmark with a mean $26.1$ AP. Code will be available at \\url{https://github.com/IDEA-Research/GroundingDINO}.", "year": 2023, "venue": "arXiv.org", "authors": [ "Shilong Liu", "Zhaoyang Zeng", "Tianhe Ren", "Feng Li", "Hao Zhang", "Jie Yang", "Chun-yue Li", "Jianwei Yang", "Hang Su", "Jun-Juan Zhu", "Lei Zhang" ], "externalIds": { "DBLP": "journals/corr/abs-2303-05499", "ArXiv": "2303.05499", "DOI": "10.48550/arXiv.2303.05499", "CorpusId": 257427307 }, "url": "https://www.semanticscholar.org/paper/c3e5a20b844c042d2174263d2fd5b30d8cc8f0b0", "referenceCount": 68, "citationCount": 988, "influentialCitationCount": 147, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "NeRF-Art: Text-Driven Neural Radiance Fields Stylization", "abstract": "As a powerful representation of 3D scenes, the neural radiance field (NeRF) enables high-quality novel view synthesis from multi-view images. Stylizing NeRF, however, remains challenging, especially in simulating a text-guided style with both the appearance and the geometry altered simultaneously. In this paper, we present NeRF-Art, a text-guided NeRF stylization approach that manipulates the style of a pre-trained NeRF model with a simple text prompt. Unlike previous approaches that either lack sufficient geometry deformations and texture details or require meshes to guide the stylization, our method can shift a 3D scene to the target style characterized by desired geometry and appearance variations without any mesh guidance. 
This is achieved by introducing a novel global-local contrastive learning strategy, combined with the directional constraint to simultaneously control both the trajectory and the strength of the target style. Moreover, we adopt a weight regularization method to effectively suppress cloudy artifacts and geometry noises which arise easily when the density field is transformed during geometry stylization. Through extensive experiments on various styles, we demonstrate that our method is effective and robust regarding both single-view stylization quality and cross-view consistency.", "year": 2022, "venue": "IEEE Transactions on Visualization and Computer Graphics", "authors": [ "Can Wang", "Ruixia Jiang", "Menglei Chai", "Mingming He", "Dongdong Chen", "Jing Liao" ], "externalIds": { "DBLP": "journals/corr/abs-2212-08070", "ArXiv": "2212.08070", "DOI": "10.1109/TVCG.2023.3283400", "CorpusId": 254685994, "PubMed": "37279137" }, "url": "https://www.semanticscholar.org/paper/82660ea43e4ea63688bdff9ca6f0485822f2d6f3", "referenceCount": 100, "citationCount": 100, "influentialCitationCount": 12, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "SmartBrush: Text and Shape Guided Object Inpainting with Diffusion Model", "abstract": "Generic image inpainting aims to complete a corrupted image by borrowing surrounding information, which barely generates novel content. By contrast, multi-modal inpainting provides more flexible and useful controls on the inpainted content, e.g., a text prompt can be used to describe an object with richer attributes, and a mask can be used to constrain the shape of the inpainted object rather than being only considered as a missing area. We propose a new diffusion-based model named SmartBrush for completing a missing region with an object using both text and shape-guidance. While previous work such as DALLE-2 and Stable Diffusion can do text-guided inapinting they do not support shape guidance and tend to modify background texture surrounding the generated object. Our model incorporates both text and shape guidance with precision control. To preserve the background better, we propose a novel training and sampling strategy by augmenting the diffusion U-net with object-mask prediction. Lastly, we introduce a multi-task training strategy by jointly training inpainting with text-to-image generation to leverage more training data. We conduct extensive experiments showing that our model outperforms all baselines in terms of visual quality, mask controllability, and background preservation.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Shaoan Xie", "Zhifei Zhang", "Zhe Lin", "T. Hinz", "Kun Zhang" ], "externalIds": { "DBLP": "journals/corr/abs-2212-05034", "ArXiv": "2212.05034", "DOI": "10.1109/CVPR52729.2023.02148", "CorpusId": 254535802 }, "url": "https://www.semanticscholar.org/paper/95ca7eb71b2aea74d7bbe23252e41c03070b807d", "referenceCount": 32, "citationCount": 138, "influentialCitationCount": 17, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "InstructPix2Pix: Learning to Follow Image Editing Instructions", "abstract": "We propose a method for editing images from human instructions: given an input image and a written instruction that tells the model what to do, our model follows these instructions to edit the image. 
To obtain training data for this problem, we combine the knowledge of two large pretrained models—a language model (GPT-3) and a text-to-image model (Stable Diffusion)—to generate a large dataset of image editing examples. Our conditional diffusion model, InstructPix2Pix, is trained on our generated data, and generalizes to real images and user-written instructions at inference time. Since it performs edits in the forward pass and does not require per-example fine-tuning or inversion, our model edits images quickly, in a matter of seconds. We show compelling editing results for a diverse collection of input images and written instructions.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Tim Brooks", "Aleksander Holynski", "Alexei A. Efros" ], "externalIds": { "DBLP": "conf/cvpr/BrooksHE23", "ArXiv": "2211.09800", "DOI": "10.1109/CVPR52729.2023.01764", "CorpusId": 253581213 }, "url": "https://www.semanticscholar.org/paper/a2d2bbe4c542173662a444b33b76c66992697830", "referenceCount": 74, "citationCount": 1074, "influentialCitationCount": 204, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Imagic: Text-Based Real Image Editing with Diffusion Models", "abstract": "Text-conditioned image editing has recently attracted considerable interest. However, most methods are currently limited to one of the following: specific editing types (e.g., object overlay, style transfer), synthetically generated images, or requiring multiple input images of a common object. In this paper we demonstrate, for the very first time, the ability to apply complex (e.g., non-rigid) text-based semantic edits to a single real image. For example, we can change the posture and composition of one or multiple objects inside an image, while preserving its original characteristics. Our method can make a standing dog sit down, cause a bird to spread its wings, etc. – each within its single high-resolution user-provided natural image. Contrary to previous work, our proposed method requires only a single input image and a target text (the desired edit). It operates on real images, and does not require any additional inputs (such as image masks or additional views of the object). Our method, called Imagic, leverages a pre-trained text-to-image diffusion model for this task. It produces a text embedding that aligns with both the input image and the target text, while fine-tuning the diffusion model to capture the image-specific appearance. We demonstrate the quality and versatility of Imagic on numerous inputs from various domains, showcasing a plethora of high quality complex semantic image edits, all within a single unified framework. To better assess performance, we introduce TEdBench, a highly challenging image editing benchmark. We conduct a user study, whose findings show that human raters prefer Imagic to previous leading editing methods on TEdBench.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Bahjat Kawar", "Shiran Zada", "Oran Lang", "Omer Tov", "Hui-Tang Chang", "Tali Dekel", "Inbar Mosseri", "M. 
Irani" ], "externalIds": { "ArXiv": "2210.09276", "DBLP": "journals/corr/abs-2210-09276", "DOI": "10.1109/CVPR52729.2023.00582", "CorpusId": 252918469 }, "url": "https://www.semanticscholar.org/paper/23e261a20a315059b4de5492ed071c97a20c12e7", "referenceCount": 71, "citationCount": 778, "influentialCitationCount": 67, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DreamFusion: Text-to-3D using 2D Diffusion", "abstract": "Recent breakthroughs in text-to-image synthesis have been driven by diffusion models trained on billions of image-text pairs. Adapting this approach to 3D synthesis would require large-scale datasets of labeled 3D data and efficient architectures for denoising 3D data, neither of which currently exist. In this work, we circumvent these limitations by using a pretrained 2D text-to-image diffusion model to perform text-to-3D synthesis. We introduce a loss based on probability density distillation that enables the use of a 2D diffusion model as a prior for optimization of a parametric image generator. Using this loss in a DeepDream-like procedure, we optimize a randomly-initialized 3D model (a Neural Radiance Field, or NeRF) via gradient descent such that its 2D renderings from random angles achieve a low loss. The resulting 3D model of the given text can be viewed from any angle, relit by arbitrary illumination, or composited into any 3D environment. Our approach requires no 3D training data and no modifications to the image diffusion model, demonstrating the effectiveness of pretrained image diffusion models as priors.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Ben Poole", "Ajay Jain", "J. Barron", "B. Mildenhall" ], "externalIds": { "DBLP": "conf/iclr/PooleJBM23", "ArXiv": "2209.14988", "DOI": "10.48550/arXiv.2209.14988", "CorpusId": 252596091 }, "url": "https://www.semanticscholar.org/paper/4c94d04afa4309ec2f06bdd0fe3781f91461b362", "referenceCount": 76, "citationCount": 1589, "influentialCitationCount": 404, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "NeRF-In: Free-Form NeRF Inpainting with RGB-D Priors", "abstract": "Though Neural Radiance Field (NeRF) demonstrates compelling novel view synthesis results, it is still unintuitive to edit a pre-trained NeRF because the neural network's parameters and the scene geometry/appearance are often not explicitly associated. In this paper, we introduce the first framework that enables users to remove unwanted objects or retouch undesired regions in a 3D scene represented by a pre-trained NeRF without any category-specific data and training. The user first draws a free-form mask to specify a region containing unwanted objects over a rendered view from the pre-trained NeRF. Our framework first transfers the user-provided mask to other rendered views and estimates guiding color and depth images within these transferred masked regions. Next, we formulate an optimization problem that jointly inpaints the image content in all masked regions across multiple views by updating the NeRF model's parameters. 
We demonstrate our framework on diverse scenes and show it obtained visual plausible and structurally consistent results across multiple views using shorter time and less user manual efforts.", "year": 2022, "venue": "arXiv.org", "authors": [ "Haolin Liu", "I-Chao Shen", "Binghui Chen" ], "externalIds": { "ArXiv": "2206.04901", "DBLP": "journals/corr/abs-2206-04901", "DOI": "10.48550/arXiv.2206.04901", "CorpusId": 249605553 }, "url": "https://www.semanticscholar.org/paper/ccab1de47eb320b98c085a5f55f05af1afe391c7", "referenceCount": 46, "citationCount": 40, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MDISN: Learning multiscale deformed implicit fields from single images", "abstract": null, "year": 2022, "venue": "Visual Informatics", "authors": [ "Yujie Wang", "Yixin Zhuang", "Yunzhe Liu", "Baoquan Chen" ], "externalIds": { "DBLP": "journals/vi/WangZLC22", "DOI": "10.1016/j.visinf.2022.03.003", "CorpusId": 247888004 }, "url": "https://www.semanticscholar.org/paper/64ab6de499abb7b9cdee0d615e9153c766bf0c3b", "referenceCount": 6, "citationCount": 7, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "High-Resolution Image Synthesis with Latent Diffusion Models", "abstract": "By decomposing the image formation process into a sequential application of denoising autoencoders, diffusion models (DMs) achieve state-of-the-art synthesis results on image data and beyond. Additionally, their formulation allows for a guiding mechanism to control the image generation process without retraining. However, since these models typically operate directly in pixel space, optimization of powerful DMs often consumes hundreds of GPU days and inference is expensive due to sequential evaluations. To enable DM training on limited computational resources while retaining their quality and flexibility, we apply them in the latent space of powerful pretrained autoencoders. In contrast to previous work, training diffusion models on such a representation allows for the first time to reach a near-optimal point between complexity reduction and detail preservation, greatly boosting visual fidelity. By introducing cross-attention layers into the model architecture, we turn diffusion models into powerful and flexible generators for general conditioning inputs such as text or bounding boxes and high-resolution synthesis becomes possible in a convolutional manner. Our latent diffusion models (LDMs) achieve new state of the art scores for image inpainting and class-conditional image synthesis and highly competitive performance on various tasks, including unconditional image generation, text-to-image synthesis, and super-resolution, while significantly reducing computational requirements compared to pixel-based DMs.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Robin Rombach", "A. Blattmann", "Dominik Lorenz", "Patrick Esser", "B. 
Ommer" ], "externalIds": { "ArXiv": "2112.10752", "DBLP": "journals/corr/abs-2112-10752", "DOI": "10.1109/CVPR52688.2022.01042", "CorpusId": 245335280 }, "url": "https://www.semanticscholar.org/paper/c10075b3746a9f3dd5811970e93c8ca3ad39b39d", "referenceCount": 110, "citationCount": 9847, "influentialCitationCount": 2744, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Towards efficient and photorealistic 3D human reconstruction: A brief survey", "abstract": null, "year": 2021, "venue": "Visual Informatics", "authors": [ "Lu Chen", "Sida Peng", "X. Zhou" ], "externalIds": { "DBLP": "journals/vi/ChenPZ21", "DOI": "10.1016/j.visinf.2021.10.003", "CorpusId": 244282342 }, "url": "https://www.semanticscholar.org/paper/70d90259f2e3df5f259e201d4682af5f4614849e", "referenceCount": 44, "citationCount": 22, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MUSIQ: Multi-scale Image Quality Transformer", "abstract": "Image quality assessment (IQA) is an important research topic for understanding and improving visual experience. The current state-of-the-art IQA methods are based on convolutional neural networks (CNNs). The performance of CNN-based models is often compromised by the fixed shape constraint in batch training. To accommodate this, the input images are usually resized and cropped to a fixed shape, causing image quality degradation. To address this, we design a multi-scale image quality Transformer (MUSIQ) to process native resolution images with varying sizes and aspect ratios. With a multi-scale image representation, our proposed method can capture image quality at different granularities. Furthermore, a novel hash-based 2D spatial embedding and a scale embedding is proposed to support the positional embedding in the multi-scale representation. Experimental results verify that our method can achieve state-of-the-art performance on multiple large scale IQA datasets such as PaQ-2-PiQ [41], SPAQ [11], and KonIQ-10k [16]. 1", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Junjie Ke", "Qifei Wang", "Yilin Wang", "P. Milanfar", "Feng Yang" ], "externalIds": { "DBLP": "journals/corr/abs-2108-05997", "ArXiv": "2108.05997", "DOI": "10.1109/ICCV48922.2021.00510", "CorpusId": 237048383 }, "url": "https://www.semanticscholar.org/paper/e3d06054af531ee2f42270d43100b309c28546ef", "referenceCount": 53, "citationCount": 359, "influentialCitationCount": 51, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Transferable Visual Models From Natural Language Supervision", "abstract": "State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. 
We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at https://github.com/OpenAI/CLIP.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Alec Radford", "Jong Wook Kim", "Chris Hallacy", "A. Ramesh", "Gabriel Goh", "Sandhini Agarwal", "Girish Sastry", "Amanda Askell", "Pamela Mishkin", "Jack Clark", "Gretchen Krueger", "I. Sutskever" ], "externalIds": { "ArXiv": "2103.00020", "DBLP": "conf/icml/RadfordKHRGASAM21", "CorpusId": 231591445 }, "url": "https://www.semanticscholar.org/paper/6f870f7f02a8c59c3e23f407f3ef00dd1dcf8fc4", "referenceCount": 220, "citationCount": 18886, "influentialCitationCount": 5013, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections", "abstract": "We present a learning-based method for synthesizing novel views of complex scenes using only unstructured collections of in-the-wild photographs. We build on Neural Radiance Fields (NeRF), which uses the weights of a multi-layer perceptron to model the density and color of a scene as a function of 3D coordinates. While NeRF works well on images of static subjects captured under controlled settings, it is incapable of modeling many ubiquitous, real-world phenomena in uncontrolled images, such as variable illumination or transient occluders. We introduce a series of extensions to NeRF to address these issues, thereby enabling accurate reconstructions from unstructured image collections taken from the internet. We apply our system, dubbed NeRF-W, to internet photo collections of famous landmarks, and demonstrate temporally consistent novel view renderings that are significantly closer to photorealism than the prior state of the art.", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Ricardo Martin-Brualla", "Noha Radwan", "Mehdi S. M. Sajjadi", "J. Barron", "Alexey Dosovitskiy", "Daniel Duckworth" ], "externalIds": { "DBLP": "journals/corr/abs-2008-02268", "ArXiv": "2008.02268", "MAG": "3047146825", "DOI": "10.1109/CVPR46437.2021.00713", "CorpusId": 220968781 }, "url": "https://www.semanticscholar.org/paper/691eddbfaebbc71f6a12d3c99d5c155042459434", "referenceCount": 48, "citationCount": 1229, "influentialCitationCount": 95, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "3D RECONSTRUCTION METHODS, A SURVEY", "abstract": "3D reconstruction technologies have evolved over the years. In this paper we try to highlight the evolution of the scanning technologies. The idea of a survey came up with our decision to look at 3D reconstruction methods. Little has been written about the methods in general, yet many developments have taken place in this area. This survey will prove useful for those intending to embark on research in 3D reconstruction technologies or are considering acquiring a 3D scanner. 
The survey takes a look at the major reconstruction methods, which are; Laser triangulation, Stereoscopy, Conoscopic holography and Moiré Interferometry. A review of the major producers of scanning technology for 3D reconstruction is also carried out.", "year": 2018, "venue": "", "authors": [ "J. Butime", "Íñigo Gutiérrez", "L. G. Corzo", "Carlos Flores Espronceda" ], "externalIds": { "MAG": "2556874127", "DOI": "10.5220/0001369704570463", "CorpusId": 4871280 }, "url": "https://www.semanticscholar.org/paper/1a639c74c0327c7982714b3f964fd09417ec4e9f", "referenceCount": 5, "citationCount": 14, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ED-NeRF: Efficient Text-Guided Editing of 3D Scene using Latent Space NeRF", "abstract": "Recently, there has been a significant advancement in text-to-image diffusion models, leading to groundbreaking performance in 2D image generation. These advancements have been extended to 3D models, enabling the generation of novel 3D objects from textual descriptions. This has evolved into NeRF editing methods, which allow the manipulation of existing 3D objects through textual conditioning. However, existing NeRF editing techniques have faced limitations in their performance due to slow training speeds and the use of loss functions that do not adequately consider editing. To address this, here we present a novel 3D NeRF editing approach dubbed ED-NeRF by successfully embedding real-world scenes into the latent space of the latent diffusion model (LDM) through a unique refinement layer. This approach enables us to obtain a NeRF backbone that is not only faster but also more amenable to editing compared to traditional image space NeRF editing. Furthermore, we propose an improved loss function tailored for editing by migrating the delta denoising score (DDS) distillation loss, originally used in 2D image editing to the three-dimensional domain. This novel loss function surpasses the well-known score distillation sampling (SDS) loss in terms of suitability for editing purposes. Our experimental results demonstrate that ED-NeRF achieves faster editing speed while producing improved output quality compared to state-of-the-art 3D editing models.", "year": 2023, "venue": "arXiv.org", "authors": [ "Jangho Park", "Gihyun Kwon", "Jong Chul Ye" ], "externalIds": { "DBLP": "journals/corr/abs-2310-02712", "DOI": "10.48550/arXiv.2310.02712", "CorpusId": 263620514 }, "url": "https://www.semanticscholar.org/paper/811be823eca290d51eb751a6c5ef254a273a9614", "referenceCount": 32, "citationCount": 25, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "NeRF-: Neural Radiance Fields Without Known Camera Parameters", "abstract": "radiance field representation (NeRF), novel view synthesis, camera pose estimation, deep learning.", "year": 2021, "venue": "arXiv.org", "authors": [ "Zirui Wang", "Shangzhe Wu", "Weidi Xie", "Min Chen", "V. Prisacariu" ], "externalIds": { "DBLP": "journals/corr/abs-2102-07064", "CorpusId": 231924858 }, "url": "https://www.semanticscholar.org/paper/6caf3307096a15832ace34a0d54cd28413503f8b", "referenceCount": 55, "citationCount": 482, "influentialCitationCount": 54, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields", "abstract": null, "year": 2021, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer", "abstract": null, "year": 2020, "venue": "IEEE transactions on pattern analysis and machine intelligence 44", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Adding conditional control to text-to-image diffusion models", "abstract": null, "year": null, "venue": "Proceedings of the IEEE/CVF International Conference on Computer Vision", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Uni-paint: A unified framework for multimodal image inpainting with pretrained diffusion model", "abstract": null, "year": null, "venue": "Proceedings of the 31st ACM International Conference on Multimedia", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Luciddreamer: Towards high-fidelity text-to-3d generation via interval score matching", "abstract": null, "year": null, "venue": "Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023a. Reference-guided controllable inpainting of neural radiance fields", "abstract": null, "year": null, "venue": "Proceedings of the IEEE/CVF International Conference on Computer Vision", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Segment anything", "abstract": null, "year": null, "venue": "Proceedings of the IEEE/CVF International Conference on Computer Vision", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. Diffmat: Latent diffusion models for image-guided material generation", "abstract": null, "year": null, "venue": "Visual Informatics", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2022. Mip-nerf 360: Unbounded anti-aliased neural radiance fields", "abstract": null, "year": null, "venue": "Proceedings of the IEEE/CVF conference on computer vision and pattern recognition", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "MaskBit: Embedding-free Image Generation via Bit Tokens": { "paper_title": "MaskBit: Embedding-free Image Generation via Bit Tokens", "arxiv_id": "2409.16211v1", "keyword": "diffusion model", "authors": [ "Mark Weber", "Lijun Yu", "Qihang Yu", "Xueqing Deng", "Xiaohui Shen", "Daniel Cremers", "Liang-Chieh Chen" ], "references": [ { "title": "Open-MAGVIT2: An Open-Source Project Toward Democratizing Auto-regressive Visual Generation", "abstract": "We present Open-MAGVIT2, a family of auto-regressive image generation models ranging from 300M to 1.5B. The Open-MAGVIT2 project produces an open-source replication of Google's MAGVIT-v2 tokenizer, a tokenizer with a super-large codebook (i.e., $2^{18}$ codes), and achieves the state-of-the-art reconstruction performance (1.17 rFID) on ImageNet $256 \\times 256$. Furthermore, we explore its application in plain auto-regressive models and validate scalability properties. To assist auto-regressive models in predicting with a super-large vocabulary, we factorize it into two sub-vocabulary of different sizes by asymmetric token factorization, and further introduce \"next sub-token prediction\" to enhance sub-token interaction for better generation quality. We release all models and codes to foster innovation and creativity in the field of auto-regressive visual generation.", "year": 2024, "venue": "", "authors": [ "Zhuoyan Luo", "Fengyuan Shi", "Yixiao Ge", "Yujiu Yang", "Limin Wang", "Ying Shan" ], "externalIds": { "ArXiv": "2409.04410", "CorpusId": 272463752 }, "url": "https://www.semanticscholar.org/paper/d42c930bb977bcc7543dd350f9129404198e7c6a", "referenceCount": 38, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Autoregressive Image Generation without Vector Quantization", "abstract": "Conventional wisdom holds that autoregressive models for image generation are typically accompanied by vector-quantized tokens. We observe that while a discrete-valued space can facilitate representing a categorical distribution, it is not a necessity for autoregressive modeling. In this work, we propose to model the per-token probability distribution using a diffusion procedure, which allows us to apply autoregressive models in a continuous-valued space. Rather than using categorical cross-entropy loss, we define a Diffusion Loss function to model the per-token probability. This approach eliminates the need for discrete-valued tokenizers. We evaluate its effectiveness across a wide range of cases, including standard autoregressive models and generalized masked autoregressive (MAR) variants. By removing vector quantization, our image generator achieves strong results while enjoying the speed advantage of sequence modeling. We hope this work will motivate the use of autoregressive generation in other continuous-valued domains and applications.
Code is available at: https://github.com/LTH14/mar", "year": 2024, "venue": "arXiv.org", "authors": [ "Tianhong Li", "Yonglong Tian", "He Li", "Mingyang Deng", "Kaiming He" ], "externalIds": { "DBLP": "journals/corr/abs-2406-11838", "ArXiv": "2406.11838", "DOI": "10.48550/arXiv.2406.11838", "CorpusId": 270560593 }, "url": "https://www.semanticscholar.org/paper/e46a79cf7c2c9192639f9e1d12280650e06b9d56", "referenceCount": 55, "citationCount": 7, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Alleviating Distortion in Image Generation via Multi-Resolution Diffusion Models", "abstract": "This paper presents innovative enhancements to diffusion models by integrating a novel multi-resolution network and time-dependent layer normalization. Diffusion models have gained prominence for their effectiveness in high-fidelity image generation. While conventional approaches rely on convolutional U-Net architectures, recent Transformer-based designs have demonstrated superior performance and scalability. However, Transformer architectures, which tokenize input data (via\"patchification\"), face a trade-off between visual fidelity and computational complexity due to the quadratic nature of self-attention operations concerning token length. While larger patch sizes enable attention computation efficiency, they struggle to capture fine-grained visual details, leading to image distortions. To address this challenge, we propose augmenting the Diffusion model with the Multi-Resolution network (DiMR), a framework that refines features across multiple resolutions, progressively enhancing detail from low to high resolution. Additionally, we introduce Time-Dependent Layer Normalization (TD-LN), a parameter-efficient approach that incorporates time-dependent parameters into layer normalization to inject time information and achieve superior performance. Our method's efficacy is demonstrated on the class-conditional ImageNet generation benchmark, where DiMR-XL variants outperform prior diffusion models, setting new state-of-the-art FID scores of 1.70 on ImageNet 256 x 256 and 2.89 on ImageNet 512 x 512. Project page: https://qihao067.github.io/projects/DiMR", "year": 2024, "venue": "arXiv.org", "authors": [ "Qihao Liu", "Zhanpeng Zeng", "Ju He", "Qihang Yu", "Xiaohui Shen", "Liang-Chieh Chen" ], "externalIds": { "ArXiv": "2406.09416", "DBLP": "journals/corr/abs-2406-09416", "DOI": "10.48550/arXiv.2406.09416", "CorpusId": 270440523 }, "url": "https://www.semanticscholar.org/paper/4853df65daa4c220a57b86960d04c2d1d0bc227a", "referenceCount": 63, "citationCount": 1, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An Image is Worth 32 Tokens for Reconstruction and Generation", "abstract": "Recent advancements in generative models have highlighted the crucial role of image tokenization in the efficient synthesis of high-resolution images. Tokenization, which transforms images into latent representations, reduces computational demands compared to directly processing pixels and enhances the effectiveness and efficiency of the generation process. Prior methods, such as VQGAN, typically utilize 2D latent grids with fixed downsampling factors. However, these 2D tokenizations face challenges in managing the inherent redundancies present in images, where adjacent regions frequently display similarities. 
To overcome this issue, we introduce Transformer-based 1-Dimensional Tokenizer (TiTok), an innovative approach that tokenizes images into 1D latent sequences. TiTok provides a more compact latent representation, yielding substantially more efficient and effective representations than conventional techniques. For example, a 256 x 256 x 3 image can be reduced to just 32 discrete tokens, a significant reduction from the 256 or 1024 tokens obtained by prior methods. Despite its compact nature, TiTok achieves competitive performance to state-of-the-art approaches. Specifically, using the same generator framework, TiTok attains 1.97 gFID, outperforming MaskGIT baseline significantly by 4.21 at ImageNet 256 x 256 benchmark. The advantages of TiTok become even more significant when it comes to higher resolution. At ImageNet 512 x 512 benchmark, TiTok not only outperforms state-of-the-art diffusion model DiT-XL/2 (gFID 2.74 vs. 3.04), but also reduces the image tokens by 64x, leading to 410x faster generation process. Our best-performing variant can significantly surpasses DiT-XL/2 (gFID 2.13 vs. 3.04) while still generating high-quality samples 74x faster.", "year": 2024, "venue": "arXiv.org", "authors": [ "Qihang Yu", "Mark Weber", "Xueqing Deng", "Xiaohui Shen", "Daniel Cremers", "Liang-Chieh Chen" ], "externalIds": { "ArXiv": "2406.07550", "DBLP": "journals/corr/abs-2406-07550", "DOI": "10.48550/arXiv.2406.07550", "CorpusId": 270379986 }, "url": "https://www.semanticscholar.org/paper/1e31f4a4ccfc0d1e461be05361d77b5e045f4d37", "referenceCount": 75, "citationCount": 7, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Autoregressive Model Beats Diffusion: Llama for Scalable Image Generation", "abstract": "We introduce LlamaGen, a new family of image generation models that apply original ``next-token prediction'' paradigm of large language models to visual generation domain. It is an affirmative answer to whether vanilla autoregressive models, e.g., Llama, without inductive biases on visual signals can achieve state-of-the-art image generation performance if scaling properly. We reexamine design spaces of image tokenizers, scalability properties of image generation models, and their training data quality. The outcome of this exploration consists of: (1) An image tokenizer with downsample ratio of 16, reconstruction quality of 0.94 rFID and codebook usage of 97% on ImageNet benchmark. (2) A series of class-conditional image generation models ranging from 111M to 3.1B parameters, achieving 2.18 FID on ImageNet 256x256 benchmarks, outperforming the popular diffusion models such as LDM, DiT. (3) A text-conditional image generation model with 775M parameters, from two-stage training on LAION-COCO and high aesthetics quality images, demonstrating competitive performance of visual quality and text alignment. (4) We verify the effectiveness of LLM serving frameworks in optimizing the inference speed of image generation models and achieve 326% - 414% speedup. 
We release all models and codes to facilitate open-source community of visual generation and multimodal foundation models.", "year": 2024, "venue": "arXiv.org", "authors": [ "Peize Sun", "Yi Jiang", "Shoufa Chen", "Shilong Zhang", "Bingyue Peng", "Ping Luo", "Zehuan Yuan" ], "externalIds": { "ArXiv": "2406.06525", "DBLP": "journals/corr/abs-2406-06525", "DOI": "10.48550/arXiv.2406.06525", "CorpusId": 270371603 }, "url": "https://www.semanticscholar.org/paper/b15e6e2b1d81bc110f8fc98c3caf2e25e2512539", "referenceCount": 98, "citationCount": 15, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Visual Autoregressive Modeling: Scalable Image Generation via Next-Scale Prediction", "abstract": "We present Visual AutoRegressive modeling (VAR), a new generation paradigm that redefines the autoregressive learning on images as coarse-to-fine\"next-scale prediction\"or\"next-resolution prediction\", diverging from the standard raster-scan\"next-token prediction\". This simple, intuitive methodology allows autoregressive (AR) transformers to learn visual distributions fast and generalize well: VAR, for the first time, makes GPT-like AR models surpass diffusion transformers in image generation. On ImageNet 256x256 benchmark, VAR significantly improve AR baseline by improving Frechet inception distance (FID) from 18.65 to 1.73, inception score (IS) from 80.4 to 350.2, with around 20x faster inference speed. It is also empirically verified that VAR outperforms the Diffusion Transformer (DiT) in multiple dimensions including image quality, inference speed, data efficiency, and scalability. Scaling up VAR models exhibits clear power-law scaling laws similar to those observed in LLMs, with linear correlation coefficients near -0.998 as solid evidence. VAR further showcases zero-shot generalization ability in downstream tasks including image in-painting, out-painting, and editing. These results suggest VAR has initially emulated the two important properties of LLMs: Scaling Laws and zero-shot task generalization. We have released all models and codes to promote the exploration of AR/VAR models for visual generation and unified learning.", "year": 2024, "venue": "arXiv.org", "authors": [ "Keyu Tian", "Yi Jiang", "Zehuan Yuan", "Bingyue Peng", "Liwei Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2404-02905", "ArXiv": "2404.02905", "DOI": "10.48550/arXiv.2404.02905", "CorpusId": 268876071 }, "url": "https://www.semanticscholar.org/paper/76e8218f657c77c38da44daaed5bb54ab727a8fc", "referenceCount": 103, "citationCount": 36, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Photorealistic Video Generation with Diffusion Models", "abstract": "We present W.A.L.T, a transformer-based approach for photorealistic video generation via diffusion modeling. Our approach has two key design decisions. First, we use a causal encoder to jointly compress images and videos within a unified latent space, enabling training and generation across modalities. Second, for memory and training efficiency, we use a window attention architecture tailored for joint spatial and spatiotemporal generative modeling. Taken together these design decisions enable us to achieve state-of-the-art performance on established video (UCF-101 and Kinetics-600) and image (ImageNet) generation benchmarks without using classifier free guidance. 
Finally, we also train a cascade of three models for the task of text-to-video generation consisting of a base latent video diffusion model, and two video super-resolution diffusion models to generate videos of $512 \\times 896$ resolution at $8$ frames per second.", "year": 2023, "venue": "arXiv.org", "authors": [ "Agrim Gupta", "Lijun Yu", "Kihyuk Sohn", "Xiuye Gu", "Meera Hahn", "Fei-Fei Li", "Irfan Essa", "Lu Jiang", "José Lezama" ], "externalIds": { "ArXiv": "2312.06662", "DBLP": "journals/corr/abs-2312-06662", "DOI": "10.48550/arXiv.2312.06662", "CorpusId": 266163109 }, "url": "https://www.semanticscholar.org/paper/905ba940236b00bebb2fd348d4d932e7887b0c0a", "referenceCount": 88, "citationCount": 72, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Pytorch Reproduction of Masked Generative Image Transformer", "abstract": "In this technical report, we present a reproduction of MaskGIT: Masked Generative Image Transformer, using PyTorch. The approach involves leveraging a masked bidirectional transformer architecture, enabling image generation with only few steps (8~16 steps) for 512 x 512 resolution images, i.e., ~64x faster than an auto-regressive approach. Through rigorous experimentation and optimization, we achieved results that closely align with the findings presented in the original paper. We match the reported FID of 7.32 with our replication and obtain 7.59 with similar hyperparameters on ImageNet at resolution 512 x 512. Moreover, we improve over the official implementation with some minor hyperparameter tweaking, achieving FID of 7.26. At the lower resolution of 256 x 256 pixels, our reimplementation scores 6.80, in comparison to the original paper's 6.18. To promote further research on Masked Generative Models and facilitate their reproducibility, we released our code and pre-trained weights openly at https://github.com/valeoai/MaskGIT-pytorch/", "year": 2023, "venue": "arXiv.org", "authors": [ "Victor Besnier", "Mickael Chen" ], "externalIds": { "DBLP": "journals/corr/abs-2310-14400", "ArXiv": "2310.14400", "DOI": "10.48550/arXiv.2310.14400", "CorpusId": 264425971 }, "url": "https://www.semanticscholar.org/paper/625364f77d2d234e276137bfea7a1c4e19166cc8", "referenceCount": 16, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Language Model Beats Diffusion -- Tokenizer is Key to Visual Generation", "abstract": "While Large Language Models (LLMs) are the dominant models for generative tasks in language, they do not perform as well as diffusion models on image and video generation. To effectively use LLMs for visual generation, one crucial component is the visual tokenizer that maps pixel-space inputs to discrete tokens appropriate for LLM learning. In this paper, we introduce MAGVIT-v2, a video tokenizer designed to generate concise and expressive tokens for both videos and images using a common token vocabulary. Equipped with this new tokenizer, we show that LLMs outperform diffusion models on standard image and video generation benchmarks including ImageNet and Kinetics. 
In addition, we demonstrate that our tokenizer surpasses the previously top-performing video tokenizer on two more tasks: (1) video compression comparable to the next-generation video codec (VCC) according to human evaluations, and (2) learning effective representations for action recognition tasks.", "year": 2023, "venue": "", "authors": [ "Lijun Yu", "José Lezama", "N. B. Gundavarapu", "Luca Versari", "Kihyuk Sohn", "David C. Minnen", "Yong Cheng", "Agrim Gupta", "Xiuye Gu", "Alexander G. Hauptmann", "Boqing Gong", "Ming-Hsuan Yang", "Irfan Essa", "David A. Ross", "Lu Jiang" ], "externalIds": { "ArXiv": "2310.05737", "CorpusId": 263830733 }, "url": "https://www.semanticscholar.org/paper/985f0c89c5a607742ec43c1fdc2cbfe54541cbad", "referenceCount": 82, "citationCount": 76, "influentialCitationCount": 12, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Finite Scalar Quantization: VQ-VAE Made Simple", "abstract": "We propose to replace vector quantization (VQ) in the latent representation of VQ-VAEs with a simple scheme termed finite scalar quantization (FSQ), where we project the VAE representation down to a few dimensions (typically less than 10). Each dimension is quantized to a small set of fixed values, leading to an (implicit) codebook given by the product of these sets. By appropriately choosing the number of dimensions and values each dimension can take, we obtain the same codebook size as in VQ. On top of such discrete representations, we can train the same models that have been trained on VQ-VAE representations. For example, autoregressive and masked transformer models for image generation, multimodal generation, and dense prediction computer vision tasks. Concretely, we employ FSQ with MaskGIT for image generation, and with UViM for depth estimation, colorization, and panoptic segmentation. Despite the much simpler design of FSQ, we obtain competitive performance in all these tasks. We emphasize that FSQ does not suffer from codebook collapse and does not need the complex machinery employed in VQ (commitment losses, codebook reseeding, code splitting, entropy penalties, etc.) to learn expressive discrete representations.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Fabian Mentzer", "David C. Minnen", "E. Agustsson", "M. Tschannen" ], "externalIds": { "DBLP": "journals/corr/abs-2309-15505", "ArXiv": "2309.15505", "DOI": "10.48550/arXiv.2309.15505", "CorpusId": 263153393 }, "url": "https://www.semanticscholar.org/paper/eb76781932d4cdc241f1d739f7a6e3d34a4eec87", "referenceCount": 51, "citationCount": 47, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Online Clustered Codebook", "abstract": "Vector Quantisation (VQ) is experiencing a comeback in machine learning, where it is increasingly used in representation learning. However, optimizing the codevectors in existing VQ-VAE is not entirely trivial. A problem is codebook collapse, where only a small subset of codevectors receive gradients useful for their optimisation, whereas a majority of them simply \"dies off\" and is never updated or used. This limits the effectiveness of VQ for learning larger codebooks in complex computer vision tasks that require high-capacity representations. In this paper, we present a simple alternative method for online codebook learning, Clustering VQ-VAE (CVQ-VAE). 
Our approach selects encoded features as anchors to update the \"dead\" codevectors, while optimising the codebooks which are alive via the original loss. This strategy brings unused codevectors closer in distribution to the encoded features, increasing the likelihood of being chosen and optimized. We extensively validate the generalization capability of our quantiser on various datasets, tasks (e.g. reconstruction and generation), and architectures (e.g. VQ-VAE, VQGAN, LDM). CVQ-VAE can be easily integrated into the existing models with just a few lines of code.", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Chuanxia Zheng", "A. Vedaldi" ], "externalIds": { "ArXiv": "2307.15139", "DBLP": "journals/corr/abs-2307-15139", "DOI": "10.1109/ICCV51070.2023.02084", "CorpusId": 260316394 }, "url": "https://www.semanticscholar.org/paper/80baf98b54cab677cc5617af8794ed4f53b7feee", "referenceCount": 47, "citationCount": 14, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Masked Diffusion Transformer is a Strong Image Synthesizer", "abstract": "Despite its success in image synthesis, we observe that diffusion probabilistic models (DPMs) often lack contextual reasoning ability to learn the relations among object parts in an image, leading to a slow learning process. To solve this issue, we propose a Masked Diffusion Transformer (MDT) that introduces a mask latent modeling scheme to explicitly enhance the DPMs’ ability to contextual relation learning among object semantic parts in an image. During training, MDT operates in the latent space to mask certain tokens. Then, an asymmetric masking diffusion transformer is designed to predict masked tokens from unmasked ones while maintaining the diffusion generation process. Our MDT can reconstruct the full information of an image from its incomplete contextual input, thus enabling it to learn the associated relations among image tokens. Experimental results show that MDT achieves superior image synthesis performance, e.g., a new SOTA FID score in the ImageNet data set, and has about 3× faster learning speed than the previous SOTA DiT. The source code is released at https://github.com/sail-sg/MDT.", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Shanghua Gao", "Pan Zhou", "Mingg-Ming Cheng", "Shuicheng Yan" ], "externalIds": { "DBLP": "conf/iccv/GaoZCY23", "ArXiv": "2303.14389", "DOI": "10.1109/ICCV51070.2023.02117", "CorpusId": 257767316 }, "url": "https://www.semanticscholar.org/paper/ca21f28e2a0a8205038301d8385151ab7ca2a050", "referenceCount": 65, "citationCount": 81, "influentialCitationCount": 16, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "simple diffusion: End-to-end diffusion for high resolution images", "abstract": "Currently, applying diffusion models in pixel space of high resolution images is difficult. Instead, existing approaches focus on diffusion in lower dimensional spaces (latent diffusion), or have multiple super-resolution levels of generation referred to as cascades. The downside is that these approaches add additional complexity to the diffusion framework. This paper aims to improve denoising diffusion for high resolution images while keeping the model as simple as possible. 
The paper is centered around the research question: How can one train a standard denoising diffusion models on high resolution images, and still obtain performance comparable to these alternate approaches? The four main findings are: 1) the noise schedule should be adjusted for high resolution images, 2) It is sufficient to scale only a particular part of the architecture, 3) dropout should be added at specific locations in the architecture, and 4) downsampling is an effective strategy to avoid high resolution feature maps. Combining these simple yet effective techniques, we achieve state-of-the-art on image generation among diffusion models without sampling modifiers on ImageNet.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Emiel Hoogeboom", "J. Heek", "Tim Salimans" ], "externalIds": { "DBLP": "journals/corr/abs-2301-11093", "ArXiv": "2301.11093", "DOI": "10.48550/arXiv.2301.11093", "CorpusId": 256274516 }, "url": "https://www.semanticscholar.org/paper/6e3a3b7a8a0376d867cad72eedf2f9b746f29a33", "referenceCount": 30, "citationCount": 147, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "GLIGEN: Open-Set Grounded Text-to-Image Generation", "abstract": "Large-scale text-to-image diffusion models have made amazing advances. However, the status quo is to use text input alone, which can impede controllability. In this work, we propose GLIGEN, Grounded-Language-to-Image Generation, a novel approach that builds upon and extends the functionality of existing pre-trained text-to-image diffusion models by enabling them to also be conditioned on grounding inputs. To preserve the vast concept knowledge of the pre-trained model, we freeze all of its weights and inject the grounding information into new trainable layers via a gated mechanism. Our model achieves open-world grounded text2img generation with caption and bounding box condition inputs, and the grounding ability generalizes well to novel spatial configurations and concepts. GLIGEN's zero-shot performance on COCO and LVIS outperforms existing supervised layout-to-image baselines by a large margin.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Yuheng Li", "Haotian Liu", "Qingyang Wu", "Fangzhou Mu", "Jianwei Yang", "Jianfeng Gao", "Chunyuan Li", "Yong Jae Lee" ], "externalIds": { "DBLP": "conf/cvpr/LiLWMYGLL23", "ArXiv": "2301.07093", "DOI": "10.1109/CVPR52729.2023.02156", "CorpusId": 255942528 }, "url": "https://www.semanticscholar.org/paper/994a1ce6677b496bd3c0c63aceafc6556005e994", "referenceCount": 80, "citationCount": 386, "influentialCitationCount": 78, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Muse: Text-To-Image Generation via Masked Generative Transformers", "abstract": "We present Muse, a text-to-image Transformer model that achieves state-of-the-art image generation performance while being significantly more efficient than diffusion or autoregressive models. Muse is trained on a masked modeling task in discrete token space: given the text embedding extracted from a pre-trained large language model (LLM), Muse is trained to predict randomly masked image tokens. Compared to pixel-space diffusion models, such as Imagen and DALL-E 2, Muse is significantly more efficient due to the use of discrete tokens and requiring fewer sampling iterations; compared to autoregressive models, such as Parti, Muse is more efficient due to the use of parallel decoding. 
The use of a pre-trained LLM enables fine-grained language understanding, translating to high-fidelity image generation and the understanding of visual concepts such as objects, their spatial relationships, pose, cardinality etc. Our 900M parameter model achieves a new SOTA on CC3M, with an FID score of 6.06. The Muse 3B parameter model achieves an FID of 7.88 on zero-shot COCO evaluation, along with a CLIP score of 0.32. Muse also directly enables a number of image editing applications without the need to fine-tune or invert the model: inpainting, outpainting, and mask-free editing. More results are available at https://muse-model.github.io", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Huiwen Chang", "Han Zhang", "Jarred Barber", "AJ Maschinot", "José Lezama", "Lu Jiang", "Ming Yang", "K. Murphy", "W. Freeman", "Michael Rubinstein", "Yuanzhen Li", "Dilip Krishnan" ], "externalIds": { "DBLP": "conf/icml/ChangZBML00MFRL23", "ArXiv": "2301.00704", "DOI": "10.48550/arXiv.2301.00704", "CorpusId": 255372955 }, "url": "https://www.semanticscholar.org/paper/2a3213cb3c755f036d5dfec7261d726a819c78c1", "referenceCount": 87, "citationCount": 377, "influentialCitationCount": 30, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Scalable Diffusion Models with Transformers", "abstract": "We explore a new class of diffusion models based on the transformer architecture. We train latent diffusion models of images, replacing the commonly-used U-Net backbone with a transformer that operates on latent patches. We analyze the scalability of our Diffusion Transformers (DiTs) through the lens of forward pass complexity as measured by Gflops. We find that DiTs with higher Gflops—through increased transformer depth/width or increased number of input tokens—consistently have lower FID. In addition to possessing good scalability properties, our largest DiT-XL/2 models outperform all prior diffusion models on the class-conditional ImageNet 512×512 and 256×256 benchmarks, achieving a state-of-the-art FID of 2.27 on the latter.", "year": 2022, "venue": "IEEE International Conference on Computer Vision", "authors": [ "William S. Peebles", "Saining Xie" ], "externalIds": { "DBLP": "journals/corr/abs-2212-09748", "ArXiv": "2212.09748", "DOI": "10.1109/ICCV51070.2023.00387", "CorpusId": 254854389 }, "url": "https://www.semanticscholar.org/paper/736973165f98105fec3729b7db414ae4d80fcbeb", "referenceCount": 69, "citationCount": 773, "influentialCitationCount": 131, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MAGVIT: Masked Generative Video Transformer", "abstract": "We introduce the MAsked Generative VIdeo Transformer, MAGVIT, to tackle various video synthesis tasks with a single model. We introduce a 3D tokenizer to quantize a video into spatial-temporal visual tokens and propose an embedding method for masked video token modeling to facilitate multi-task learning. We conduct extensive experiments to demonstrate the quality, efficiency, and flexibility of MAGVIT. Our experiments show that (i) MAGVIT performs favorably against state-of-the-art approaches and establishes the best-published FVD on three video generation benchmarks, including the challenging Kinetics-600. (ii) MAGVIT outperforms existing methods in inference time by two orders of magnitude against diffusion models and by 60x against autoregressive models. 
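As a small illustration of the "latent patches as tokens" design in the Diffusion Transformers (DiT) entry above, the sketch below flattens a latent feature map into a token sequence; the shapes and patch size are assumed for the example, and the transformer itself is omitted.

```python
import numpy as np

def patchify(latent, patch=2):
    """Split a (C, H, W) latent into non-overlapping patches and flatten each to a token.

    Returns an (N, patch*patch*C) array with N = (H/patch) * (W/patch) tokens, so halving
    the patch size quadruples the token count (and hence the transformer's compute).
    """
    c, h, w = latent.shape
    assert h % patch == 0 and w % patch == 0
    x = latent.reshape(c, h // patch, patch, w // patch, patch)
    x = x.transpose(1, 3, 2, 4, 0)               # (H/p, W/p, p, p, C)
    return x.reshape(-1, patch * patch * c)      # one row per token

latent = np.random.randn(4, 32, 32)              # assumed 32x32x4 VAE latent for illustration
tokens = patchify(latent, patch=2)
print(tokens.shape)                              # (256, 16)
```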
(iii) A single MAGVIT model supports ten diverse generation tasks and generalizes across videos from different visual domains. The source code and trained models will be released to the public at https://magvit.cs.cmu.edu.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Lijun Yu", "Yong Cheng", "Kihyuk Sohn", "José Lezama", "Han Zhang", "Huiwen Chang", "A. Hauptmann", "Ming-Hsuan Yang", "Yuan Hao", "Irfan Essa", "Lu Jiang" ], "externalIds": { "DBLP": "journals/corr/abs-2212-05199", "ArXiv": "2212.05199", "DOI": "10.1109/CVPR52729.2023.01008", "CorpusId": 254563906 }, "url": "https://www.semanticscholar.org/paper/fe34137e5cc07235eae65ce53a54cd226b9f8b23", "referenceCount": 86, "citationCount": 109, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LAION-5B: An open large-scale dataset for training next generation image-text models", "abstract": "Groundbreaking language-vision architectures like CLIP and DALL-E proved the utility of training on large amounts of noisy image-text data, without relying on expensive accurate labels used in standard vision unimodal supervised learning. The resulting models showed capabilities of strong text-guided image generation and transfer to downstream tasks, while performing remarkably at zero-shot classification with noteworthy out-of-distribution robustness. Since then, large-scale language-vision models like ALIGN, BASIC, GLIDE, Flamingo and Imagen made further improvements. Studying the training and capabilities of such models requires datasets containing billions of image-text pairs. Until now, no datasets of this size have been made openly available for the broader research community. To address this problem and democratize research on large-scale multi-modal models, we present LAION-5B - a dataset consisting of 5.85 billion CLIP-filtered image-text pairs, of which 2.32B contain English language. We show successful replication and fine-tuning of foundational models like CLIP, GLIDE and Stable Diffusion using the dataset, and discuss further experiments enabled with an openly available dataset of this scale. Additionally we provide several nearest neighbor indices, an improved web-interface for dataset exploration and subset generation, and detection scores for watermark, NSFW, and toxic content detection. Announcement page https://laion.ai/laion-5b-a-new-era-of-open-large-scale-multi-modal-datasets/", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Christoph Schuhmann", "Romain Beaumont", "Richard Vencu", "Cade Gordon", "Ross Wightman", "Mehdi Cherti", "Theo Coombes", "Aarush Katta", "Clayton Mullis", "Mitchell Wortsman", "P. Schramowski", "Srivatsa Kundurthy", "Katherine Crowson", "Ludwig Schmidt", "R. Kaczmarczyk", "J. Jitsev" ], "externalIds": { "DBLP": "conf/nips/SchuhmannBVGWCC22", "ArXiv": "2210.08402", "DOI": "10.48550/arXiv.2210.08402", "CorpusId": 252917726 }, "url": "https://www.semanticscholar.org/paper/e5c8960eb2ec034ffbd353ef39fd1cb541d3c7c9", "referenceCount": 109, "citationCount": 2214, "influentialCitationCount": 256, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "All are Worth Words: A ViT Backbone for Diffusion Models", "abstract": "Vision transformers (ViT) have shown promise in various vision tasks while the U-Net based on a convolutional neural network (CNN) remains dominant in diffusion models. 
We design a simple and general ViT-based architecture (named U-ViT) for image generation with diffusion models. U-ViT is characterized by treating all inputs including the time, condition and noisy image patches as tokens and employing long skip connections between shallow and deep layers. We evaluate U-ViT in unconditional and class-conditional image generation, as well as text-to-image generation tasks, where U-ViT is comparable if not superior to a CNN-based U-Net of a similar size. In particular, latent diffusion models with U-ViT achieve record-breaking FID scores of 2.29 in class-conditional image generation on ImageNet 256×256, and 5.48 in text-to-image generation on MS-COCO, among methods without accessing large external datasets during the training of generative models. Our results suggest that, for diffusion-based image modeling, the long skip connection is crucial while the down-sampling and upsampling operators in CNN-based U-Net are not always necessary. We believe that U-ViT can provide insights for future research on backbones in diffusion models and benefit generative modeling on large scale cross-modality datasets.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Fan Bao", "Shen Nie", "Kaiwen Xue", "Yue Cao", "Chongxuan Li", "Hang Su", "Jun Zhu" ], "externalIds": { "DBLP": "conf/cvpr/BaoNXCL0023", "ArXiv": "2209.12152", "DOI": "10.1109/CVPR52729.2023.02171", "CorpusId": 253581703 }, "url": "https://www.semanticscholar.org/paper/897f3bb5eacaa80359e81ff33378e1110e20ae95", "referenceCount": 87, "citationCount": 147, "influentialCitationCount": 18, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MoVQ: Modulating Quantized Vectors for High-Fidelity Image Generation", "abstract": "Although two-stage Vector Quantized (VQ) generative models allow for synthesizing high-fidelity and high-resolution images, their quantization operator encodes similar patches within an image into the same index, resulting in a repeated artifact for similar adjacent regions using existing decoder architectures. To address this issue, we propose to incorporate the spatially conditional normalization to modulate the quantized vectors so as to insert spatially variant information to the embedded index maps, encouraging the decoder to generate more photorealistic images. Moreover, we use multichannel quantization to increase the recombination capability of the discrete codes without increasing the cost of model and codebook. Additionally, to generate discrete tokens at the second stage, we adopt a Masked Generative Image Transformer (MaskGIT) to learn an underlying prior distribution in the compressed latent space, which is much faster than the conventional autoregressive model. Experiments on two benchmark datasets demonstrate that our proposed modulated VQGAN is able to greatly improve the reconstructed image quality as well as provide high-fidelity image generation.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Chuanxia Zheng", "L. Vuong", "Jianfei Cai", "Dinh Q. 
Phung" ], "externalIds": { "DBLP": "journals/corr/abs-2209-09002", "ArXiv": "2209.09002", "DOI": "10.48550/arXiv.2209.09002", "CorpusId": 252367709 }, "url": "https://www.semanticscholar.org/paper/d193a474ac10f5edda3e17b7871750516971954e", "referenceCount": 51, "citationCount": 37, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Improved Masked Image Generation with Token-Critic", "abstract": "Non-autoregressive generative transformers recently demonstrated impressive image generation performance, and orders of magnitude faster sampling than their autoregressive counterparts. However, optimal parallel sampling from the true joint distribution of visual tokens remains an open challenge. In this paper we introduce Token-Critic, an auxiliary model to guide the sampling of a non-autoregressive generative transformer. Given a masked-and-reconstructed real image, the Token-Critic model is trained to distinguish which visual tokens belong to the original image and which were sampled by the generative transformer. During non-autoregressive iterative sampling, Token-Critic is used to select which tokens to accept and which to reject and resample. Coupled with Token-Critic, a state-of-the-art generative transformer significantly improves its performance, and outperforms recent diffusion models and GANs in terms of the trade-off between generated image quality and diversity, in the challenging class-conditional ImageNet generation.", "year": 2022, "venue": "European Conference on Computer Vision", "authors": [ "José Lezama", "Huiwen Chang", "Lu Jiang", "Irfan Essa" ], "externalIds": { "DBLP": "conf/eccv/LezamaCJE22", "ArXiv": "2209.04439", "DOI": "10.48550/arXiv.2209.04439", "CorpusId": 252185410 }, "url": "https://www.semanticscholar.org/paper/e875667d1ae8fd8f3b760eee6feb6c8a79497e8c", "referenceCount": 41, "citationCount": 31, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Analog Bits: Generating Discrete Data using Diffusion Models with Self-Conditioning", "abstract": "We present Bit Diffusion: a simple and generic approach for generating discrete data with continuous state and continuous time diffusion models. The main idea behind our approach is to first represent the discrete data as binary bits, and then train a continuous diffusion model to model these bits as real numbers which we call analog bits. To generate samples, the model first generates the analog bits, which are then thresholded to obtain the bits that represent the discrete variables. We further propose two simple techniques, namely Self-Conditioning and Asymmetric Time Intervals, which lead to a significant improvement in sample quality. Despite its simplicity, the proposed approach can achieve strong performance in both discrete image generation and image captioning tasks. For discrete image generation, we significantly improve previous state-of-the-art on both CIFAR-10 (which has 3K discrete 8-bit tokens) and ImageNet-64x64 (which has 12K discrete 8-bit tokens), outperforming the best autoregressive model in both sample quality (measured by FID) and efficiency. For image captioning on MS-COCO dataset, our approach achieves competitive results compared to autoregressive models.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Ting Chen", "Ruixiang Zhang", "Geoffrey E. 
Hinton" ], "externalIds": { "DBLP": "conf/iclr/ChenZH23", "ArXiv": "2208.04202", "DOI": "10.48550/arXiv.2208.04202", "CorpusId": 251402961 }, "url": "https://www.semanticscholar.org/paper/b64537bdf7a103aa01972ba06ea24a9c08f7cd74", "referenceCount": 68, "citationCount": 211, "influentialCitationCount": 34, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Classifier-Free Diffusion Guidance", "abstract": "Classifier guidance is a recently introduced method to trade off mode coverage and sample fidelity in conditional diffusion models post training, in the same spirit as low temperature sampling or truncation in other types of generative models. Classifier guidance combines the score estimate of a diffusion model with the gradient of an image classifier and thereby requires training an image classifier separate from the diffusion model. It also raises the question of whether guidance can be performed without a classifier. We show that guidance can be indeed performed by a pure generative model without such a classifier: in what we call classifier-free guidance, we jointly train a conditional and an unconditional diffusion model, and we combine the resulting conditional and unconditional score estimates to attain a trade-off between sample quality and diversity similar to that obtained using classifier guidance.", "year": 2022, "venue": "arXiv.org", "authors": [ "Jonathan Ho" ], "externalIds": { "ArXiv": "2207.12598", "DBLP": "journals/corr/abs-2207-12598", "DOI": "10.48550/arXiv.2207.12598", "CorpusId": 249145348 }, "url": "https://www.semanticscholar.org/paper/af9f365ed86614c800f082bd8eb14be76072ad16", "referenceCount": 25, "citationCount": 2396, "influentialCitationCount": 313, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Scaling Autoregressive Models for Content-Rich Text-to-Image Generation", "abstract": "We present the Pathways Autoregressive Text-to-Image (Parti) model, which generates high-fidelity photorealistic images and supports content-rich synthesis involving complex compositions and world knowledge. Parti treats text-to-image generation as a sequence-to-sequence modeling problem, akin to machine translation, with sequences of image tokens as the target outputs rather than text tokens in another language. This strategy can naturally tap into the rich body of prior work on large language models, which have seen continued advances in capabilities and performance through scaling data and model sizes. Our approach is simple: First, Parti uses a Transformer-based image tokenizer, ViT-VQGAN, to encode images as sequences of discrete tokens. Second, we achieve consistent quality improvements by scaling the encoder-decoder Transformer model up to 20B parameters, with a new state-of-the-art zero-shot FID score of 7.23 and finetuned FID score of 3.22 on MS-COCO. Our detailed analysis on Localized Narratives as well as PartiPrompts (P2), a new holistic benchmark of over 1600 English prompts, demonstrate the effectiveness of Parti across a wide variety of categories and difficulty aspects. We also explore and highlight limitations of our models in order to define and exemplify key areas of focus for further improvements. See https://parti.research.google/ for high-resolution images.", "year": 2022, "venue": "Trans. Mach. Learn. 
Res.", "authors": [ "Jiahui Yu", "Yuanzhong Xu", "Jing Yu Koh", "Thang Luong", "Gunjan Baid", "Zirui Wang", "Vijay Vasudevan", "Alexander Ku", "Yinfei Yang", "Burcu Karagol Ayan", "Ben Hutchinson", "Wei Han", "Zarana Parekh", "Xin Li", "Han Zhang", "Jason Baldridge", "Yonghui Wu" ], "externalIds": { "DBLP": "journals/tmlr/YuXKLBWVKYAHHPLZBW22", "ArXiv": "2206.10789", "DOI": "10.48550/arXiv.2206.10789", "CorpusId": 249926846 }, "url": "https://www.semanticscholar.org/paper/1243e13254bb4ea1f71b4be8a3e4e54ffd02d2fe", "referenceCount": 115, "citationCount": 827, "influentialCitationCount": 79, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Draft-and-Revise: Effective Image Generation with Contextual RQ-Transformer", "abstract": "Although autoregressive models have achieved promising results on image generation, their unidirectional generation process prevents the resultant images from fully reflecting global contexts. To address the issue, we propose an effective image generation framework of Draft-and-Revise with Contextual RQ-transformer to consider global contexts during the generation process. As a generalized VQ-VAE, RQ-VAE first represents a high-resolution image as a sequence of discrete code stacks. After code stacks in the sequence are randomly masked, Contextual RQ-Transformer is trained to infill the masked code stacks based on the unmasked contexts of the image. Then, Contextual RQ-Transformer uses our two-phase decoding, Draft-and-Revise, and generates an image, while exploiting the global contexts of the image during the generation process. Specifically. in the draft phase, our model first focuses on generating diverse images despite rather low quality. Then, in the revise phase, the model iteratively improves the quality of images, while preserving the global contexts of generated images. In experiments, our method achieves state-of-the-art results on conditional image generation. We also validate that the Draft-and-Revise decoding can achieve high performance by effectively controlling the quality-diversity trade-off in image generation.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Doyup Lee", "Chiheon Kim", "Saehoon Kim", "Minsu Cho", "Wook-Shin Han" ], "externalIds": { "DBLP": "journals/corr/abs-2206-04452", "ArXiv": "2206.04452", "DOI": "10.48550/arXiv.2206.04452", "CorpusId": 249538420 }, "url": "https://www.semanticscholar.org/paper/f39f24210d413b6cc785d029851b59b54ce47d88", "referenceCount": 41, "citationCount": 22, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Make-A-Scene: Scene-Based Text-to-Image Generation with Human Priors", "abstract": "Recent text-to-image generation methods provide a simple yet exciting conversion capability between text and image domains. While these methods have incrementally improved the generated image fidelity and text relevancy, several pivotal gaps remain unanswered, limiting applicability and quality. We propose a novel text-to-image method that addresses these gaps by (i) enabling a simple control mechanism complementary to text in the form of a scene, (ii) introducing elements that substantially improve the tokenization process by employing domain-specific knowledge over key image regions (faces and salient objects), and (iii) adapting classifier-free guidance for the transformer use case. 
Our model achieves state-of-the-art FID and human evaluation results, unlocking the ability to generate high fidelity images in a resolution of 512x512 pixels, significantly improving visual quality. Through scene controllability, we introduce several new capabilities: (i) Scene editing, (ii) text editing with anchor scenes, (iii) overcoming out-of-distribution text prompts, and (iv) story illustration generation, as demonstrated in the story we wrote.", "year": 2022, "venue": "European Conference on Computer Vision", "authors": [ "Oran Gafni", "Adam Polyak", "Oron Ashual", "Shelly Sheynin", "Devi Parikh", "Yaniv Taigman" ], "externalIds": { "ArXiv": "2203.13131", "DBLP": "conf/eccv/GafniPASPT22", "DOI": "10.48550/arXiv.2203.13131", "CorpusId": 247628171 }, "url": "https://www.semanticscholar.org/paper/15e234a67f30d6761f1d7670d501095d1697b69c", "referenceCount": 73, "citationCount": 413, "influentialCitationCount": 28, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Autoregressive Image Generation using Residual Quantization", "abstract": "For autoregressive (AR) modeling of high-resolution images, vector quantization (VQ) represents an image as a sequence of discrete codes. A short sequence length is important for an AR model to reduce its computational costs to consider long-range interactions of codes. However, we postulate that previous VQ cannot shorten the code sequence and generate high-fidelity images together in terms of the rate-distortion trade-off. In this study, we propose the two-stage framework, which consists of Residual-Quantized VAE (RQ-VAE) and RQ-Transformer, to effectively generate high-resolution images. Given a fixed codebook size, RQ-VAE can precisely approximate a feature map of an image and represent the image as a stacked map of discrete codes. Then, RQ-Transformer learns to predict the quantized feature vector at the next position by predicting the next stack of codes. Thanks to the precise approximation of RQ-VAE, we can represent a $256\\times 256$ image as $8\\times 8$ resolution of the feature map, and RQ-Transformer can efficiently reduce the computational costs. Consequently, our framework out-performs the existing AR models on various benchmarks of unconditional and conditional image generation. Our approach also has a significantly faster sampling speed than previous AR models to generate high-quality images.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Doyup Lee", "Chiheon Kim", "Saehoon Kim", "Minsu Cho", "Wook-Shin Han" ], "externalIds": { "ArXiv": "2203.01941", "DBLP": "journals/corr/abs-2203-01941", "DOI": "10.1109/CVPR52688.2022.01123", "CorpusId": 247244535 }, "url": "https://www.semanticscholar.org/paper/8fbc2d349d3d0945efa5e92fd3713734ce63d19e", "referenceCount": 53, "citationCount": 162, "influentialCitationCount": 28, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MaskGIT: Masked Generative Image Transformer", "abstract": "Generative transformers have experienced rapid popularity growth in the computer vision community in synthesizing high-fidelity and high-resolution images. The best generative transformer models so far, however, still treat an image naively as a sequence of tokens, and decode an image sequentially following the raster scan ordering (i.e. line-by-line). We find this strategy neither optimal nor efficient. This paper proposes a novel image synthesis paradigm using a bidirectional transformer decoder, which we term MaskGIT. 
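To illustrate the "stacked map of discrete codes" idea in the RQ-VAE entry above, here is a toy residual quantizer in NumPy; the codebook contents, depth, and shapes are assumptions made for the example rather than the paper's configuration.

```python
import numpy as np

def residual_quantize(feat, codebook, depth=4):
    """Quantize a feature vector as a stack of `depth` codes from a shared codebook.

    At each level the nearest codevector is subtracted, and the next level quantizes
    the remaining residual, so deeper stacks approximate the feature more precisely.
    """
    residual = feat.copy()
    codes, approx = [], np.zeros_like(feat)
    for _ in range(depth):
        idx = int(((codebook - residual) ** 2).sum(axis=1).argmin())  # nearest code
        codes.append(idx)
        approx += codebook[idx]
        residual -= codebook[idx]
    return codes, approx

rng = np.random.default_rng(0)
codebook = rng.normal(size=(256, 16))          # 256 shared codevectors of dimension 16
feat = rng.normal(size=16)
codes, approx = residual_quantize(feat, codebook)
print(codes, np.linalg.norm(feat - approx))    # with a trained codebook, error shrinks with depth
```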
During training, MaskGIT learns to predict randomly masked tokens by attending to tokens in all directions. At inference time, the model begins with generating all tokens of an image simultaneously, and then refines the image iteratively conditioned on the previous generation. Our experiments demonstrate that MaskGIT significantly outperforms the state-of-the-art transformer model on the ImageNet dataset, and accelerates autoregressive decoding by up to 48x. Besides, we illustrate that MaskGIT can be easily extended to various image editing tasks, such as inpainting, extrapolation, and image manipulation. Project page: masked-generative-image-transformer.github.io.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Huiwen Chang", "Han Zhang", "Lu Jiang", "Ce Liu", "W. Freeman" ], "externalIds": { "DBLP": "journals/corr/abs-2202-04200", "ArXiv": "2202.04200", "DOI": "10.1109/CVPR52688.2022.01103", "CorpusId": 246680316 }, "url": "https://www.semanticscholar.org/paper/7c597874535c1537d7ddff3b3723015b4dc79d30", "referenceCount": 61, "citationCount": 382, "influentialCitationCount": 82, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "High-Resolution Image Synthesis with Latent Diffusion Models", "abstract": "By decomposing the image formation process into a sequential application of denoising autoencoders, diffusion models (DMs) achieve state-of-the-art synthesis results on image data and beyond. Additionally, their formulation allows for a guiding mechanism to control the image generation process without retraining. However, since these models typically operate directly in pixel space, optimization of powerful DMs often consumes hundreds of GPU days and inference is expensive due to sequential evaluations. To enable DM training on limited computational resources while retaining their quality and flexibility, we apply them in the latent space of powerful pretrained autoencoders. In contrast to previous work, training diffusion models on such a representation allows for the first time to reach a near-optimal point between complexity reduction and detail preservation, greatly boosting visual fidelity. By introducing cross-attention layers into the model architecture, we turn diffusion models into powerful and flexible generators for general conditioning inputs such as text or bounding boxes and high-resolution synthesis becomes possible in a convolutional manner. Our latent diffusion models (LDMs) achieve new state of the art scores for image inpainting and class-conditional image synthesis and highly competitive performance on various tasks, including unconditional image generation, text-to-image synthesis, and super-resolution, while significantly reducing computational requirements compared to pixel-based DMs.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Robin Rombach", "A. Blattmann", "Dominik Lorenz", "Patrick Esser", "B. 
Ommer" ], "externalIds": { "ArXiv": "2112.10752", "DBLP": "journals/corr/abs-2112-10752", "DOI": "10.1109/CVPR52688.2022.01042", "CorpusId": 245335280 }, "url": "https://www.semanticscholar.org/paper/c10075b3746a9f3dd5811970e93c8ca3ad39b39d", "referenceCount": 110, "citationCount": 9847, "influentialCitationCount": 2744, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Vector-quantized Image Modeling with Improved VQGAN", "abstract": "Pretraining language models with next-token prediction on massive text corpora has delivered phenomenal zero-shot, few-shot, transfer learning and multi-tasking capabilities on both generative and discriminative language tasks. Motivated by this success, we explore a Vector-quantized Image Modeling (VIM) approach that involves pretraining a Transformer to predict rasterized image tokens autoregressively. The discrete image tokens are encoded from a learned Vision-Transformer-based VQGAN (ViT-VQGAN). We first propose multiple improvements over vanilla VQGAN from architecture to codebook learning, yielding better efficiency and reconstruction fidelity. The improved ViT-VQGAN further improves vector-quantized image modeling tasks, including unconditional, class-conditioned image generation and unsupervised representation learning. When trained on ImageNet at \\(256\\times256\\) resolution, we achieve Inception Score (IS) of 175.1 and Fr'echet Inception Distance (FID) of 4.17, a dramatic improvement over the vanilla VQGAN, which obtains 70.6 and 17.04 for IS and FID, respectively. Based on ViT-VQGAN and unsupervised pretraining, we further evaluate the pretrained Transformer by averaging intermediate features, similar to Image GPT (iGPT). This ImageNet-pretrained VIM-L significantly beats iGPT-L on linear-probe accuracy from 60.3% to 73.2% for a similar model size. VIM-L also outperforms iGPT-XL which is trained with extra web image data and larger model size.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Jiahui Yu", "Xin Li", "Jing Yu Koh", "Han Zhang", "Ruoming Pang", "James Qin", "Alexander Ku", "Yuanzhong Xu", "Jason Baldridge", "Yonghui Wu" ], "externalIds": { "ArXiv": "2110.04627", "DBLP": "journals/corr/abs-2110-04627", "CorpusId": 238582653 }, "url": "https://www.semanticscholar.org/paper/9c7a2cd13b783bb73ad2d1ec2880bdd9b995cbdc", "referenceCount": 69, "citationCount": 304, "influentialCitationCount": 33, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Diffusion Models Beat GANs on Image Synthesis", "abstract": "We show that diffusion models can achieve image sample quality superior to the current state-of-the-art generative models. We achieve this on unconditional image synthesis by finding a better architecture through a series of ablations. For conditional image synthesis, we further improve sample quality with classifier guidance: a simple, compute-efficient method for trading off diversity for fidelity using gradients from a classifier. We achieve an FID of 2.97 on ImageNet 128$\\times$128, 4.59 on ImageNet 256$\\times$256, and 7.72 on ImageNet 512$\\times$512, and we match BigGAN-deep even with as few as 25 forward passes per sample, all while maintaining better coverage of the distribution. Finally, we find that classifier guidance combines well with upsampling diffusion models, further improving FID to 3.94 on ImageNet 256$\\times$256 and 3.85 on ImageNet 512$\\times$512. 
We release our code at https://github.com/openai/guided-diffusion", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Prafulla Dhariwal", "Alex Nichol" ], "externalIds": { "ArXiv": "2105.05233", "DBLP": "journals/corr/abs-2105-05233", "CorpusId": 234357997 }, "url": "https://www.semanticscholar.org/paper/64ea8f180d0682e6c18d1eb688afdb2027c02794", "referenceCount": 81, "citationCount": 5177, "influentialCitationCount": 661, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Regularizing Generative Adversarial Networks under Limited Data", "abstract": "Recent years have witnessed the rapid progress of generative adversarial networks (GANs). However, the success of the GAN models hinges on a large amount of training data. This work proposes a regularization approach for training robust GAN models on limited data. We theoretically show a connection between the regularized loss and an f-divergence called LeCam-divergence, which we find is more robust under limited training data. Extensive experiments on several benchmark datasets demonstrate that the proposed regularization scheme 1) improves the generalization performance and stabilizes the learning dynamics of GAN models under limited training data, and 2) complements the recent data augmentation methods. These properties facilitate training GAN models to achieve state-of-the-art performance when only limited training data of the ImageNet benchmark is available. The source code is available at https://github.com/google/lecam-gan.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Hung-Yu Tseng", "Lu Jiang", "Ce Liu", "Ming-Hsuan Yang", "Weilong Yang" ], "externalIds": { "DBLP": "journals/corr/abs-2104-03310", "ArXiv": "2104.03310", "DOI": "10.1109/CVPR46437.2021.00783", "CorpusId": 233168639 }, "url": "https://www.semanticscholar.org/paper/cb2bd9549791520deccadfde221f8ca699675a96", "referenceCount": 88, "citationCount": 121, "influentialCitationCount": 24, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Taming Transformers for High-Resolution Image Synthesis", "abstract": "Designed to learn long-range interactions on sequential data, transformers continue to show state-of-the-art results on a wide variety of tasks. In contrast to CNNs, they contain no inductive bias that prioritizes local interactions. This makes them expressive, but also computationally infeasible for long sequences, such as high-resolution images. We demonstrate how combining the effectiveness of the inductive bias of CNNs with the expressivity of transformers enables them to model and thereby synthesize high-resolution images. We show how to (i) use CNNs to learn a context-rich vocabulary of image constituents, and in turn (ii) utilize transformers to efficiently model their composition within high-resolution images. Our approach is readily applied to conditional synthesis tasks, where both non-spatial information, such as object classes, and spatial information, such as segmentations, can control the generated image. In particular, we present the first results on semantically-guided synthesis of megapixel images with transformers. Project page at https://git.io/JLlvY.", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Patrick Esser", "Robin Rombach", "B. 
Ommer" ], "externalIds": { "ArXiv": "2012.09841", "MAG": "3111551570", "DBLP": "journals/corr/abs-2012-09841", "DOI": "10.1109/CVPR46437.2021.01268", "CorpusId": 229297973 }, "url": "https://www.semanticscholar.org/paper/47f7ec3d0a5e6e83b6768ece35206a94dc81919c", "referenceCount": 82, "citationCount": 2069, "influentialCitationCount": 389, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Generative Pretraining From Pixels", "abstract": "Inspired by progress in unsupervised representation learning for natural language, we examine whether similar models can learn useful representations for images. We train a sequence Transformer to auto-regressively predict pixels, without incorporating knowledge of the 2D input structure. Despite training on low-resolution ImageNet without labels, we find that a GPT-2 scale model learns strong image representations as measured by linear probing, fine-tuning, and low-data classification. On CIFAR-10, we achieve 96.3% accuracy with a linear probe, outperforming a supervised Wide ResNet, and 99.0% accuracy with full fine-tuning, matching the top supervised pre-trained models. An even larger model trained on a mix-ture of ImageNet and web images is competitive with self-supervised benchmarks on ImageNet, achieving 72.0% top-1 accuracy on a linear probe of our features.", "year": 2020, "venue": "International Conference on Machine Learning", "authors": [ "Mark Chen", "Alec Radford", "Jeff Wu", "Heewoo Jun", "Prafulla Dhariwal", "D. Luan", "I. Sutskever" ], "externalIds": { "MAG": "3034445277", "DBLP": "conf/icml/ChenRC0JLS20", "CorpusId": 219781060 }, "url": "https://www.semanticscholar.org/paper/bc022dbb37b1bbf3905a7404d19c03ccbf6b81a8", "referenceCount": 79, "citationCount": 1310, "influentialCitationCount": 111, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Denoising Diffusion Probabilistic Models", "abstract": "We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional CIFAR10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art FID score of 3.17. On 256x256 LSUN, we obtain sample quality similar to ProgressiveGAN. Our implementation is available at this https URL", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Jonathan Ho", "Ajay Jain", "P. Abbeel" ], "externalIds": { "ArXiv": "2006.11239", "MAG": "3100572490", "DBLP": "journals/corr/abs-2006-11239", "CorpusId": 219955663 }, "url": "https://www.semanticscholar.org/paper/5c126ae3421f05768d8edd97ecd44b1364e2c99a", "referenceCount": 73, "citationCount": 10778, "influentialCitationCount": 2337, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Style-Based Generator Architecture for Generative Adversarial Networks", "abstract": "We propose an alternative generator architecture for generative adversarial networks, borrowing from style transfer literature. 
The new architecture leads to an automatically learned, unsupervised separation of high-level attributes (e.g., pose and identity when trained on human faces) and stochastic variation in the generated images (e.g., freckles, hair), and it enables intuitive, scale-specific control of the synthesis. The new generator improves the state-of-the-art in terms of traditional distribution quality metrics, leads to demonstrably better interpolation properties, and also better disentangles the latent factors of variation. To quantify interpolation quality and disentanglement, we propose two new, automated methods that are applicable to any generator architecture. Finally, we introduce a new, highly varied and high-quality dataset of human faces.", "year": 2018, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Tero Karras", "S. Laine", "Timo Aila" ], "externalIds": { "DBLP": "journals/corr/abs-1812-04948", "MAG": "2904367110", "ArXiv": "1812.04948", "DOI": "10.1109/CVPR.2019.00453", "CorpusId": 54482423 }, "url": "https://www.semanticscholar.org/paper/ceb2ebef0b41e31c1a21b28c2734123900c005e2", "referenceCount": 65, "citationCount": 8967, "influentialCitationCount": 1761, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Group Normalization", "abstract": null, "year": 2018, "venue": "International Journal of Computer Vision", "authors": [ "Yuxin Wu", "Kaiming He" ], "externalIds": { "DBLP": "journals/corr/abs-1803-08494", "ArXiv": "1803.08494", "MAG": "2795783309", "DOI": "10.1007/s11263-019-01198-w", "CorpusId": 4076251 }, "url": "https://www.semanticscholar.org/paper/d08b35243edc5be07387a9ed218070b31e502901", "referenceCount": 76, "citationCount": 3238, "influentialCitationCount": 185, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Unreasonable Effectiveness of Deep Features as a Perceptual Metric", "abstract": "While it is nearly effortless for humans to quickly assess the perceptual similarity between two images, the underlying processes are thought to be quite complex. Despite this, the most widely used perceptual metrics today, such as PSNR and SSIM, are simple, shallow functions, and fail to account for many nuances of human perception. Recently, the deep learning community has found that features of the VGG network trained on ImageNet classification has been remarkably useful as a training loss for image synthesis. But how perceptual are these so-called \"perceptual losses\"? What elements are critical for their success? To answer these questions, we introduce a new dataset of human perceptual similarity judgments. We systematically evaluate deep features across different architectures and tasks and compare them with classic metrics. We find that deep features outperform all previous metrics by large margins on our dataset. More surprisingly, this result is not restricted to ImageNet-trained VGG features, but holds across different deep architectures and levels of supervision (supervised, self-supervised, or even unsupervised). Our results suggest that perceptual similarity is an emergent property shared across deep visual representations.", "year": 2018, "venue": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition", "authors": [ "Richard Zhang", "Phillip Isola", "Alexei A. 
Efros", "Eli Shechtman", "Oliver Wang" ], "externalIds": { "ArXiv": "1801.03924", "MAG": "2783879794", "DBLP": "journals/corr/abs-1801-03924", "DOI": "10.1109/CVPR.2018.00068", "CorpusId": 4766599 }, "url": "https://www.semanticscholar.org/paper/c468bbde6a22d961829e1970e6ad5795e05418d1", "referenceCount": 71, "citationCount": 8862, "influentialCitationCount": 1545, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Decoupled Weight Decay Regularization", "abstract": "L$_2$ regularization and weight decay regularization are equivalent for standard stochastic gradient descent (when rescaled by the learning rate), but as we demonstrate this is \\emph{not} the case for adaptive gradient algorithms, such as Adam. While common implementations of these algorithms employ L$_2$ regularization (often calling it \"weight decay\" in what may be misleading due to the inequivalence we expose), we propose a simple modification to recover the original formulation of weight decay regularization by \\emph{decoupling} the weight decay from the optimization steps taken w.r.t. the loss function. We provide empirical evidence that our proposed modification (i) decouples the optimal choice of weight decay factor from the setting of the learning rate for both standard SGD and Adam and (ii) substantially improves Adam's generalization performance, allowing it to compete with SGD with momentum on image classification datasets (on which it was previously typically outperformed by the latter). Our proposed decoupled weight decay has already been adopted by many researchers, and the community has implemented it in TensorFlow and PyTorch; the complete source code for our experiments is available at this https URL", "year": 2017, "venue": "International Conference on Learning Representations", "authors": [ "I. Loshchilov", "F. Hutter" ], "externalIds": { "MAG": "2950541952", "DBLP": "conf/iclr/LoshchilovH19", "CorpusId": 53592270 }, "url": "https://www.semanticscholar.org/paper/d07284a6811f1b2745d91bdb06b040b57f226882", "referenceCount": 35, "citationCount": 17312, "influentialCitationCount": 3078, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Neural Discrete Representation Learning", "abstract": "Learning useful representations without supervision remains a key challenge in machine learning. In this paper, we propose a simple yet powerful generative model that learns such discrete representations. Our model, the Vector Quantised-Variational AutoEncoder (VQ-VAE), differs from VAEs in two key ways: the encoder network outputs discrete, rather than continuous, codes; and the prior is learnt rather than static. In order to learn a discrete latent representation, we incorporate ideas from vector quantisation (VQ). Using the VQ method allows the model to circumvent issues of \"posterior collapse\" -- where the latents are ignored when they are paired with a powerful autoregressive decoder -- typically observed in the VAE framework. Pairing these representations with an autoregressive prior, the model can generate high quality images, videos, and speech as well as doing high quality speaker conversion and unsupervised learning of phonemes, providing further evidence of the utility of the learnt representations.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Aäron van den Oord", "O. Vinyals", "K. 
Kavukcuoglu" ], "externalIds": { "MAG": "2752796333", "ArXiv": "1711.00937", "DBLP": "conf/nips/OordVK17", "CorpusId": 20282961 }, "url": "https://www.semanticscholar.org/paper/f466157848d1a7772fb6d02cdac9a7a5e7ef982e", "referenceCount": 43, "citationCount": 3716, "influentialCitationCount": 609, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium", "abstract": "Generative Adversarial Networks (GANs) excel at creating realistic images with complex models for which maximum likelihood is infeasible. However, the convergence of GAN training has still not been proved. We propose a two time-scale update rule (TTUR) for training GANs with stochastic gradient descent on arbitrary GAN loss functions. TTUR has an individual learning rate for both the discriminator and the generator. Using the theory of stochastic approximation, we prove that the TTUR converges under mild assumptions to a stationary local Nash equilibrium. The convergence carries over to the popular Adam optimization, for which we prove that it follows the dynamics of a heavy ball with friction and thus prefers flat minima in the objective landscape. For the evaluation of the performance of GANs at image generation, we introduce the \"Frechet Inception Distance\" (FID) which captures the similarity of generated images to real ones better than the Inception Score. In experiments, TTUR improves learning for DCGANs and Improved Wasserstein GANs (WGAN-GP) outperforming conventional GAN training on CelebA, CIFAR-10, SVHN, LSUN Bedrooms, and the One Billion Word Benchmark.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "M. Heusel", "Hubert Ramsauer", "Thomas Unterthiner", "Bernhard Nessler", "Sepp Hochreiter" ], "externalIds": { "MAG": "2963981733", "DBLP": "conf/nips/HeuselRUNH17", "CorpusId": 326772 }, "url": "https://www.semanticscholar.org/paper/231af7dc01a166cac3b5b01ca05778238f796e41", "referenceCount": 62, "citationCount": 10842, "influentialCitationCount": 2301, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Attention is All you Need", "abstract": "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Ashish Vaswani", "Noam M. 
Shazeer", "Niki Parmar", "Jakob Uszkoreit", "Llion Jones", "Aidan N. Gomez", "Lukasz Kaiser", "Illia Polosukhin" ], "externalIds": { "MAG": "2963403868", "DBLP": "conf/nips/VaswaniSPUJGKP17", "ArXiv": "1706.03762", "CorpusId": 13756489 }, "url": "https://www.semanticscholar.org/paper/204e3073870fae3d05bcbc2f6a8e263d9b72e776", "referenceCount": 41, "citationCount": 104988, "influentialCitationCount": 15363, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "beta-VAE: Learning Basic Visual Concepts with a Constrained Variational Framework", "abstract": "an", "year": 2016, "venue": "International Conference on Learning Representations", "authors": [ "I. Higgins", "L. Matthey", "Arka Pal", "Christopher P. Burgess", "Xavier Glorot", "M. Botvinick", "S. Mohamed", "Alexander Lerchner" ], "externalIds": { "MAG": "2753738274", "CorpusId": 46798026 }, "url": "https://www.semanticscholar.org/paper/a90226c41b79f8b06007609f39f82757073641e2", "referenceCount": 34, "citationCount": 4451, "influentialCitationCount": 549, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SGDR: Stochastic Gradient Descent with Warm Restarts", "abstract": "Restart techniques are common in gradient-free optimization to deal with multimodal functions. Partial warm restarts are also gaining popularity in gradient-based optimization to improve the rate of convergence in accelerated gradient schemes to deal with ill-conditioned functions. In this paper, we propose a simple warm restart technique for stochastic gradient descent to improve its anytime performance when training deep neural networks. We empirically study its performance on the CIFAR-10 and CIFAR-100 datasets, where we demonstrate new state-of-the-art results at 3.14% and 16.21%, respectively. We also demonstrate its advantages on a dataset of EEG recordings and on a downsampled version of the ImageNet dataset. Our source code is available at this https URL", "year": 2016, "venue": "International Conference on Learning Representations", "authors": [ "I. Loshchilov", "F. Hutter" ], "externalIds": { "MAG": "2963263347", "ArXiv": "1608.03983", "DBLP": "conf/iclr/LoshchilovH17", "CorpusId": 14337532 }, "url": "https://www.semanticscholar.org/paper/b022f2a277a4bf5f42382e86e4380b96340b9e86", "referenceCount": 42, "citationCount": 6841, "influentialCitationCount": 927, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Improved Techniques for Training GANs", "abstract": "We present a variety of new architectural features and training procedures that we apply to the generative adversarial networks (GANs) framework. We focus on two applications of GANs: semi-supervised learning, and the generation of images that humans find visually realistic. Unlike most work on generative models, our primary goal is not to train a model that assigns high likelihood to test data, nor do we require the model to be able to learn well without using any labels. Using our new techniques, we achieve state-of-the-art results in semi-supervised classification on MNIST, CIFAR-10 and SVHN. The generated images are of high quality as confirmed by a visual Turing test: our model generates MNIST samples that humans cannot distinguish from real data, and CIFAR-10 samples that yield a human error rate of 21.3%. 
We also present ImageNet samples with unprecedented resolution and show that our methods enable the model to learn recognizable features of ImageNet classes.", "year": 2016, "venue": "Neural Information Processing Systems", "authors": [ "Tim Salimans", "I. Goodfellow", "Wojciech Zaremba", "Vicki Cheung", "Alec Radford", "Xi Chen" ], "externalIds": { "DBLP": "conf/nips/SalimansGZCRCC16", "ArXiv": "1606.03498", "MAG": "2949938177", "CorpusId": 1687220 }, "url": "https://www.semanticscholar.org/paper/571b0750085ae3d939525e62af510ee2cee9d5ea", "referenceCount": 28, "citationCount": 8234, "influentialCitationCount": 903, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Perceptual Losses for Real-Time Style Transfer and Super-Resolution", "abstract": null, "year": 2016, "venue": "European Conference on Computer Vision", "authors": [ "Justin Johnson", "Alexandre Alahi", "Li Fei-Fei" ], "externalIds": { "MAG": "2331128040", "DBLP": "journals/corr/JohnsonAL16", "ArXiv": "1603.08155", "DOI": "10.1007/978-3-319-46475-6_43", "CorpusId": 980236 }, "url": "https://www.semanticscholar.org/paper/915c4bb289b3642489e904c65a47fa56efb60658", "referenceCount": 71, "citationCount": 9478, "influentialCitationCount": 734, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Residual Learning for Image Recognition", "abstract": "Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers - 8× deeper than VGG nets [40] but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions1, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation.", "year": 2015, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Kaiming He", "X. Zhang", "Shaoqing Ren", "Jian Sun" ], "externalIds": { "DBLP": "conf/cvpr/HeZRS16", "MAG": "2949650786", "ArXiv": "1512.03385", "DOI": "10.1109/cvpr.2016.90", "CorpusId": 206594692 }, "url": "https://www.semanticscholar.org/paper/2c03df8b48bf3fa39054345bafabfeff15bfd11d", "referenceCount": 53, "citationCount": 172713, "influentialCitationCount": 28229, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift", "abstract": "Training Deep Neural Networks is complicated by the fact that the distribution of each layer's inputs changes during training, as the parameters of the previous layers change. 
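The Fréchet Inception Distance introduced in the TTUR entry above compares Gaussians fit to real and generated feature distributions. A small sketch of that distance follows; the Inception feature extractor is omitted, and the inputs are assumed to be precomputed feature matrices.

```python
import numpy as np
from scipy.linalg import sqrtm

def frechet_distance(feats_real, feats_gen):
    """FID between Gaussians fit to two feature sets:
    ||mu_r - mu_g||^2 + Tr(S_r + S_g - 2 (S_r S_g)^{1/2})."""
    mu_r, mu_g = feats_real.mean(0), feats_gen.mean(0)
    cov_r = np.cov(feats_real, rowvar=False)
    cov_g = np.cov(feats_gen, rowvar=False)
    covmean = sqrtm(cov_r @ cov_g)
    if np.iscomplexobj(covmean):              # numerical noise can add tiny imaginary parts
        covmean = covmean.real
    diff = mu_r - mu_g
    return float(diff @ diff + np.trace(cov_r + cov_g - 2.0 * covmean))

rng = np.random.default_rng(0)
print(frechet_distance(rng.normal(size=(500, 64)), rng.normal(loc=0.5, size=(500, 64))))
```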
This slows down the training by requiring lower learning rates and careful parameter initialization, and makes it notoriously hard to train models with saturating nonlinearities. We refer to this phenomenon as internal covariate shift, and address the problem by normalizing layer inputs. Our method draws its strength from making normalization a part of the model architecture and performing the normalization for each training mini-batch. Batch Normalization allows us to use much higher learning rates and be less careful about initialization, and in some cases eliminates the need for Dropout. Applied to a state-of-the-art image classification model, Batch Normalization achieves the same accuracy with 14 times fewer training steps, and beats the original model by a significant margin. Using an ensemble of batch-normalized networks, we improve upon the best published result on ImageNet classification: reaching 4.82% top-5 test error, exceeding the accuracy of human raters.", "year": 2015, "venue": "International Conference on Machine Learning", "authors": [ "Sergey Ioffe", "Christian Szegedy" ], "externalIds": { "MAG": "2949117887", "ArXiv": "1502.03167", "DBLP": "conf/icml/IoffeS15", "CorpusId": 5808102 }, "url": "https://www.semanticscholar.org/paper/995c5f5e62614fcb4d2796ad2faab969da51713e", "referenceCount": 54, "citationCount": 40937, "influentialCitationCount": 2038, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Very Deep Convolutional Networks for Large-Scale Image Recognition", "abstract": "In this work we investigate the effect of the convolutional network depth on its accuracy in the large-scale image recognition setting. Our main contribution is a thorough evaluation of networks of increasing depth using an architecture with very small (3x3) convolution filters, which shows that a significant improvement on the prior-art configurations can be achieved by pushing the depth to 16-19 weight layers. These findings were the basis of our ImageNet Challenge 2014 submission, where our team secured the first and the second places in the localisation and classification tracks respectively. We also show that our representations generalise well to other datasets, where they achieve state-of-the-art results. We have made our two best-performing ConvNet models publicly available to facilitate further research on the use of deep visual representations in computer vision.", "year": 2014, "venue": "International Conference on Learning Representations", "authors": [ "K. Simonyan", "Andrew Zisserman" ], "externalIds": { "MAG": "2949429431", "ArXiv": "1409.1556", "DBLP": "journals/corr/SimonyanZ14a", "CorpusId": 14124313 }, "url": "https://www.semanticscholar.org/paper/eb42cf88027de515750f230b23b1a057dc782108", "referenceCount": 43, "citationCount": 93036, "influentialCitationCount": 13588, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ImageNet: A large-scale hierarchical image database", "abstract": "The explosion of image data on the Internet has the potential to foster more sophisticated and robust models and algorithms to index, retrieve, organize and interact with images and multimedia data. But exactly how such data can be harnessed and organized remains a critical problem. We introduce here a new database called “ImageNet”, a large-scale ontology of images built upon the backbone of the WordNet structure. 
ImageNet aims to populate the majority of the 80,000 synsets of WordNet with an average of 500-1000 clean and full resolution images. This will result in tens of millions of annotated images organized by the semantic hierarchy of WordNet. This paper offers a detailed analysis of ImageNet in its current state: 12 subtrees with 5247 synsets and 3.2 million images in total. We show that ImageNet is much larger in scale and diversity and much more accurate than the current image datasets. Constructing such a large-scale database is a challenging task. We describe the data collection scheme with Amazon Mechanical Turk. Lastly, we illustrate the usefulness of ImageNet through three simple applications in object recognition, image classification and automatic object clustering. We hope that the scale, accuracy, diversity and hierarchical structure of ImageNet can offer unparalleled opportunities to researchers in the computer vision community and beyond.", "year": 2009, "venue": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "authors": [ "Jia Deng", "Wei Dong", "R. Socher", "Li-Jia Li", "K. Li", "Li Fei-Fei" ], "externalIds": { "DBLP": "conf/cvpr/DengDSLL009", "MAG": "2108598243", "DOI": "10.1109/CVPR.2009.5206848", "CorpusId": 57246310 }, "url": "https://www.semanticscholar.org/paper/d2c733e34d48784a37d717fe43d9e93277a8c53e", "referenceCount": 27, "citationCount": 56678, "influentialCitationCount": 8947, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Extracting and composing robust features with denoising autoencoders", "abstract": "Previous work has shown that the difficulties in learning deep generative or discriminative models can be overcome by an initial unsupervised learning step that maps inputs to useful intermediate representations. We introduce and motivate a new training principle for unsupervised learning of a representation based on the idea of making the learned representations robust to partial corruption of the input pattern. This approach can be used to train autoencoders, and these denoising autoencoders can be stacked to initialize deep architectures. The algorithm can be motivated from a manifold learning and information theoretic perspective or from a generative model perspective. Comparative experiments clearly show the surprising advantage of corrupting the input of autoencoders on a pattern classification benchmark suite.", "year": 2008, "venue": "International Conference on Machine Learning", "authors": [ "Pascal Vincent", "H. Larochelle", "Yoshua Bengio", "Pierre-Antoine Manzagol" ], "externalIds": { "MAG": "2025768430", "DBLP": "conf/icml/VincentLBM08", "DOI": "10.1145/1390156.1390294", "CorpusId": 207168299 }, "url": "https://www.semanticscholar.org/paper/843959ffdccf31c6694d135fad07425924f785b1", "referenceCount": 30, "citationCount": 7124, "influentialCitationCount": 470, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Acceleration of stochastic approximation by averaging", "abstract": "A new recursive algorithm of stochastic approximation type with the averaging of trajectories is investigated. Convergence with probability one is proved for a variety of classical optimization and identification problems. It is also demonstrated for these problems that the proposed algorithm achieves the highest possible rate of convergence.", "year": 1992, "venue": "", "authors": [ "Boris Polyak", "A. 
Juditsky" ], "externalIds": { "MAG": "2086161653", "DOI": "10.1137/0330046", "CorpusId": 3548228 }, "url": "https://www.semanticscholar.org/paper/6dc61f37ecc552413606d8c89ffbc46ec98ed887", "referenceCount": 34, "citationCount": 1981, "influentialCitationCount": 228, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "AUTO-ENCODING VARIATIONAL BAYES", "abstract": "To make decisions based on a model fit by Auto-Encoding Variational Bayes (AEVB), practitioners typically use importance sampling to estimate a functional of the posterior distribution. The variational distribution found by AEVB serves as the proposal distribution for importance sampling. However, this proposal distribution may give unreliable (high variance) importance sampling estimates, thus leading to poor decisions. We explore how changing the objective function for learning the variational distribution, while continuing to learn the generative model based on the ELBO, affects the quality of downstream decisions. For a particular model, we characterize the error of importance sampling as a function of posterior variance and show that proposal distributions learned with evidence upper bounds are better. Motivated by these theoretical results, we propose a novel variant of the VAE. In addition to experimenting with MNIST, we present a full-fledged application of the proposed method to single-cell RNA sequencing. In this challenging instance of multiple hypothesis testing, the proposed method surpasses the current state of the art.", "year": 2020, "venue": "", "authors": [ "Romain Lopez", "Pierre Boyeau", "N. Yosef", "Michael I. Jordan", "J. Regier" ], "externalIds": { "CorpusId": 211146177 }, "url": "https://www.semanticscholar.org/paper/ef4f5a50837a7c1b3e87b9300ffc7ba00d461a0f", "referenceCount": 53, "citationCount": 11952, "influentialCitationCount": 1706, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale", "abstract": null, "year": 2020, "venue": "IJCV", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", "abstract": "We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models (Peters et al., 2018a; Radford et al., 2018), BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. 
It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5 (7.7 point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).", "year": 2019, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Jacob Devlin", "Ming-Wei Chang", "Kenton Lee", "Kristina Toutanova" ], "externalIds": { "MAG": "2951055169", "ACL": "N19-1423", "DBLP": "journals/corr/abs-1810-04805", "ArXiv": "1810.04805", "DOI": "10.18653/v1/N19-1423", "CorpusId": 52967399 }, "url": "https://www.semanticscholar.org/paper/df2b0e26d0599ce3e70df8a9da02e51594e0e992", "referenceCount": 63, "citationCount": 81678, "influentialCitationCount": 19056, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GENERATIVE ADVERSARIAL NETS", "abstract": "Estimating individualized treatment effects (ITE) is a challenging task due to the need for an individual’s potential outcomes to be learned from biased data and without having access to the counterfactuals. We propose a novel method for inferring ITE based on the Generative Adversarial Nets (GANs) framework. Our method, termed Generative Adversarial Nets for inference of Individualized Treatment Effects (GANITE), is motivated by the possibility that we can capture the uncertainty in the counterfactual distributions by attempting to learn them using a GAN. We generate proxies of the counterfactual outcomes using a counterfactual generator, G, and then pass these proxies to an ITE generator, I, in order to train it. By modeling both of these using the GAN framework, we are able to infer based on the factual data, while still accounting for the unseen counterfactuals. We test our method on three real-world datasets (with both binary and multiple treatments) and show that GANITE outperforms state-of-the-art methods.", "year": 2018, "venue": "", "authors": [ "Individualized Treat", "Jinsung Yoon" ], "externalIds": { "CorpusId": 10319744 }, "url": "https://www.semanticscholar.org/paper/c68796f833a7151f0a63d1d1608dc902b4fdc9b6", "referenceCount": 24, "citationCount": 28002, "influentialCitationCount": 3321, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Supporting Online Material for Reducing the Dimensionality of Data with Neural Networks", "abstract": null, "year": 2006, "venue": "", "authors": [ "Geoffrey E. Hinton", "R. 
Salakhutdinov" ], "externalIds": { "MAG": "2338600138", "CorpusId": 262637400 }, "url": "https://www.semanticscholar.org/paper/7c59908c946a4157abc030cdbe2b63d08ba97db3", "referenceCount": 9, "citationCount": 9069, "influentialCitationCount": 490, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Predictor-corrector sampling for discrete diffusion models", "abstract": null, "year": null, "venue": "ICLR", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Stable diffusion - vae", "abstract": null, "year": null, "venue": "huggingface.co/stabilityai/ sd-vae-ft-ema", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "ASD-Diffusion: Anomalous Sound Detection with Diffusion Models": { "paper_title": "ASD-Diffusion: Anomalous Sound Detection with Diffusion Models", "arxiv_id": "2409.15957v1", "keyword": "diffusion model", "authors": [ "Fengrun Zhang", "Xiang Xie", "Kai Guo" ], "references": [ { "title": "Transformer-based autoencoder with ID constraint for unsupervised anomalous sound detection", "abstract": null, "year": 2023, "venue": "EURASIP Journal on Audio, Speech, and Music Processing", "authors": [ "Jian Guan", "Youde Liu", "Qiuqiang Kong", "Feiyang Xiao", "Qiaoxi Zhu", "Jiantong Tian", "Wenwu Wang" ], "externalIds": { "DBLP": "journals/ejasmp/GuanLKXZTW23", "ArXiv": "2310.08950", "DOI": "10.1186/s13636-023-00308-4", "CorpusId": 263912442 }, "url": "https://www.semanticscholar.org/paper/e10457f331b744ff1d9bc98b3be9554d9f386621", "referenceCount": 61, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Variational Classifier for Unsupervised Anomalous Sound Detection under Domain Generalization", "abstract": "Unsupervised anomalous sound detection typically involves us-ing a classifier with the last layer removed to extract embed-dings. After that the cosine distance between train and test embeddings as anomaly score is used. In this paper, we pro-pose a new idea which we call variational classifier that force the embeddings to follow a distribution imposed by design that can depend on the class of the input among other factors. To achieve this goal, in addition to the cross-entropy, we add to the loss function the KL divergence between these distributions and the one followed by the training embeddings. This enhances the ability of the system to differentiate between classes and it allows us to use sampling methods and to calculate the log-likelihood of a test embedding in the train embeddings distributions. We tested this proposal on the DCASE 2022 Task 2 dataset and observed improvements in both classification and unsupervised anomaly detection, which is the primary task.", "year": 2023, "venue": "Interspeech", "authors": [ "Antonio Almudévar", "Alfonso Ortega", "Luis Vicente", "A. 
Miguel", "EDUARDO LLEIDA SOLANO" ], "externalIds": { "DBLP": "conf/interspeech/Almudevar0VML23", "DOI": "10.21437/interspeech.2023-1965", "CorpusId": 263780922 }, "url": "https://www.semanticscholar.org/paper/a07379ca1717c16fbe9c505f6435472aa99bcd0c", "referenceCount": 22, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Description and Discussion on DCASE 2023 Challenge Task 2: First-Shot Unsupervised Anomalous Sound Detection for Machine Condition Monitoring", "abstract": "We present the task description of the Detection and Classification of Acoustic Scenes and Events (DCASE) 2023 Challenge Task 2: ``First-shot unsupervised anomalous sound detection (ASD) for machine condition monitoring''. The main goal is to enable rapid deployment of ASD systems for new kinds of machines without the need for hyperparameter tuning. In the past ASD tasks, developed methods tuned hyperparameters for each machine type, as the development and evaluation datasets had the same machine types. However, collecting normal and anomalous data as the development dataset can be infeasible in practice. In 2023 Task 2, we focus on solving the first-shot problem, which is the challenge of training a model on a completely novel machine type. Specifically, (i) each machine type has only one section (a subset of machine type) and (ii) machine types in the development and evaluation datasets are completely different. Analysis of 86 submissions from 23 teams revealed that the keys to outperform baselines were: 1) sampling techniques for dealing with class imbalances across different domains and attributes, 2) generation of synthetic samples for robust detection, and 3) use of multiple large pre-trained models to extract meaningful embeddings for the anomaly detector.", "year": 2023, "venue": "arXiv.org", "authors": [ "Kota Dohi", "Keisuke Imoto", "Noboru Harada", "Daisuke Niizumi", "Yuma Koizumi", "Tomoya Nishida", "Harsh Purohit", "Ryo Tanabe", "Takashi Endo", "Y. Kawaguchi" ], "externalIds": { "ArXiv": "2305.07828", "DBLP": "journals/corr/abs-2305-07828", "DOI": "10.48550/arXiv.2305.07828", "CorpusId": 258685340 }, "url": "https://www.semanticscholar.org/paper/21abcfc622c603d35a2b13a860bd1b38851abc33", "referenceCount": 22, "citationCount": 23, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Anomalous Sound Detection Using Audio Representation with Machine ID Based Contrastive Learning Pretraining", "abstract": "Existing contrastive learning methods for anomalous sound detection refine the audio representation of each audio sample by using the contrast between the samples’ augmentations (e.g., with time or frequency masking). However, they might be biased by the augmented data, due to the lack of physical properties of machine sound, thereby limiting the detection performance. This paper uses contrastive learning to refine audio representations for each machine ID, rather than for each audio sample. The proposed two-stage method uses contrastive learning to pretrain the audio representation model by incorporating machine ID and a self-supervised ID classifier to fine-tune the learnt model, while enhancing the relation between audio features from the same ID. 
Experiments show that our method outperforms the state-of-the-art methods using contrastive learning or self-supervised classification in overall anomaly detection performance and stability on DCASE 2020 Challenge Task2 dataset.", "year": 2023, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Jian Guan", "Feiyang Xiao", "Youde Liu", "Qiaoxi Zhu", "Wenwu Wang" ], "externalIds": { "DBLP": "conf/icassp/GuanXLZW23", "ArXiv": "2304.03588", "DOI": "10.1109/ICASSP49357.2023.10096054", "CorpusId": 258041223 }, "url": "https://www.semanticscholar.org/paper/1a06e5e4fd103f45e1ae6171396e07361f505fa0", "referenceCount": 20, "citationCount": 10, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Unsupervised Anomaly Detection and Localization of Machine Audio: A Gan-Based Approach", "abstract": "Automatic detection of machine anomaly remains challenging for machine learning. We believe the capability of generative adversarial network (GAN) suits the need of machine audio anomaly detection, yet rarely has this been investigated by previous work. In this paper, we propose AEGAN-AD, a totally unsupervised approach in which the generator (also an autoencoder) is trained to reconstruct input spectrograms. It is pointed out that the denoising nature of reconstruction deprecates its capacity. Thus, the discriminator is redesigned to aid the generator during both training stage and detection stage. The performance of AEGAN-AD on the dataset of DCASE 2022 Challenge TASK 2 demonstrates the state-of-the-art result on five machine types. A novel anomaly localization method is also investigated. Source code available at: www.github.com/jianganbai/AEGAN-AD", "year": 2023, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Anbai Jiang", "Weiqiang Zhang", "Yufeng Deng", "Pingyi Fan", "Jia Liu" ], "externalIds": { "DBLP": "journals/corr/abs-2303-17949", "ArXiv": "2303.17949", "DOI": "10.1109/ICASSP49357.2023.10096813", "CorpusId": 257901121 }, "url": "https://www.semanticscholar.org/paper/06f16a20ef6fc091fcf197f1127188ecf7453b32", "referenceCount": 20, "citationCount": 10, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Your Diffusion Model is Secretly a Zero-Shot Classifier", "abstract": "The recent wave of large-scale text-to-image diffusion models has dramatically increased our text-based image generation abilities. These models can generate realistic images for a staggering variety of prompts and exhibit impressive compositional generalization abilities. Almost all use cases thus far have solely focused on sampling; however, diffusion models can also provide conditional density estimates, which are useful for tasks beyond image generation. In this paper, we show that the density estimates from large-scale text-to-image diffusion models like Stable Diffusion can be leveraged to perform zero-shot classification without any additional training. Our generative approach to classification, which we call Diffusion Classifier, attains strong results on a variety of benchmarks and outperforms alternative methods of extracting knowledge from diffusion models. 
Although a gap remains between generative and discriminative approaches on zero-shot recognition tasks, our diffusion-based approach has stronger multimodal compositional reasoning abilities than competing discriminative approaches. Finally, we use Diffusion Classifier to extract standard classifiers from class-conditional diffusion models trained on ImageNet. These models approach the performance of SOTA discriminative classifiers and exhibit strong \"effective robustness\" to distribution shift. Overall, our results are a step toward using generative over discriminative models for downstream tasks. Results and visualizations on our website: diffusion-classifier.github.io/", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Alexander C. Li", "Mihir Prabhudesai", "Shivam Duggal", "Ellis L Brown", "Deepak Pathak" ], "externalIds": { "DBLP": "conf/iccv/LiPDBP23", "ArXiv": "2303.16203", "DOI": "10.1109/ICCV51070.2023.00210", "CorpusId": 257771787 }, "url": "https://www.semanticscholar.org/paper/4702d5a163477c734a54f3ed2d171dca1504eaae", "referenceCount": 90, "citationCount": 129, "influentialCitationCount": 15, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "First-Shot Anomaly Sound Detection for Machine Condition Monitoring: A Domain Generalization Baseline", "abstract": "This paper provides a baseline system for First-shot-compliant unsupervised anomaly detection (ASD) for machine condition monitoring. First-shot ASD does not allow systems to do machine-type dependent hyperparameter tuning or tool ensembling based on the performance metric calculated with the grand truth. To show benchmark performance for First-shot ASD, this paper proposes an anomaly sound detection system that works on the domain generalization task in the Detection and Classification of Acoustic Scenes and Events (DCASE) 2022 Challenge Task 2: “Unsupervised Anomalous Sound Detection for Machine Condition Monitoring Applying Domain Generalization Technique” while complying with the First-shot requirements introduced in the DCASE 2023 Challenge Task 2 (DCASE2023T2). A simple autoencoder-based implementation combined with a selective Mahalanobis metric is implemented as a baseline system. The performance evaluation is conducted to set the target benchmark for the forthcoming DCASE2023T2. The source code of the baseline system has been made available on GitHub.", "year": 2023, "venue": "European Signal Processing Conference", "authors": [ "Noboru Harada", "Daisuke Niizumi", "Yasunori Ohishi", "Daiki Takeuchi", "Masahiro Yasuda" ], "externalIds": { "DBLP": "journals/corr/abs-2303-00455", "ArXiv": "2303.00455", "DOI": "10.23919/EUSIPCO58844.2023.10289721", "CorpusId": 257255574 }, "url": "https://www.semanticscholar.org/paper/3e3fea1f10f6dc34e3caddd1e138092860194e7d", "referenceCount": 16, "citationCount": 16, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Scalable Adaptive Computation for Iterative Generation", "abstract": "Natural data is redundant yet predominant architectures tile computation uniformly across their input and output space. We propose the Recurrent Interface Networks (RINs), an attention-based architecture that decouples its core computation from the dimensionality of the data, enabling adaptive computation for more scalable generation of high-dimensional data. RINs focus the bulk of computation (i.e. 
global self-attention) on a set of latent tokens, using cross-attention to read and write (i.e. route) information between latent and data tokens. Stacking RIN blocks allows bottom-up (data to latent) and top-down (latent to data) feedback, leading to deeper and more expressive routing. While this routing introduces challenges, this is less problematic in recurrent computation settings where the task (and routing problem) changes gradually, such as iterative generation with diffusion models. We show how to leverage recurrence by conditioning the latent tokens at each forward pass of the reverse diffusion process with those from prior computation, i.e. latent self-conditioning. RINs yield state-of-the-art pixel diffusion models for image and video generation, scaling to 1024X1024 images without cascades or guidance, while being domain-agnostic and up to 10X more efficient than 2D and 3D U-Nets.", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "A. Jabri", "David J. Fleet", "Ting Chen" ], "externalIds": { "ArXiv": "2212.11972", "DBLP": "conf/icml/JabriFC23", "DOI": "10.48550/arXiv.2212.11972", "CorpusId": 254974283 }, "url": "https://www.semanticscholar.org/paper/7acc71fad70c4c65203739f156bcb440587df901", "referenceCount": 63, "citationCount": 77, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "AnoDDPM: Anomaly Detection with Denoising Diffusion Probabilistic Models using Simplex Noise", "abstract": "Generative models have been shown to provide a powerful mechanism for anomaly detection by learning to model healthy or normal reference data which can subsequently be used as a baseline for scoring anomalies. In this work we consider denoising diffusion probabilistic models (DDPMs) for unsupervised anomaly detection. DDPMs have superior mode coverage over generative adversarial networks (GANs) and higher sample quality than variational autoencoders (VAEs). However, this comes at the expense of poor scalability and increased sampling times due to the long Markov chain sequences required. We observe that within reconstruction-based anomaly detection a full-length Markov chain diffusion is not required. This leads us to develop a novel partial diffusion anomaly detection strategy that scales to high-resolution imagery, named AnoDDPM. A secondary problem is that Gaussian diffusion fails to capture larger anomalies; therefore we develop a multi-scale simplex noise diffusion process that gives control over the target anomaly size. AnoDDPM with simplex noise is shown to significantly outperform both f-AnoGAN and Gaussian diffusion for the tumorous dataset of 22 T1-weighted MRI scans (CCBS Edinburgh) qualitatively and quantitatively (improvement of +25.5% Sørensen–Dice coefficient, +17.6% IoU and +7.4% AUC).", "year": 2022, "venue": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "authors": [ "Julian Wyatt", "Adam Leach", "Sebastian M. Schmon", "Chris G. 
Willcocks" ], "externalIds": { "DBLP": "conf/cvpr/WyattLSW22", "DOI": "10.1109/CVPRW56347.2022.00080", "CorpusId": 251020113 }, "url": "https://www.semanticscholar.org/paper/20f6fce7726e7b3ab4ca45ef40d92b79f093f825", "referenceCount": 30, "citationCount": 178, "influentialCitationCount": 25, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MIMII DG: Sound Dataset for Malfunctioning Industrial Machine Investigation and Inspection for Domain Generalization Task", "abstract": "We present a machine sound dataset to benchmark domain generalization techniques for anomalous sound detection (ASD). Domain shifts are differences in data distributions that can degrade the detection performance, and handling them is a major issue for the application of ASD systems. While currently available datasets for ASD tasks assume that occurrences of domain shifts are known, in practice, they can be difficult to detect. To handle such domain shifts, domain generalization techniques that perform well regardless of the domains should be investigated. In this paper, we present the first ASD dataset for the domain generalization techniques, called MIMII DG. The dataset consists of five machine types and three domain shift scenarios for each machine type. The dataset is dedicated to the domain generalization task with features such as multiple different values for parameters that cause domain shifts and introduction of domain shifts that can be difficult to detect, such as shifts in the background noise. Experimental results using two baseline systems indicate that the dataset reproduces domain shift scenarios and is useful for benchmarking domain generalization techniques.", "year": 2022, "venue": "Workshop on Detection and Classification of Acoustic Scenes and Events", "authors": [ "Kota Dohi", "Tomoya Nishida", "Harsh Purohit", "Ryo Tanabe", "Takashi Endo", "Masaaki Yamamoto", "Yuki Nikaido", "Y. Kawaguchi" ], "externalIds": { "ArXiv": "2205.13879", "DBLP": "journals/corr/abs-2205-13879", "DOI": "10.48550/arXiv.2205.13879", "CorpusId": 249151977 }, "url": "https://www.semanticscholar.org/paper/1d2c4b783fe25b77a7ab0b7c785bc630c0c1f6c7", "referenceCount": 24, "citationCount": 47, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Self-Supervised Acoustic Anomaly Detection Via Contrastive Learning", "abstract": "We propose an acoustic anomaly detection algorithm based on the framework of contrastive learning. Contrastive learning is a recently proposed self-supervised approach that has shown promising results in image classification and speech recognition. However, its application in anomaly detection is underexplored. Earlier studies have demonstrated that it can achieve state-of-the-art performance in image anomaly detection, but its capability in anomalous sound detection is yet to be investigated. For the first time, we propose a contrastive learning-based framework that is suitable for acoustic anomaly detection. Since most existing contrastive learning approaches are targeted toward images, the effect of other data transformations on the performance of the algorithm is unknown. Our framework learns a representation from unlabeled data by applying audio-specific data augmentations. We show that in the resulting latent space, normal and abnormal points are distinguishable. 
Experiments conducted on the MIMII dataset confirm that our approach can outperform competing methods in detecting anomalies.", "year": 2021, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "H. Hojjati", "N. Armanfard" ], "externalIds": { "DBLP": "conf/icassp/HojjatiA22", "DOI": "10.36227/techrxiv.16828363", "CorpusId": 239532031 }, "url": "https://www.semanticscholar.org/paper/c35b9a59851c55e680ed46a315f7539eb7b7025b", "referenceCount": 0, "citationCount": 27, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ToyADMOS2: Another Dataset of Miniature-Machine Operating Sounds for Anomalous Sound Detection under Domain Shift Conditions", "abstract": "This paper proposes a new large-scale dataset called\"ToyADMOS2\"for anomaly detection in machine operating sounds (ADMOS). As did for our previous ToyADMOS dataset, we collected a large number of operating sounds of miniature machines (toys) under normal and anomaly conditions by deliberately damaging them but extended with providing controlled depth of damages in anomaly samples. Since typical application scenarios of ADMOS often require robust performance under domain-shift conditions, the ToyADMOS2 dataset is designed for evaluating systems under such conditions. The released dataset consists of two sub-datasets for machine-condition inspection: fault diagnosis of machines with geometrically fixed tasks and fault diagnosis of machines with moving tasks. Domain shifts are represented by introducing several differences in operating conditions, such as the use of the same machine type but with different machine models and parts configurations, different operating speeds, microphone arrangements, etc. Each sub-dataset contains over 27 k samples of normal machine-operating sounds and over 8 k samples of anomalous sounds recorded with five to eight microphones. The dataset is freely available for download at https://github.com/nttcslab/ToyADMOS2-dataset and https://doi.org/10.5281/zenodo.4580270.", "year": 2021, "venue": "Workshop on Detection and Classification of Acoustic Scenes and Events", "authors": [ "N. Harada", "Daisuke Niizumi", "Daiki Takeuchi", "Yasunori Ohishi", "Masahiro Yasuda", "Shoichiro Saito" ], "externalIds": { "DBLP": "conf/dcase/HaradaNTOYS21", "ArXiv": "2106.02369", "CorpusId": 235352630 }, "url": "https://www.semanticscholar.org/paper/42833ffa1ca9ae23d1cccdc8d1d4a31d5ce793f5", "referenceCount": 31, "citationCount": 75, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "Diffusion Models Beat GANs on Image Synthesis", "abstract": "We show that diffusion models can achieve image sample quality superior to the current state-of-the-art generative models. We achieve this on unconditional image synthesis by finding a better architecture through a series of ablations. For conditional image synthesis, we further improve sample quality with classifier guidance: a simple, compute-efficient method for trading off diversity for fidelity using gradients from a classifier. We achieve an FID of 2.97 on ImageNet 128$\\times$128, 4.59 on ImageNet 256$\\times$256, and 7.72 on ImageNet 512$\\times$512, and we match BigGAN-deep even with as few as 25 forward passes per sample, all while maintaining better coverage of the distribution. 
Finally, we find that classifier guidance combines well with upsampling diffusion models, further improving FID to 3.94 on ImageNet 256$\\times$256 and 3.85 on ImageNet 512$\\times$512. We release our code at https://github.com/openai/guided-diffusion", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Prafulla Dhariwal", "Alex Nichol" ], "externalIds": { "ArXiv": "2105.05233", "DBLP": "journals/corr/abs-2105-05233", "CorpusId": 234357997 }, "url": "https://www.semanticscholar.org/paper/64ea8f180d0682e6c18d1eb688afdb2027c02794", "referenceCount": 81, "citationCount": 5177, "influentialCitationCount": 661, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Flow-Based Self-Supervised Density Estimation for Anomalous Sound Detection", "abstract": "To develop a machine sound monitoring system, a method for detecting anomalous sound is proposed. Exact likelihood estimation using Normalizing Flows is a promising technique for unsupervised anomaly detection, but it can fail at out-of-distribution detection since the likelihood is affected by the smoothness of the data. To improve the detection performance, we train the model to assign higher likelihood to target machine sounds and lower likelihood to sounds from other machines of the same machine type. We demonstrate that this enables the model to incorporate a self-supervised classification-based approach. Experiments conducted using the DCASE 2020 Challenge Task2 dataset showed that the proposed method improves the AUC by 4.6% on average when using Masked Autoregressive Flow (MAF) and by 5.8% when using Glow, which is a significant improvement over the previous method.", "year": 2021, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Kota Dohi", "Takashi Endo", "Harsh Purohit", "Ryo Tanabe", "Y. Kawaguchi" ], "externalIds": { "DBLP": "conf/icassp/DohiEPTK21", "ArXiv": "2103.08801", "DOI": "10.1109/ICASSP39728.2021.9414662", "CorpusId": 232240238 }, "url": "https://www.semanticscholar.org/paper/a3a099559665abbbb15299161d5dd9c57ca83f03", "referenceCount": 23, "citationCount": 51, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Improved Denoising Diffusion Probabilistic Models", "abstract": "Denoising diffusion probabilistic models (DDPM) are a class of generative models which have recently been shown to produce excellent samples. We show that with a few simple modifications, DDPMs can also achieve competitive log-likelihoods while maintaining high sample quality. Additionally, we find that learning variances of the reverse diffusion process allows sampling with an order of magnitude fewer forward passes with a negligible difference in sample quality, which is important for the practical deployment of these models. We additionally use precision and recall to compare how well DDPMs and GANs cover the target distribution. Finally, we show that the sample quality and likelihood of these models scale smoothly with model capacity and training compute, making them easily scalable. 
We release our code at https://github.com/openai/improved-diffusion", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Alex Nichol", "Prafulla Dhariwal" ], "externalIds": { "ArXiv": "2102.09672", "DBLP": "conf/icml/NicholD21", "CorpusId": 231979499 }, "url": "https://www.semanticscholar.org/paper/de18baa4964804cf471d85a5a090498242d2e79f", "referenceCount": 47, "citationCount": 2547, "influentialCitationCount": 282, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Denoising Diffusion Implicit Models", "abstract": "Denoising diffusion probabilistic models (DDPMs) have achieved high quality image generation without adversarial training, yet they require simulating a Markov chain for many steps to produce a sample. To accelerate sampling, we present denoising diffusion implicit models (DDIMs), a more efficient class of iterative implicit probabilistic models with the same training procedure as DDPMs. In DDPMs, the generative process is defined as the reverse of a Markovian diffusion process. We construct a class of non-Markovian diffusion processes that lead to the same training objective, but whose reverse process can be much faster to sample from. We empirically demonstrate that DDIMs can produce high quality samples $10 \\times$ to $50 \\times$ faster in terms of wall-clock time compared to DDPMs, allow us to trade off computation for sample quality, and can perform semantically meaningful image interpolation directly in the latent space.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Jiaming Song", "Chenlin Meng", "Stefano Ermon" ], "externalIds": { "ArXiv": "2010.02502", "DBLP": "journals/corr/abs-2010-02502", "MAG": "3092442149", "CorpusId": 222140788 }, "url": "https://www.semanticscholar.org/paper/014576b866078524286802b1d0e18628520aa886", "referenceCount": 47, "citationCount": 4422, "influentialCitationCount": 877, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Denoising Diffusion Probabilistic Models", "abstract": "We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional CIFAR10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art FID score of 3.17. On 256x256 LSUN, we obtain sample quality similar to ProgressiveGAN. Our implementation is available at this https URL", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Jonathan Ho", "Ajay Jain", "P. 
Abbeel" ], "externalIds": { "ArXiv": "2006.11239", "MAG": "3100572490", "DBLP": "journals/corr/abs-2006-11239", "CorpusId": 219955663 }, "url": "https://www.semanticscholar.org/paper/5c126ae3421f05768d8edd97ecd44b1364e2c99a", "referenceCount": 73, "citationCount": 10778, "influentialCitationCount": 2337, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Description and Discussion on DCASE2020 Challenge Task2: Unsupervised Anomalous Sound Detection for Machine Condition Monitoring", "abstract": "In this paper, we present the task description and discuss the results of the DCASE 2020 Challenge Task 2: Unsupervised Detection of Anomalous Sounds for Machine Condition Monitoring. The goal of anomalous sound detection (ASD) is to identify whether the sound emitted from a target machine is normal or anomalous. The main challenge of this task is to detect unknown anomalous sounds under the condition that only normal sound samples have been provided as training data. We have designed this challenge as the first benchmark of ASD research, which includes a large-scale dataset, evaluation metrics, and a simple baseline system. We received 117 submissions from 40 teams, and several novel approaches have been developed as a result of this challenge. On the basis of the analysis of the evaluation results, we discuss two new approaches and their problems.", "year": 2020, "venue": "Workshop on Detection and Classification of Acoustic Scenes and Events", "authors": [ "Yuma Koizumi", "Y. Kawaguchi", "Keisuke Imoto", "Toshiki Nakamura", "Yuki Nikaido", "Ryo Tanabe", "Harsh Purohit", "Kaori Suefusa", "Takashi Endo", "Masahiro Yasuda", "N. Harada" ], "externalIds": { "DBLP": "journals/corr/abs-2006-05822", "MAG": "3034883497", "ArXiv": "2006.05822", "CorpusId": 219559355 }, "url": "https://www.semanticscholar.org/paper/31d2fdf79eccd83f5eef8840bfd93530051252d8", "referenceCount": 42, "citationCount": 161, "influentialCitationCount": 23, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Adam: A Method for Stochastic Optimization", "abstract": "We introduce Adam, an algorithm for first-order gradient-based optimization of stochastic objective functions, based on adaptive estimates of lower-order moments. The method is straightforward to implement, is computationally efficient, has little memory requirements, is invariant to diagonal rescaling of the gradients, and is well suited for problems that are large in terms of data and/or parameters. The method is also appropriate for non-stationary objectives and problems with very noisy and/or sparse gradients. The hyper-parameters have intuitive interpretations and typically require little tuning. Some connections to related algorithms, on which Adam was inspired, are discussed. We also analyze the theoretical convergence properties of the algorithm and provide a regret bound on the convergence rate that is comparable to the best known results under the online convex optimization framework. Empirical results demonstrate that Adam works well in practice and compares favorably to other stochastic optimization methods. Finally, we discuss AdaMax, a variant of Adam based on the infinity norm.", "year": 2014, "venue": "International Conference on Learning Representations", "authors": [ "Diederik P. 
Kingma", "Jimmy Ba" ], "externalIds": { "MAG": "2964121744", "DBLP": "journals/corr/KingmaB14", "ArXiv": "1412.6980", "CorpusId": 6628106 }, "url": "https://www.semanticscholar.org/paper/a6cb366736791bcccc5c8639de5a8f9636bf87e8", "referenceCount": 26, "citationCount": 139990, "influentialCitationCount": 22063, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Anomalous sound detection based on self-supervised learning", "abstract": null, "year": 2023, "venue": "Tech. rep. DCASE2023 Challenge", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Unsupervised abnormal sound detection based on machine condition mixup", "abstract": null, "year": 2023, "venue": "Tech. rep.", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "DiffusionAD: Denoising Diffusion for Anomaly Detection", "abstract": "Anomaly detection is widely applied due to its remarkable effectiveness and efficiency in meeting the needs of real-world industrial manufacturing. We introduce a new pipeline, DiffusionAD, to anomaly detection. We frame anomaly detection as a “noise-to-norm” paradigm, in which anomalies are identified as inconsistencies between a query image and its flawless approximation. Our pipeline achieves this by restoring the anomalous regions from the noisy corrupted query image while keeping the normal regions unchanged. DiffusionAD includes a denoising sub-network and a segmentation sub-network, which work together to provide intuitive anomaly detection and localization in an end-to-end manner, without the need for complicated post-processing steps. Remarkably, during inference, this framework delivers satisfactory performance with just one diffusion reverse process step, which is tens to hundreds of times faster than general diffusion methods. Extensive evaluations on standard and challenging benchmarks including VisA and DAGM show that DiffusionAD outperforms current state-of-the-art paradigms, demonstrating the effectiveness and generalizability of the proposed pipeline.", "year": 2023, "venue": "arXiv.org", "authors": [ "H. Zhang", "Z. Wang", "Zuxuan Wu", "Yuwei Jiang" ], "externalIds": { "DBLP": "journals/corr/abs-2303-08730", "DOI": "10.48550/arXiv.2303.08730", "CorpusId": 257532414 }, "url": "https://www.semanticscholar.org/paper/55d49aa6bd3e6605c6510a147c1fb5bdd7af0b12", "referenceCount": 62, "citationCount": 17, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Ensemble of auto-encoder based systems for anomaly detection", "abstract": null, "year": 2020, "venue": "Tech. 
rep.", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "ASD-Diffusion", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "TFG: Unified Training-Free Guidance for Diffusion Models": { "paper_title": "TFG: Unified Training-Free Guidance for Diffusion Models", "arxiv_id": "2409.15761v1", "keyword": "diffusion model", "authors": [ "Haotian Ye", "Haowei Lin", "Jiaqi Han", "Minkai Xu", "Sheng Liu", "Yitao Liang", "Jianzhu Ma", "James Zou", "Stefano Ermon" ], "references": [ { "title": "Generalizing Alignment Paradigm of Text-to-Image Generation with Preferences through $f$-divergence Minimization", "abstract": "Direct Preference Optimization (DPO) has recently expanded its successful application from aligning large language models (LLMs) to aligning text-to-image models with human preferences, which has generated considerable interest within the community. However, we have observed that these approaches rely solely on minimizing the reverse Kullback-Leibler divergence during alignment process between the fine-tuned model and the reference model, neglecting the incorporation of other divergence constraints. In this study, we focus on extending reverse Kullback-Leibler divergence in the alignment paradigm of text-to-image models to $f$-divergence, which aims to garner better alignment performance as well as good generation diversity. We provide the generalized formula of the alignment paradigm under the $f$-divergence condition and thoroughly analyze the impact of different divergence constraints on alignment process from the perspective of gradient fields. We conduct comprehensive evaluation on image-text alignment performance, human value alignment performance and generation diversity performance under different divergence constraints, and the results indicate that alignment based on Jensen-Shannon divergence achieves the best trade-off among them. The option of divergence employed for aligning text-to-image models significantly impacts the trade-off between alignment performance (especially human value alignment) and generation diversity, which highlights the necessity of selecting an appropriate divergence for practical applications.", "year": 2024, "venue": "", "authors": [ "Haoyuan Sun", "Bo Xia", "Yongzhe Chang", "Xueqian Wang" ], "externalIds": { "ArXiv": "2409.09774", "CorpusId": 272690234 }, "url": "https://www.semanticscholar.org/paper/809db82227bcc74322840c2b16851ec23da91006", "referenceCount": 71, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Accurate structure prediction of biomolecular interactions with AlphaFold 3", "abstract": null, "year": 2024, "venue": "Nature", "authors": [ "Josh Abramson", "Jonas Adler", "Jack Dunger", "Richard Evans", "Tim Green", "A. Pritzel", "Olaf Ronneberger", "Lindsay Willmore", "Andrew J Ballard", "Joshua Bambrick", "Sebastian W Bodenstein", "David A Evans", "Chia-Chun Hung", "Michael O’Neill", "D. Reiman", "Kathryn Tunyasuvunakool", "Zachary Wu", "Akvilė Žemgulytė", "Eirini Arvaniti", "Charles Beattie", "Ottavia Bertolli", "Alex Bridgland", "Alexey Cherepanov", "Miles Congreve", "A. 
Cowen-Rivers", "Andrew Cowie", "Michael Figurnov", "Fabian B Fuchs", "Hannah Gladman", "Rishub Jain", "Yousuf A. Khan", "Caroline M R Low", "Kuba Perlin", "Anna Potapenko", "Pascal Savy", "Sukhdeep Singh", "A. Stecula", "Ashok Thillaisundaram", "Catherine Tong", "Sergei Yakneen", "Ellen D. Zhong", "Michal Zielinski", "Augustin Žídek", "V. Bapst", "Pushmeet Kohli", "Max Jaderberg", "D. Hassabis", "J. Jumper" ], "externalIds": { "PubMedCentral": "11168924", "DOI": "10.1038/s41586-024-07487-w", "CorpusId": 269633210, "PubMed": "38718835" }, "url": "https://www.semanticscholar.org/paper/7572ba7f604ef95d7acdd657ebac458106bd35df", "referenceCount": 67, "citationCount": 557, "influentialCitationCount": 21, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Diffusion Models for Audio Restoration", "abstract": "With the development of audio playback devices and fast data transmission, the demand for high sound quality is rising for both entertainment and communications. In this quest for better sound quality, challenges emerge from distortions and interferences originating at the recording side or caused by an imperfect transmission pipeline. To address this problem, audio restoration methods aim to recover clean sound signals from the corrupted input data. We present here audio restoration algorithms based on diffusion models, with a focus on speech enhancement and music restoration tasks. Traditional approaches, often grounded in handcrafted rules and statistical heuristics, have shaped our understanding of audio signals. In the past decades, there has been a notable shift towards data-driven methods that exploit the modeling capabilities of DNNs. Deep generative models, and among them diffusion models, have emerged as powerful techniques for learning complex data distributions. However, relying solely on DNN-based learning approaches carries the risk of reducing interpretability, particularly when employing end-to-end models. Nonetheless, data-driven approaches allow more flexibility in comparison to statistical model-based frameworks, whose performance depends on distributional and statistical assumptions that can be difficult to guarantee. Here, we aim to show that diffusion models can combine the best of both worlds and offer the opportunity to design audio restoration algorithms with a good degree of interpretability and a remarkable performance in terms of sound quality. We explain the diffusion formalism and its application to the conditional generation of clean audio signals. We believe that diffusion models open an exciting field of research with the potential to spawn new audio restoration algorithms that are natural-sounding and remain robust in difficult acoustic situations.", "year": 2024, "venue": "arXiv.org", "authors": [ "Jean-Marie Lemercier", "Julius Richter", "Simon Welker", "Eloi Moliner", "V. 
Välimäki", "Timo Gerkmann" ], "externalIds": { "ArXiv": "2402.09821", "DBLP": "journals/corr/abs-2402-09821", "DOI": "10.48550/arXiv.2402.09821", "CorpusId": 267681712 }, "url": "https://www.semanticscholar.org/paper/2befe6e08c33c72a62d779759076d2c7ab9f410a", "referenceCount": 36, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Motion Guidance: Diffusion-Based Image Editing with Differentiable Motion Estimators", "abstract": "Diffusion models are capable of generating impressive images conditioned on text descriptions, and extensions of these models allow users to edit images at a relatively coarse scale. However, the ability to precisely edit the layout, position, pose, and shape of objects in images with diffusion models is still difficult. To this end, we propose motion guidance, a zero-shot technique that allows a user to specify dense, complex motion fields that indicate where each pixel in an image should move. Motion guidance works by steering the diffusion sampling process with the gradients through an off-the-shelf optical flow network. Specifically, we design a guidance loss that encourages the sample to have the desired motion, as estimated by a flow network, while also being visually similar to the source image. By simultaneously sampling from a diffusion model and guiding the sample to have low guidance loss, we can obtain a motion-edited image. We demonstrate that our technique works on complex motions and produces high quality edits of real and generated images.", "year": 2024, "venue": "International Conference on Learning Representations", "authors": [ "Daniel Geng", "Andrew Owens" ], "externalIds": { "ArXiv": "2401.18085", "DBLP": "conf/iclr/GengO24", "DOI": "10.48550/arXiv.2401.18085", "CorpusId": 267334704 }, "url": "https://www.semanticscholar.org/paper/c27da349811cbbafd5896befb0bb138d87583873", "referenceCount": 42, "citationCount": 14, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A framework for conditional diffusion modelling with applications in motif scaffolding for protein design", "abstract": "Many protein design applications, such as binder or enzyme design, require scaffolding a structural motif with high precision. Generative modelling paradigms based on denoising diffusion processes emerged as a leading candidate to address this motif scaffolding problem and have shown early experimental success in some cases. In the diffusion paradigm, motif scaffolding is treated as a conditional generation task, and several conditional generation protocols were proposed or imported from the Computer Vision literature. However, most of these protocols are motivated heuristically, e.g. via analogies to Langevin dynamics, and lack a unifying framework, obscuring connections between the different approaches. In this work, we unify conditional training and conditional sampling procedures under one common framework based on the mathematically well-understood Doob's h-transform. This new perspective allows us to draw connections between existing methods and propose a new variation on existing conditional training protocols. We illustrate the effectiveness of this new protocol in both, image outpainting and motif scaffolding and find that it outperforms standard methods.", "year": 2023, "venue": "", "authors": [ "Kieran Didi", "Francisco Vargas", "Simon V. Mathis", "Vincent Dutordoir", "Emile Mathieu", "U. J. 
Komorowska", "Pietro Liò" ], "externalIds": { "ArXiv": "2312.09236", "CorpusId": 266209840 }, "url": "https://www.semanticscholar.org/paper/84cd9279669c056fa3c38fe90a06265ec7852e03", "referenceCount": 39, "citationCount": 7, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Biology" ] }, { "title": "Manifold Preserving Guided Diffusion", "abstract": "Despite the recent advancements, conditional image generation still faces challenges of cost, generalizability, and the need for task-specific training. In this paper, we propose Manifold Preserving Guided Diffusion (MPGD), a training-free conditional generation framework that leverages pretrained diffusion models and off-the-shelf neural networks with minimal additional inference cost for a broad range of tasks. Specifically, we leverage the manifold hypothesis to refine the guided diffusion steps and introduce a shortcut algorithm in the process. We then propose two methods for on-manifold training-free guidance using pre-trained autoencoders and demonstrate that our shortcut inherently preserves the manifolds when applied to latent diffusion models. Our experiments show that MPGD is efficient and effective for solving a variety of conditional generation applications in low-compute settings, and can consistently offer up to 3.8x speed-ups with the same number of diffusion steps while maintaining high sample quality compared to the baselines.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Yutong He", "Naoki Murata", "Chieh-Hsin Lai", "Yuhta Takida", "Toshimitsu Uesaka", "Dongjun Kim", "Wei-Hsiang Liao", "Yuki Mitsufuji", "J. Z. Kolter", "Ruslan Salakhutdinov", "Stefano Ermon" ], "externalIds": { "DBLP": "conf/iclr/HeMLTUKLMKSE24", "ArXiv": "2311.16424", "DOI": "10.48550/arXiv.2311.16424", "CorpusId": 265466093 }, "url": "https://www.semanticscholar.org/paper/19f4d1533d8f7c4c13e9732c36fb374edd5ad470", "referenceCount": 55, "citationCount": 20, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Controllable Music Production with Diffusion Models and Guidance Gradients", "abstract": "We demonstrate how conditional generation from diffusion models can be used to tackle a variety of realistic tasks in the production of music in 44.1kHz stereo audio with sampling-time guidance. The scenarios we consider include continuation, inpainting and regeneration of musical audio, the creation of smooth transitions between two different music tracks, and the transfer of desired stylistic characteristics to existing audio clips. We achieve this by applying guidance at sampling time in a simple framework that supports both reconstruction and classification losses, or any combination of the two. This approach ensures that generated audio can match its surrounding context, or conform to a class distribution or latent representation specified relative to any suitable pre-trained classifier or embedding model. 
Audio samples are available at https://machinelearning.apple.com/research/controllable-music", "year": 2023, "venue": "arXiv.org", "authors": [ "Mark Levy", "Bruno Di Giorgi", "Floris Weers", "Angelos Katharopoulos", "Tom Nickson" ], "externalIds": { "DBLP": "journals/corr/abs-2311-00613", "ArXiv": "2311.00613", "DOI": "10.48550/arXiv.2311.00613", "CorpusId": 264833434 }, "url": "https://www.semanticscholar.org/paper/87e36f66b0c28ca7a327257ccb00282bdf7fe7d5", "referenceCount": 37, "citationCount": 12, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Steered Diffusion: A Generalized Framework for Plug-and-Play Conditional Image Synthesis", "abstract": "Conditional generative models typically demand large annotated training sets to achieve high-quality synthesis. As a result, there has been significant interest in designing models that perform plug-and-play generation, i.e., to use a predefined or pretrained model, which is not explicitly trained on the generative task, to guide the generative process (e.g., using language). However, such guidance is typically useful only towards synthesizing high-level semantics rather than editing fine-grained details as in image-to-image translation tasks. To this end, and capitalizing on the powerful fine-grained generative control offered by the recent diffusion-based generative models, we introduce Steered Diffusion, a generalized framework for photorealistic zero-shot conditional image generation using a diffusion model trained for unconditional generation. The key idea is to steer the image generation of the diffusion model at inference time via designing a loss using a pre-trained inverse model that characterizes the conditional task. This loss modulates the sampling trajectory of the diffusion process. Our framework allows for easy incorporation of multiple conditions during inference. We present experiments using steered diffusion on several tasks including inpainting, colorization, text-guided semantic editing, and image super-resolution. Our results demonstrate clear qualitative and quantitative improvements over state-of-the-art diffusion-based plug-and-play models while adding negligible additional computational cost.", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Nithin Gopalakrishnan Nair", "A. Cherian", "Suhas Lohit", "Ye Wang", "T. Koike-Akino", "Vishal M. Patel", "Tim K. Marks" ], "externalIds": { "ArXiv": "2310.00224", "DBLP": "journals/corr/abs-2310-00224", "DOI": "10.1109/ICCV51070.2023.01906", "CorpusId": 263674911 }, "url": "https://www.semanticscholar.org/paper/fc9f15b5dd0feb7d5c7d0ab75774c9324a3ebe04", "referenceCount": 44, "citationCount": 12, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "VRDMG: Vocal Restoration via Diffusion Posterior Sampling with Multiple Guidance", "abstract": "Restoring degraded music signals is essential to enhance audio quality for downstream music manipulation. Recent diffusion-based music restoration methods have demonstrated impressive performance, and among them, diffusion posterior sampling (DPS) stands out given its intrinsic properties, making it versatile across various restoration tasks. 
In this paper, we identify that there are potential issues which will degrade current DPS-based methods’ performance and introduce the way to mitigate the issues inspired by diverse diffusion guidance techniques including the RePaint (RP) strategy and the Pseudoinverse-Guided Diffusion Models (ΠGDM). We demonstrate our methods for the vocal declipping and bandwidth extension tasks under various levels of distortion and cutoff frequency, respectively. In both tasks, our methods outperform the current DPS-based music restoration benchmarks. We refer to http://carlosholivan.github.io/demos/audio-restoration-2023.html for examples of the restored audio samples.", "year": 2023, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Carlos Hernandez-Olivan", "Koichi Saito", "Naoki Murata", "Chieh-Hsin Lai", "Marco A. Martínez-Ramírez", "Wei-Hsiang Liao", "Yuki Mitsufuji" ], "externalIds": { "DBLP": "conf/icassp/Hernandez-Olivan24", "ArXiv": "2309.06934", "DOI": "10.1109/ICASSP48485.2024.10446423", "CorpusId": 261705895 }, "url": "https://www.semanticscholar.org/paper/89953401cab837161178b1fe2a6c87447b126cbf", "referenceCount": 34, "citationCount": 3, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "De novo design of protein structure and function with RFdiffusion", "abstract": null, "year": 2023, "venue": "Nature", "authors": [ "Joseph L. Watson", "David Juergens", "N. Bennett", "Brian L. Trippe", "Jason Yim", "Helen E. Eisenach", "Woody Ahern", "Andrew J. Borst", "Robert J. Ragotte", "L. Milles", "B. Wicky", "Nikita Hanikel", "S. Pellock", "A. Courbet", "W. Sheffler", "Jue Wang", "Preetham Venkatesh", "Isaac Sappington", "Susana Vázquez Torres", "Anna Lauko", "Valentin De Bortoli", "Emile Mathieu", "Sergey Ovchinnikov", "R. Barzilay", "T. Jaakkola", "F. DiMaio", "M. Baek", "D. Baker" ], "externalIds": { "PubMedCentral": "10468394", "DOI": "10.1038/s41586-023-06415-8", "CorpusId": 271161349, "PubMed": "37433327" }, "url": "https://www.semanticscholar.org/paper/eb8c0df75993c1c19c51cde9345e45fc260f661c", "referenceCount": 61, "citationCount": 139, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Conditional Generation from Unconditional Diffusion Models using Denoiser Representations", "abstract": "Denoising diffusion models have gained popularity as a generative modeling technique for producing high-quality and diverse images. Applying these models to downstream tasks requires conditioning, which can take the form of text, class labels, or other forms of guidance. However, providing conditioning information to these models can be challenging, particularly when annotations are scarce or imprecise. In this paper, we propose adapting pre-trained unconditional diffusion models to new conditions using the learned internal representations of the denoiser network. We demonstrate the effectiveness of our approach on various conditional generation tasks, including attribute-conditioned generation and mask-conditioned generation. Additionally, we show that augmenting the Tiny ImageNet training set with synthetic images generated by our approach improves the classification accuracy of ResNet baselines by up to 8%. 
Our approach provides a powerful and flexible way to adapt diffusion models to new conditions and generate high-quality augmented data for various conditional generation tasks.", "year": 2023, "venue": "arXiv.org", "authors": [ "Alexandros Graikos", "Srikar Yellapragada", "D. Samaras" ], "externalIds": { "DBLP": "journals/corr/abs-2306-01900", "ArXiv": "2306.01900", "DOI": "10.48550/arXiv.2306.01900", "CorpusId": 259075414 }, "url": "https://www.semanticscholar.org/paper/b14a3b26aa5884501ec0f66133d5c591ad29f8e2", "referenceCount": 34, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Geometric Latent Diffusion Models for 3D Molecule Generation", "abstract": "Generative models, especially diffusion models (DMs), have achieved promising results for generating feature-rich geometries and advancing foundational science problems such as molecule design. Inspired by the recent huge success of Stable (latent) Diffusion models, we propose a novel and principled method for 3D molecule generation named Geometric Latent Diffusion Models (GeoLDM). GeoLDM is the first latent DM model for the molecular geometry domain, composed of autoencoders encoding structures into continuous latent codes and DMs operating in the latent space. Our key innovation is that for modeling the 3D molecular geometries, we capture its critical roto-translational equivariance constraints by building a point-structured latent space with both invariant scalars and equivariant tensors. Extensive experiments demonstrate that GeoLDM can consistently achieve better performance on multiple molecule generation benchmarks, with up to 7\\% improvement for the valid percentage of large biomolecules. Results also demonstrate GeoLDM's higher capacity for controllable generation thanks to the latent modeling. Code is provided at \\url{https://github.com/MinkaiXu/GeoLDM}.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Minkai Xu", "Alexander Powers", "R. Dror", "Stefano Ermon", "J. Leskovec" ], "externalIds": { "DBLP": "conf/icml/XuPDEL23", "ArXiv": "2305.01140", "DOI": "10.48550/arXiv.2305.01140", "CorpusId": 258436871 }, "url": "https://www.semanticscholar.org/paper/584915a73b8ff2ce29cf644e388279baa14be550", "referenceCount": 62, "citationCount": 75, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Biology" ] }, { "title": "Contrastive Energy Prediction for Exact Energy-Guided Diffusion Sampling in Offline Reinforcement Learning", "abstract": "Guided sampling is a vital approach for applying diffusion models in real-world tasks that embeds human-defined guidance during the sampling procedure. This paper considers a general setting where the guidance is defined by an (unnormalized) energy function. The main challenge for this setting is that the intermediate guidance during the diffusion sampling procedure, which is jointly defined by the sampling distribution and the energy function, is unknown and is hard to estimate. To address this challenge, we propose an exact formulation of the intermediate guidance as well as a novel training objective named contrastive energy prediction (CEP) to learn the exact guidance. Our method is guaranteed to converge to the exact guidance under unlimited model capacity and data samples, while previous methods can not. We demonstrate the effectiveness of our method by applying it to offline reinforcement learning (RL). 
Extensive experiments on D4RL benchmarks demonstrate that our method outperforms existing state-of-the-art algorithms. We also provide some examples of applying CEP for image synthesis to demonstrate the scalability of CEP on high-dimensional data.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Cheng Lu", "Huayu Chen", "Jianfei Chen", "Hang Su", "Chongxuan Li", "Jun Zhu" ], "externalIds": { "ArXiv": "2304.12824", "DBLP": "journals/corr/abs-2304-12824", "DOI": "10.48550/arXiv.2304.12824", "CorpusId": 258309302 }, "url": "https://www.semanticscholar.org/paper/3a09f8a8d64955c0bab002c08579ba3ff567b6c5", "referenceCount": 85, "citationCount": 32, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Control3Diff: Learning Controllable 3D Diffusion Models from Single-view Images", "abstract": "Diffusion models have recently become the de-facto approach for generative modeling in the 2D domain. However, extending diffusion models to 3D is challenging, due to the difficulties in acquiring 3D ground truth data for training. On the other hand, 3D GANs that integrate implicit 3D representations into GANs have shown remarkable 3D-aware generation when trained only on single-view image datasets. However, 3D GANs do not provide straightforward ways to precisely control image synthesis. To address these challenges, We present Control3Diff, a 3D diffusion model that combines the strengths of diffusion models and 3D GANs for versatile controllable 3D-aware image synthesis for single-view datasets. Control3Diff explicitly models the underlying latent distribution (optionally conditioned on external inputs), thus enabling direct control during the diffusion process. Moreover, our approach is general and applicable to any types of controlling inputs, allowing us to train it with the same diffusion objective without any auxiliary supervision. We validate the efficacy of Control3Diff on standard image generation benchmarks including FFHQ, AFHQ, and ShapeNet, using various conditioning inputs such as images, sketches, and text prompts.", "year": 2023, "venue": "International Conference on 3D Vision", "authors": [ "Jiatao Gu", "Qingzhe Gao", "Shuangfei Zhai", "Baoquan Chen", "Lingjie Liu", "J. Susskind" ], "externalIds": { "DBLP": "conf/3dim/GuGZCLS24", "ArXiv": "2304.06700", "DOI": "10.1109/3DV62453.2024.00030", "CorpusId": 258108230 }, "url": "https://www.semanticscholar.org/paper/18e5fecd0ce09ac71706147393301dd25f42b359", "referenceCount": 98, "citationCount": 26, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FreeDoM: Training-Free Energy-Guided Conditional Diffusion Model", "abstract": "Recently, conditional diffusion models have gained popularity in numerous applications due to their exceptional generation ability. However, many existing methods are training-required. They need to train a time-dependent classifier or a condition-dependent score estimator, which increases the cost of constructing conditional diffusion models and is inconvenient to transfer across different conditions. Some current works aim to overcome this limitation by proposing training-free solutions, but most can only be applied to a specific category of tasks and not to more general conditions. In this work, we propose a training-Free conditional Diffusion Model (FreeDoM) used for various conditions. 
Specifically, we leverage off-the-shelf pretrained networks, such as a face detection model, to construct time-independent energy functions, which guide the generation process without requiring training. Furthermore, because the construction of the energy function is very flexible and adaptable to various conditions, our proposed FreeDoM has a broader range of applications than existing training-free methods. FreeDoM is advantageous in its simplicity, effectiveness, and low cost. Experiments demonstrate that FreeDoM is effective for various conditions and suitable for diffusion models of diverse data domains, including image and latent code domains. Code is available at https://github.com/vvictoryuki/FreeDoM.", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Jiwen Yu", "Yinhuai Wang", "Chen Zhao", "Bernard Ghanem", "Jian Zhang" ], "externalIds": { "DBLP": "journals/corr/abs-2303-09833", "ArXiv": "2303.09833", "DOI": "10.1109/ICCV51070.2023.02118", "CorpusId": 257622962 }, "url": "https://www.semanticscholar.org/paper/ce06533ecc98ba221a4db427738884c6a6af6eee", "referenceCount": 54, "citationCount": 82, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Reduce, Reuse, Recycle: Compositional Generation with Energy-Based Diffusion Models and MCMC", "abstract": "Since their introduction, diffusion models have quickly become the prevailing approach to generative modeling in many domains. They can be interpreted as learning the gradients of a time-varying sequence of log-probability density functions. This interpretation has motivated classifier-based and classifier-free guidance as methods for post-hoc control of diffusion models. In this work, we build upon these ideas using the score-based interpretation of diffusion models, and explore alternative ways to condition, modify, and reuse diffusion models for tasks involving compositional generation and guidance. In particular, we investigate why certain types of composition fail using current techniques and present a number of solutions. We conclude that the sampler (not the model) is responsible for this failure and propose new samplers, inspired by MCMC, which enable successful compositional generation. Further, we propose an energy-based parameterization of diffusion models which enables the use of new compositional operators and more sophisticated, Metropolis-corrected samplers. Intriguingly we find these samplers lead to notable improvements in compositional generation across a wide set of problems such as classifier-guided ImageNet modeling and compositional text-to-image generation.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Yilun Du", "Conor Durkan", "Robin Strudel", "J. Tenenbaum", "S. Dieleman", "R. Fergus", "Jascha Narain Sohl-Dickstein", "A. Doucet", "Will Grathwohl" ], "externalIds": { "ArXiv": "2302.11552", "DBLP": "journals/corr/abs-2302-11552", "DOI": "10.48550/arXiv.2302.11552", "CorpusId": 257078922 }, "url": "https://www.semanticscholar.org/paper/3ac2d89388a816786234aa9f8ef2de9a635b0a69", "referenceCount": 52, "citationCount": 87, "influentialCitationCount": 12, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Universal Guidance for Diffusion Models", "abstract": "Typical diffusion models are trained to accept a particular form of conditioning, most commonly text, and cannot be conditioned on other modalities without retraining. 
In this work, we propose a universal guidance algorithm that enables diffusion models to be controlled by arbitrary guidance modalities without the need to retrain any use-specific components. We show that our algorithm successfully generates quality images with guidance functions including segmentation, face recognition, object detection, and classifier signals. Code is available at github.com/arpitbansal297/Universal-Guided-Diffusion.", "year": 2023, "venue": "2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "authors": [ "Arpit Bansal", "Hong-Min Chu", "Avi Schwarzschild", "Soumyadip Sengupta", "Micah Goldblum", "Jonas Geiping", "T. Goldstein" ], "externalIds": { "ArXiv": "2302.07121", "DBLP": "conf/iclr/BansalCSSGGG24", "DOI": "10.1109/CVPRW59228.2023.00091", "CorpusId": 256846836 }, "url": "https://www.semanticscholar.org/paper/d24b4f34197df0257390b57f02537e6ce3284f2e", "referenceCount": 31, "citationCount": 146, "influentialCitationCount": 18, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "AudioLDM: Text-to-Audio Generation with Latent Diffusion Models", "abstract": "Text-to-audio (TTA) system has recently gained attention for its ability to synthesize general audio based on text descriptions. However, previous studies in TTA have limited generation quality with high computational costs. In this study, we propose AudioLDM, a TTA system that is built on a latent space to learn the continuous audio representations from contrastive language-audio pretraining (CLAP) latents. The pretrained CLAP models enable us to train LDMs with audio embedding while providing text embedding as a condition during sampling. By learning the latent representations of audio signals and their compositions without modeling the cross-modal relationship, AudioLDM is advantageous in both generation quality and computational efficiency. Trained on AudioCaps with a single GPU, AudioLDM achieves state-of-the-art TTA performance measured by both objective and subjective metrics (e.g., frechet distance). Moreover, AudioLDM is the first TTA system that enables various text-guided audio manipulations (e.g., style transfer) in a zero-shot fashion. Our implementation and demos are available at https://audioldm.github.io.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Haohe Liu", "Zehua Chen", "Yiitan Yuan", "Xinhao Mei", "Xubo Liu", "Danilo P. Mandic", "Wenwu Wang", "M. Plumbley" ], "externalIds": { "DBLP": "journals/corr/abs-2301-12503", "ArXiv": "2301.12503", "DOI": "10.48550/arXiv.2301.12503", "CorpusId": 256390486 }, "url": "https://www.semanticscholar.org/paper/fa0f3d8aa20e8987dbc7a516d5399cfa3dc97b1b", "referenceCount": 65, "citationCount": 342, "influentialCitationCount": 54, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Zero-Shot Image Restoration Using Denoising Diffusion Null-Space Model", "abstract": "Most existing Image Restoration (IR) models are task-specific, which can not be generalized to different degradation operators. In this work, we propose the Denoising Diffusion Null-Space Model (DDNM), a novel zero-shot framework for arbitrary linear IR problems, including but not limited to image super-resolution, colorization, inpainting, compressed sensing, and deblurring. DDNM only needs a pre-trained off-the-shelf diffusion model as the generative prior, without any extra training or network modifications. 
By refining only the null-space contents during the reverse diffusion process, we can yield diverse results satisfying both data consistency and realness. We further propose an enhanced and robust version, dubbed DDNM+, to support noisy restoration and improve restoration quality for hard tasks. Our experiments on several IR tasks reveal that DDNM outperforms other state-of-the-art zero-shot IR methods. We also demonstrate that DDNM+ can solve complex real-world applications, e.g., old photo restoration.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Yinhuai Wang", "Jiwen Yu", "Jian Zhang" ], "externalIds": { "ArXiv": "2212.00490", "DBLP": "journals/corr/abs-2212-00490", "DOI": "10.48550/arXiv.2212.00490", "CorpusId": 254125609 }, "url": "https://www.semanticscholar.org/paper/3a75ed3e9e81c9db573ef73d20e2c66c12aaedf8", "referenceCount": 44, "citationCount": 276, "influentialCitationCount": 51, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Solving Audio Inverse Problems with a Diffusion Model", "abstract": "This paper presents CQT-Diff, a data-driven generative audio model that can, once trained, be used for solving various different audio inverse problems in a problem-agnostic setting. CQT-Diff is a neural diffusion model with an architecture that is carefully constructed to exploit pitch-equivariant symmetries in music. This is achieved by preconditioning the model with an invertible Constant-Q Transform (CQT), whose logarithmically-spaced frequency axis represents pitch equivariance as translation equivariance. The proposed method is evaluated with solo piano music, using objective and subjective metrics in three different and varied tasks: audio bandwidth extension, inpainting, and declipping. The results show that CQT-Diff outperforms the compared baselines and ablations in audio bandwidth extension and, without retraining, delivers competitive performance against modern baselines in audio inpainting and declipping. This work represents the first diffusion-based general framework for solving inverse problems in audio processing.", "year": 2022, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Eloi Moliner", "J. Lehtinen", "V. Välimäki" ], "externalIds": { "DBLP": "journals/corr/abs-2210-15228", "ArXiv": "2210.15228", "DOI": "10.1109/ICASSP49357.2023.10095637", "CorpusId": 253157756 }, "url": "https://www.semanticscholar.org/paper/0b4a66789722834b0bc05e2328d42e8a9bdeb5e8", "referenceCount": 36, "citationCount": 39, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Freeze then Train: Towards Provable Representation Learning under Spurious Correlations and Feature Noise", "abstract": "The existence of spurious correlations such as image backgrounds in the training environment can make empirical risk minimization (ERM) perform badly in the test environment. To address this problem, Kirichenko et al. (2022) empirically found that the core features that are related to the outcome can still be learned well even with the presence of spurious correlations. This opens a promising strategy to first train a feature learner rather than a classifier, and then perform linear probing (last layer retraining) in the test environment. However, a theoretical understanding of when and why this approach works is lacking. 
In this paper, we find that core features are only learned well when their associated non-realizable noise is smaller than that of spurious features, which is not necessarily true in practice. We provide both theories and experiments to support this finding and to illustrate the importance of non-realizable noise. Moreover, we propose an algorithm called Freeze then Train (FTT), that first freezes certain salient features and then trains the rest of the features using ERM. We theoretically show that FTT preserves features that are more beneficial to test time probing. Across two commonly used spurious correlation datasets, FTT outperforms ERM, IRM, JTT and CVaR-DRO, with substantial improvement in accuracy (by 4.5%) when the feature noise is large. FTT also performs better on general distribution shift benchmarks.", "year": 2022, "venue": "International Conference on Artificial Intelligence and Statistics", "authors": [ "Haotian Ye", "James Y. Zou", "Linjun Zhang" ], "externalIds": { "ArXiv": "2210.11075", "DBLP": "conf/aistats/Ye0Z23", "DOI": "10.48550/arXiv.2210.11075", "CorpusId": 253018507 }, "url": "https://www.semanticscholar.org/paper/0e90ab64cb1f5894a4d4a895ed61f578e24cd494", "referenceCount": 76, "citationCount": 15, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LAION-5B: An open large-scale dataset for training next generation image-text models", "abstract": "Groundbreaking language-vision architectures like CLIP and DALL-E proved the utility of training on large amounts of noisy image-text data, without relying on expensive accurate labels used in standard vision unimodal supervised learning. The resulting models showed capabilities of strong text-guided image generation and transfer to downstream tasks, while performing remarkably at zero-shot classification with noteworthy out-of-distribution robustness. Since then, large-scale language-vision models like ALIGN, BASIC, GLIDE, Flamingo and Imagen made further improvements. Studying the training and capabilities of such models requires datasets containing billions of image-text pairs. Until now, no datasets of this size have been made openly available for the broader research community. To address this problem and democratize research on large-scale multi-modal models, we present LAION-5B - a dataset consisting of 5.85 billion CLIP-filtered image-text pairs, of which 2.32B contain English language. We show successful replication and fine-tuning of foundational models like CLIP, GLIDE and Stable Diffusion using the dataset, and discuss further experiments enabled with an openly available dataset of this scale. Additionally we provide several nearest neighbor indices, an improved web-interface for dataset exploration and subset generation, and detection scores for watermark, NSFW, and toxic content detection. Announcement page https://laion.ai/laion-5b-a-new-era-of-open-large-scale-multi-modal-datasets/", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Christoph Schuhmann", "Romain Beaumont", "Richard Vencu", "Cade Gordon", "Ross Wightman", "Mehdi Cherti", "Theo Coombes", "Aarush Katta", "Clayton Mullis", "Mitchell Wortsman", "P. Schramowski", "Srivatsa Kundurthy", "Katherine Crowson", "Ludwig Schmidt", "R. Kaczmarczyk", "J. 
Jitsev" ], "externalIds": { "DBLP": "conf/nips/SchuhmannBVGWCC22", "ArXiv": "2210.08402", "DOI": "10.48550/arXiv.2210.08402", "CorpusId": 252917726 }, "url": "https://www.semanticscholar.org/paper/e5c8960eb2ec034ffbd353ef39fd1cb541d3c7c9", "referenceCount": 109, "citationCount": 2214, "influentialCitationCount": 256, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "OpenOOD: Benchmarking Generalized Out-of-Distribution Detection", "abstract": "Out-of-distribution (OOD) detection is vital to safety-critical machine learning applications and has thus been extensively studied, with a plethora of methods developed in the literature. However, the field currently lacks a unified, strictly formulated, and comprehensive benchmark, which often results in unfair comparisons and inconclusive results. From the problem setting perspective, OOD detection is closely related to neighboring fields including anomaly detection (AD), open set recognition (OSR), and model uncertainty, since methods developed for one domain are often applicable to each other. To help the community to improve the evaluation and advance, we build a unified, well-structured codebase called OpenOOD, which implements over 30 methods developed in relevant fields and provides a comprehensive benchmark under the recently proposed generalized OOD detection framework. With a comprehensive comparison of these methods, we are gratified that the field has progressed significantly over the past few years, where both preprocessing methods and the orthogonal post-hoc methods show strong potential.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Jingkang Yang", "Pengyun Wang", "Dejian Zou", "Zitang Zhou", "Kun Ding", "Wen-Hsiao Peng", "Haoqi Wang", "Guangyao Chen", "Bo Li", "Yiyou Sun", "Xuefeng Du", "Kaiyang Zhou", "Wayne Zhang", "Dan Hendrycks", "Yixuan Li", "Ziwei Liu" ], "externalIds": { "DBLP": "conf/nips/YangWZZDPWCLSDZ22", "ArXiv": "2210.07242", "DOI": "10.48550/arXiv.2210.07242", "CorpusId": 252873458 }, "url": "https://www.semanticscholar.org/paper/4e9e30f4702f64af5aacbb5791172c5b37510dc3", "referenceCount": 86, "citationCount": 157, "influentialCitationCount": 26, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Equivariant Energy-Guided SDE for Inverse Molecular Design", "abstract": "Inverse molecular design is critical in material science and drug discovery, where the generated molecules should satisfy certain desirable properties. In this paper, we propose equivariant energy-guided stochastic differential equations (EEGSDE), a flexible framework for controllable 3D molecule generation under the guidance of an energy function in diffusion models. Formally, we show that EEGSDE naturally exploits the geometric symmetry in 3D molecular conformation, as long as the energy function is invariant to orthogonal transformations. Empirically, under the guidance of designed energy functions, EEGSDE significantly improves the baseline on QM9, in inverse molecular design targeted to quantum properties and molecular structures. 
Furthermore, EEGSDE is able to generate molecules with multiple target properties by combining the corresponding energy functions linearly.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Fan Bao", "Min Zhao", "Zhongkai Hao", "Pei‐Yun Li", "Chongxuan Li", "Jun Zhu" ], "externalIds": { "DBLP": "conf/iclr/Bao0HLL023", "ArXiv": "2209.15408", "DOI": "10.48550/arXiv.2209.15408", "CorpusId": 252668426 }, "url": "https://www.semanticscholar.org/paper/154fa935150bb2c9e4fd7f6fc7820007c6390990", "referenceCount": 55, "citationCount": 40, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Physics", "Biology" ] }, { "title": "Classifier-Free Diffusion Guidance", "abstract": "Classifier guidance is a recently introduced method to trade off mode coverage and sample fidelity in conditional diffusion models post training, in the same spirit as low temperature sampling or truncation in other types of generative models. Classifier guidance combines the score estimate of a diffusion model with the gradient of an image classifier and thereby requires training an image classifier separate from the diffusion model. It also raises the question of whether guidance can be performed without a classifier. We show that guidance can be indeed performed by a pure generative model without such a classifier: in what we call classifier-free guidance, we jointly train a conditional and an unconditional diffusion model, and we combine the resulting conditional and unconditional score estimates to attain a trade-off between sample quality and diversity similar to that obtained using classifier guidance.", "year": 2022, "venue": "arXiv.org", "authors": [ "Jonathan Ho" ], "externalIds": { "ArXiv": "2207.12598", "DBLP": "journals/corr/abs-2207-12598", "DOI": "10.48550/arXiv.2207.12598", "CorpusId": 249145348 }, "url": "https://www.semanticscholar.org/paper/af9f365ed86614c800f082bd8eb14be76072ad16", "referenceCount": 25, "citationCount": 2396, "influentialCitationCount": 313, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Scaling Autoregressive Models for Content-Rich Text-to-Image Generation", "abstract": "We present the Pathways Autoregressive Text-to-Image (Parti) model, which generates high-fidelity photorealistic images and supports content-rich synthesis involving complex compositions and world knowledge. Parti treats text-to-image generation as a sequence-to-sequence modeling problem, akin to machine translation, with sequences of image tokens as the target outputs rather than text tokens in another language. This strategy can naturally tap into the rich body of prior work on large language models, which have seen continued advances in capabilities and performance through scaling data and model sizes. Our approach is simple: First, Parti uses a Transformer-based image tokenizer, ViT-VQGAN, to encode images as sequences of discrete tokens. Second, we achieve consistent quality improvements by scaling the encoder-decoder Transformer model up to 20B parameters, with a new state-of-the-art zero-shot FID score of 7.23 and finetuned FID score of 3.22 on MS-COCO. Our detailed analysis on Localized Narratives as well as PartiPrompts (P2), a new holistic benchmark of over 1600 English prompts, demonstrate the effectiveness of Parti across a wide variety of categories and difficulty aspects. 
We also explore and highlight limitations of our models in order to define and exemplify key areas of focus for further improvements. See https://parti.research.google/ for high-resolution images.", "year": 2022, "venue": "Trans. Mach. Learn. Res.", "authors": [ "Jiahui Yu", "Yuanzhong Xu", "Jing Yu Koh", "Thang Luong", "Gunjan Baid", "Zirui Wang", "Vijay Vasudevan", "Alexander Ku", "Yinfei Yang", "Burcu Karagol Ayan", "Ben Hutchinson", "Wei Han", "Zarana Parekh", "Xin Li", "Han Zhang", "Jason Baldridge", "Yonghui Wu" ], "externalIds": { "DBLP": "journals/tmlr/YuXKLBWVKYAHHPLZBW22", "ArXiv": "2206.10789", "DOI": "10.48550/arXiv.2206.10789", "CorpusId": 249926846 }, "url": "https://www.semanticscholar.org/paper/1243e13254bb4ea1f71b4be8a3e4e54ffd02d2fe", "referenceCount": 115, "citationCount": 827, "influentialCitationCount": 79, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Equivariant Diffusion for Molecule Generation in 3D", "abstract": "This work introduces a diffusion model for molecule generation in 3D that is equivariant to Euclidean transformations. Our E(3) Equivariant Diffusion Model (EDM) learns to denoise a diffusion process with an equivariant network that jointly operates on both continuous (atom coordinates) and categorical features (atom types). In addition, we provide a probabilistic analysis which admits likelihood computation of molecules using our model. Experimentally, the proposed method significantly outperforms previous 3D molecular generative methods regarding the quality of generated samples and efficiency at training time.", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "Emiel Hoogeboom", "Victor Garcia Satorras", "Clément Vignac", "M. Welling" ], "externalIds": { "DBLP": "conf/icml/HoogeboomSVW22", "ArXiv": "2203.17003", "DOI": "10.48550/arXiv.2203.17003", "CorpusId": 247839510 }, "url": "https://www.semanticscholar.org/paper/7dbb386a617eacc954940c9540d9cb262529b8b1", "referenceCount": 53, "citationCount": 411, "influentialCitationCount": 67, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Biology", "Mathematics" ] }, { "title": "GeoDiff: a Geometric Diffusion Model for Molecular Conformation Generation", "abstract": "Predicting molecular conformations from molecular graphs is a fundamental problem in cheminformatics and drug discovery. Recently, significant progress has been achieved with machine learning approaches, especially with deep generative models. Inspired by the diffusion process in classical non-equilibrium thermodynamics where heated particles will diffuse from original states to a noise distribution, in this paper, we propose a novel generative model named GeoDiff for molecular conformation prediction. GeoDiff treats each atom as a particle and learns to directly reverse the diffusion process (i.e., transforming from a noise distribution to stable conformations) as a Markov chain. Modeling such a generation process is however very challenging as the likelihood of conformations should be roto-translational invariant. We theoretically show that Markov chains evolving with equivariant Markov kernels can induce an invariant distribution by design, and further propose building blocks for the Markov kernels to preserve the desirable equivariance property. The whole framework can be efficiently trained in an end-to-end fashion by optimizing a weighted variational lower bound to the (conditional) likelihood. 
Experiments on multiple benchmarks show that GeoDiff is superior or comparable to existing state-of-the-art approaches, especially on large molecules.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Minkai Xu", "Lantao Yu", "Yang Song", "Chence Shi", "Stefano Ermon", "Jian Tang" ], "externalIds": { "DBLP": "conf/iclr/XuY0SE022", "ArXiv": "2203.02923", "DOI": "10.48550/arXiv.2203.02923", "CorpusId": 247292764 }, "url": "https://www.semanticscholar.org/paper/c871d2dc802d276608a6734637f8bc9e6da0d837", "referenceCount": 58, "citationCount": 381, "influentialCitationCount": 38, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Biology" ] }, { "title": "Denoising Diffusion Restoration Models", "abstract": "Many interesting tasks in image restoration can be cast as linear inverse problems. A recent family of approaches for solving these problems uses stochastic algorithms that sample from the posterior distribution of natural images given the measurements. However, efficient solutions often require problem-specific supervised training to model the posterior, whereas unsupervised methods that are not problem-specific typically rely on inefficient iterative methods. This work addresses these issues by introducing Denoising Diffusion Restoration Models (DDRM), an efficient, unsupervised posterior sampling method. Motivated by variational inference, DDRM takes advantage of a pre-trained denoising diffusion generative model for solving any linear inverse problem. We demonstrate DDRM's versatility on several image datasets for super-resolution, deblurring, inpainting, and colorization under various amounts of measurement noise. DDRM outperforms the current leading unsupervised methods on the diverse ImageNet dataset in reconstruction quality, perceptual quality, and runtime, being 5x faster than the nearest competitor. DDRM also generalizes well for natural images out of the distribution of the observed ImageNet training set.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Bahjat Kawar", "Michael Elad", "Stefano Ermon", "Jiaming Song" ], "externalIds": { "ArXiv": "2201.11793", "DBLP": "journals/corr/abs-2201-11793", "CorpusId": 246411364 }, "url": "https://www.semanticscholar.org/paper/3d3c5fcbc40aadccceda58d3d9c5cd00588ea0b7", "referenceCount": 70, "citationCount": 552, "influentialCitationCount": 73, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "RePaint: Inpainting using Denoising Diffusion Probabilistic Models", "abstract": "Free-form inpainting is the task of adding new content to an image in the regions specified by an arbitrary binary mask. Most existing approaches train for a certain distribution of masks, which limits their generalization capabilities to unseen mask types. Furthermore, training with pixel-wise and perceptual losses often leads to simple textural extensions towards the missing areas instead of semantically meaningful generation. In this work, we propose RePaint: A Denoising Diffusion Probabilistic Model (DDPM) based inpainting approach that is applicable to even extreme masks. We employ a pretrained unconditional DDPM as the generative prior. To condition the generation process, we only alter the reverse diffusion iterations by sampling the unmasked regions using the given image infor-mation. 
Since this technique does not modify or condition the original DDPM network itself, the model produces high-quality and diverse output images for any inpainting form. We validate our method for both faces and general-purpose image inpainting using standard and extreme masks. Re-Paint outperforms state-of-the-art Autoregressive, and GAN approaches for at least five out of six mask distributions. Github Repository: git.io/RePaint", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Andreas Lugmayr", "Martin Danelljan", "Andrés Romero", "F. Yu", "R. Timofte", "L. Gool" ], "externalIds": { "ArXiv": "2201.09865", "DBLP": "journals/corr/abs-2201-09865", "DOI": "10.1109/CVPR52688.2022.01117", "CorpusId": 246240274 }, "url": "https://www.semanticscholar.org/paper/1e91fa21b890a8f5d615578f4ddf46c3cb394691", "referenceCount": 60, "citationCount": 971, "influentialCitationCount": 81, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "High-Resolution Image Synthesis with Latent Diffusion Models", "abstract": "By decomposing the image formation process into a sequential application of denoising autoencoders, diffusion models (DMs) achieve state-of-the-art synthesis results on image data and beyond. Additionally, their formulation allows for a guiding mechanism to control the image generation process without retraining. However, since these models typically operate directly in pixel space, optimization of powerful DMs often consumes hundreds of GPU days and inference is expensive due to sequential evaluations. To enable DM training on limited computational resources while retaining their quality and flexibility, we apply them in the latent space of powerful pretrained autoencoders. In contrast to previous work, training diffusion models on such a representation allows for the first time to reach a near-optimal point between complexity reduction and detail preservation, greatly boosting visual fidelity. By introducing cross-attention layers into the model architecture, we turn diffusion models into powerful and flexible generators for general conditioning inputs such as text or bounding boxes and high-resolution synthesis becomes possible in a convolutional manner. Our latent diffusion models (LDMs) achieve new state of the art scores for image inpainting and class-conditional image synthesis and highly competitive performance on various tasks, including unconditional image generation, text-to-image synthesis, and super-resolution, while significantly reducing computational requirements compared to pixel-based DMs.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Robin Rombach", "A. Blattmann", "Dominik Lorenz", "Patrick Esser", "B. Ommer" ], "externalIds": { "ArXiv": "2112.10752", "DBLP": "journals/corr/abs-2112-10752", "DOI": "10.1109/CVPR52688.2022.01042", "CorpusId": 245335280 }, "url": "https://www.semanticscholar.org/paper/c10075b3746a9f3dd5811970e93c8ca3ad39b39d", "referenceCount": 110, "citationCount": 9847, "influentialCitationCount": 2744, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GLIDE: Towards Photorealistic Image Generation and Editing with Text-Guided Diffusion Models", "abstract": "Diffusion models have recently been shown to generate high-quality synthetic images, especially when paired with a guidance technique to trade off diversity for fidelity. 
We explore diffusion models for the problem of text-conditional image synthesis and compare two different guidance strategies: CLIP guidance and classifier-free guidance. We find that the latter is preferred by human evaluators for both photorealism and caption similarity, and often produces photorealistic samples. Samples from a 3.5 billion parameter text-conditional diffusion model using classifier-free guidance are favored by human evaluators to those from DALL-E, even when the latter uses expensive CLIP reranking. Additionally, we find that our models can be fine-tuned to perform image inpainting, enabling powerful text-driven image editing. We train a smaller model on a filtered dataset and release the code and weights at https://github.com/openai/glide-text2im.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Alex Nichol", "Prafulla Dhariwal", "A. Ramesh", "Pranav Shyam", "Pamela Mishkin", "Bob McGrew", "I. Sutskever", "Mark Chen" ], "externalIds": { "ArXiv": "2112.10741", "DBLP": "journals/corr/abs-2112-10741", "CorpusId": 245335086 }, "url": "https://www.semanticscholar.org/paper/7002ae048e4b8c9133a55428441e8066070995cb", "referenceCount": 51, "citationCount": 2645, "influentialCitationCount": 244, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Conditional Point Diffusion-Refinement Paradigm for 3D Point Cloud Completion", "abstract": "3D point cloud is an important 3D representation for capturing real world 3D objects. However, real-scanned 3D point clouds are often incomplete, and it is important to recover complete point clouds for downstream applications. Most existing point cloud completion methods use Chamfer Distance (CD) loss for training. The CD loss estimates correspondences between two point clouds by searching nearest neighbors, which does not capture the overall point density distribution on the generated shape, and therefore likely leads to non-uniform point cloud generation. To tackle this problem, we propose a novel Point Diffusion-Refinement (PDR) paradigm for point cloud completion. PDR consists of a Conditional Generation Network (CGNet) and a ReFinement Network (RFNet). The CGNet uses a conditional generative model called the denoising diffusion probabilistic model (DDPM) to generate a coarse completion conditioned on the partial observation. DDPM establishes a one-to-one pointwise mapping between the generated point cloud and the uniform ground truth, and then optimizes the mean squared error loss to realize uniform generation. The RFNet refines the coarse output of the CGNet and further improves quality of the completed point cloud. Furthermore, we develop a novel dual-path architecture for both networks. The architecture can (1) effectively and efficiently extract multi-level features from partially observed point clouds to guide completion, and (2) accurately manipulate spatial locations of 3D points to obtain smooth surfaces and sharp details. Extensive experimental results on various benchmark datasets show that our PDR paradigm outperforms previous state-of-the-art methods for point cloud completion. 
Remarkably, with the help of the RFNet, we can accelerate the iterative generation process of the DDPM by up to 50 times without much performance drop.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Zhaoyang Lyu", "Zhifeng Kong", "Xudong Xu", "Liang Pan", "Dahua Lin" ], "externalIds": { "ArXiv": "2112.03530", "DBLP": "journals/corr/abs-2112-03530", "CorpusId": 244920632 }, "url": "https://www.semanticscholar.org/paper/c940509c5b1ee8db9e4ce70254726719b8d56c54", "referenceCount": 34, "citationCount": 97, "influentialCitationCount": 9, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Conditional Image Generation with Score-Based Diffusion Models", "abstract": "Score-based diffusion models have emerged as one of the most promising frameworks for deep generative modelling. In this work we conduct a systematic comparison and theoretical analysis of different approaches to learning conditional probability distributions with score-based diffusion models. In particular, we prove results which provide a theoretical justification for one of the most successful estimators of the conditional score. Moreover, we introduce a multi-speed diffusion framework, which leads to a new estimator for the conditional score, performing on par with previous state-of-the-art approaches. Our theoretical and experimental findings are accompanied by an open source library MSDiff which allows for application and further research of multi-speed diffusion models.", "year": 2021, "venue": "arXiv.org", "authors": [ "Georgios Batzolis", "Jan Stanczuk", "C. Schonlieb", "Christian Etmann" ], "externalIds": { "DBLP": "journals/corr/abs-2111-13606", "ArXiv": "2111.13606", "CorpusId": 244709128 }, "url": "https://www.semanticscholar.org/paper/35356feaaf1a739a7db2b76f32e3e5a71ec74eb5", "referenceCount": 30, "citationCount": 131, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Score-based diffusion models for accelerated MRI", "abstract": null, "year": 2021, "venue": "Medical Image Anal.", "authors": [ "Hyungjin Chung", "Jong-Chul Ye" ], "externalIds": { "DBLP": "journals/corr/abs-2110-05243", "ArXiv": "2110.05243", "DOI": "10.1016/j.media.2022.102479", "CorpusId": 238583682, "PubMed": "35696876" }, "url": "https://www.semanticscholar.org/paper/ff85a9d7182063bb71b47ab239d662fd2975c4fc", "referenceCount": 75, "citationCount": 276, "influentialCitationCount": 21, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Engineering", "Computer Science" ] }, { "title": "Cascaded Diffusion Models for High Fidelity Image Generation", "abstract": "We show that cascaded diffusion models are capable of generating high fidelity images on the class-conditional ImageNet generation benchmark, without any assistance from auxiliary image classifiers to boost sample quality. A cascaded diffusion model comprises a pipeline of multiple diffusion models that generate images of increasing resolution, beginning with a standard diffusion model at the lowest resolution, followed by one or more super-resolution diffusion models that successively upsample the image and add higher resolution details. We find that the sample quality of a cascading pipeline relies crucially on conditioning augmentation, our proposed method of data augmentation of the lower resolution conditioning inputs to the super-resolution models. 
Our experiments show that conditioning augmentation prevents compounding error during sampling in a cascaded model, helping us to train cascading pipelines achieving FID scores of 1.48 at 64x64, 3.52 at 128x128 and 4.88 at 256x256 resolutions, outperforming BigGAN-deep, and classification accuracy scores of 63.02% (top-1) and 84.06% (top-5) at 256x256, outperforming VQ-VAE-2.", "year": 2021, "venue": "Journal of machine learning research", "authors": [ "Jonathan Ho", "Chitwan Saharia", "William Chan", "David J. Fleet", "Mohammad Norouzi", "Tim Salimans" ], "externalIds": { "DBLP": "journals/jmlr/HoSCFNS22", "ArXiv": "2106.15282", "CorpusId": 235619773 }, "url": "https://www.semanticscholar.org/paper/0f183bcfe65781c06b1a48a6f56e0f3c63e8e4a4", "referenceCount": 37, "citationCount": 914, "influentialCitationCount": 38, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Diffusion Models Beat GANs on Image Synthesis", "abstract": "We show that diffusion models can achieve image sample quality superior to the current state-of-the-art generative models. We achieve this on unconditional image synthesis by finding a better architecture through a series of ablations. For conditional image synthesis, we further improve sample quality with classifier guidance: a simple, compute-efficient method for trading off diversity for fidelity using gradients from a classifier. We achieve an FID of 2.97 on ImageNet 128$\\times$128, 4.59 on ImageNet 256$\\times$256, and 7.72 on ImageNet 512$\\times$512, and we match BigGAN-deep even with as few as 25 forward passes per sample, all while maintaining better coverage of the distribution. Finally, we find that classifier guidance combines well with upsampling diffusion models, further improving FID to 3.94 on ImageNet 256$\\times$256 and 3.85 on ImageNet 512$\\times$512. We release our code at https://github.com/openai/guided-diffusion", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Prafulla Dhariwal", "Alex Nichol" ], "externalIds": { "ArXiv": "2105.05233", "DBLP": "journals/corr/abs-2105-05233", "CorpusId": 234357997 }, "url": "https://www.semanticscholar.org/paper/64ea8f180d0682e6c18d1eb688afdb2027c02794", "referenceCount": 81, "citationCount": 5177, "influentialCitationCount": 661, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Diffusion Probabilistic Models for 3D Point Cloud Generation", "abstract": "We present a probabilistic model for point cloud generation, which is fundamental for various 3D vision tasks such as shape completion, upsampling, synthesis and data augmentation. Inspired by the diffusion process in non-equilibrium thermodynamics, we view points in point clouds as particles in a thermodynamic system in contact with a heat bath, which diffuse from the original distribution to a noise distribution. Point cloud generation thus amounts to learning the reverse diffusion process that transforms the noise distribution to the distribution of a desired shape. Specifically, we propose to model the reverse diffusion process for point clouds as a Markov chain conditioned on certain shape latent. We derive the variational bound in closed form for training and provide implementations of the model. Experimental results demonstrate that our model achieves competitive performance in point cloud generation and auto-encoding. 
The code is available at https://github.com/luost26/diffusion-point-cloud.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Shitong Luo", "Wei Hu" ], "externalIds": { "DBLP": "journals/corr/abs-2103-01458", "ArXiv": "2103.01458", "DOI": "10.1109/CVPR46437.2021.00286", "CorpusId": 232092778 }, "url": "https://www.semanticscholar.org/paper/c32fd8ea1b3f2df410410fb18d569dede102c53a", "referenceCount": 27, "citationCount": 568, "influentialCitationCount": 63, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "E(n) Equivariant Graph Neural Networks", "abstract": "This paper introduces a new model to learn graph neural networks equivariant to rotations, translations, reflections and permutations called E(n)-Equivariant Graph Neural Networks (EGNNs). In contrast with existing methods, our work does not require computationally expensive higher-order representations in intermediate layers while it still achieves competitive or better performance. In addition, whereas existing methods are limited to equivariance on 3 dimensional spaces, our model is easily scaled to higher-dimensional spaces. We demonstrate the effectiveness of our method on dynamical systems modelling, representation learning in graph autoencoders and predicting molecular properties.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Victor Garcia Satorras", "Emiel Hoogeboom", "M. Welling" ], "externalIds": { "DBLP": "conf/icml/SatorrasHW21", "ArXiv": "2102.09844", "CorpusId": 231979049 }, "url": "https://www.semanticscholar.org/paper/8ea9cb53779a8c1bb0e53764f88669bd7edf38f0", "referenceCount": 41, "citationCount": 762, "influentialCitationCount": 151, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Improved Denoising Diffusion Probabilistic Models", "abstract": "Denoising diffusion probabilistic models (DDPM) are a class of generative models which have recently been shown to produce excellent samples. We show that with a few simple modifications, DDPMs can also achieve competitive log-likelihoods while maintaining high sample quality. Additionally, we find that learning variances of the reverse diffusion process allows sampling with an order of magnitude fewer forward passes with a negligible difference in sample quality, which is important for the practical deployment of these models. We additionally use precision and recall to compare how well DDPMs and GANs cover the target distribution. Finally, we show that the sample quality and likelihood of these models scale smoothly with model capacity and training compute, making them easily scalable. We release our code at https://github.com/openai/improved-diffusion", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Alex Nichol", "Prafulla Dhariwal" ], "externalIds": { "ArXiv": "2102.09672", "DBLP": "conf/icml/NicholD21", "CorpusId": 231979499 }, "url": "https://www.semanticscholar.org/paper/de18baa4964804cf471d85a5a090498242d2e79f", "referenceCount": 47, "citationCount": 2547, "influentialCitationCount": 282, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Maximum Likelihood Training of Score-Based Diffusion Models", "abstract": "Score-based diffusion models synthesize samples by reversing a stochastic process that diffuses data to noise, and are trained by minimizing a weighted combination of score matching losses. 
The log-likelihood of score-based diffusion models can be tractably computed through a connection to continuous normalizing flows, but log-likelihood is not directly optimized by the weighted combination of score matching losses. We show that for a specific weighting scheme, the objective upper bounds the negative log-likelihood, thus enabling approximate maximum likelihood training of score-based diffusion models. We empirically observe that maximum likelihood training consistently improves the likelihood of score-based diffusion models across multiple datasets, stochastic processes, and model architectures. Our best models achieve negative log-likelihoods of 2.83 and 3.76 bits/dim on CIFAR-10 and ImageNet 32x32 without any data augmentation, on a par with state-of-the-art autoregressive models on these tasks.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Yang Song", "Conor Durkan", "Iain Murray", "Stefano Ermon" ], "externalIds": { "ArXiv": "2101.09258", "DBLP": "conf/nips/SongDME21", "CorpusId": 235352469 }, "url": "https://www.semanticscholar.org/paper/9cf6f42806a35fd1d410dbc34d8e8df73a29d094", "referenceCount": 64, "citationCount": 472, "influentialCitationCount": 82, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Training data-efficient image transformers & distillation through attention", "abstract": "Recently, neural networks purely based on attention were shown to address image understanding tasks such as image classification. However, these visual transformers are pre-trained with hundreds of millions of images using an expensive infrastructure, thereby limiting their adoption. In this work, we produce a competitive convolution-free transformer by training on Imagenet only. We train them on a single computer in less than 3 days. Our reference vision transformer (86M parameters) achieves top-1 accuracy of 83.1% (single-crop evaluation) on ImageNet with no external data. More importantly, we introduce a teacher-student strategy specific to transformers. It relies on a distillation token ensuring that the student learns from the teacher through attention. We show the interest of this token-based distillation, especially when using a convnet as a teacher. This leads us to report results competitive with convnets for both Imagenet (where we obtain up to 85.2% accuracy) and when transferring to other tasks. We share our code and models.", "year": 2020, "venue": "International Conference on Machine Learning", "authors": [ "Hugo Touvron", "M. Cord", "Matthijs Douze", "Francisco Massa", "Alexandre Sablayrolles", "Herv'e J'egou" ], "externalIds": { "ArXiv": "2012.12877", "DBLP": "journals/corr/abs-2012-12877", "CorpusId": 229363322 }, "url": "https://www.semanticscholar.org/paper/ad7ddcc14984caae308c397f1a589aae75d4ab71", "referenceCount": 66, "citationCount": 5424, "influentialCitationCount": 892, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Score-Based Generative Modeling through Stochastic Differential Equations", "abstract": "Creating noise from data is easy; creating data from noise is generative modeling. We present a stochastic differential equation (SDE) that smoothly transforms a complex data distribution to a known prior distribution by slowly injecting noise, and a corresponding reverse-time SDE that transforms the prior distribution back into the data distribution by slowly removing the noise. 
Crucially, the reverse-time SDE depends only on the time-dependent gradient field (\\aka, score) of the perturbed data distribution. By leveraging advances in score-based generative modeling, we can accurately estimate these scores with neural networks, and use numerical SDE solvers to generate samples. We show that this framework encapsulates previous approaches in score-based generative modeling and diffusion probabilistic modeling, allowing for new sampling procedures and new modeling capabilities. In particular, we introduce a predictor-corrector framework to correct errors in the evolution of the discretized reverse-time SDE. We also derive an equivalent neural ODE that samples from the same distribution as the SDE, but additionally enables exact likelihood computation, and improved sampling efficiency. In addition, we provide a new way to solve inverse problems with score-based models, as demonstrated with experiments on class-conditional generation, image inpainting, and colorization. Combined with multiple architectural improvements, we achieve record-breaking performance for unconditional image generation on CIFAR-10 with an Inception score of 9.89 and FID of 2.20, a competitive likelihood of 2.99 bits/dim, and demonstrate high fidelity generation of 1024 x 1024 images for the first time from a score-based generative model.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Yang Song", "Jascha Narain Sohl-Dickstein", "Diederik P. Kingma", "Abhishek Kumar", "Stefano Ermon", "Ben Poole" ], "externalIds": { "DBLP": "journals/corr/abs-2011-13456", "ArXiv": "2011.13456", "MAG": "3110257065", "CorpusId": 227209335 }, "url": "https://www.semanticscholar.org/paper/633e2fbfc0b21e959a244100937c5853afca4853", "referenceCount": 66, "citationCount": 4108, "influentialCitationCount": 956, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale", "abstract": "While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Alexey Dosovitskiy", "Lucas Beyer", "Alexander Kolesnikov", "Dirk Weissenborn", "Xiaohua Zhai", "Thomas Unterthiner", "Mostafa Dehghani", "Matthias Minderer", "G. Heigold", "S. Gelly", "Jakob Uszkoreit", "N. 
Houlsby" ], "externalIds": { "MAG": "3094502228", "ArXiv": "2010.11929", "DBLP": "conf/iclr/DosovitskiyB0WZ21", "CorpusId": 225039882 }, "url": "https://www.semanticscholar.org/paper/268d347e8a55b5eb82fb5e7d2f800e33c75ab18a", "referenceCount": 65, "citationCount": 28233, "influentialCitationCount": 4121, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Denoising Diffusion Implicit Models", "abstract": "Denoising diffusion probabilistic models (DDPMs) have achieved high quality image generation without adversarial training, yet they require simulating a Markov chain for many steps to produce a sample. To accelerate sampling, we present denoising diffusion implicit models (DDIMs), a more efficient class of iterative implicit probabilistic models with the same training procedure as DDPMs. In DDPMs, the generative process is defined as the reverse of a Markovian diffusion process. We construct a class of non-Markovian diffusion processes that lead to the same training objective, but whose reverse process can be much faster to sample from. We empirically demonstrate that DDIMs can produce high quality samples $10 \\times$ to $50 \\times$ faster in terms of wall-clock time compared to DDPMs, allow us to trade off computation for sample quality, and can perform semantically meaningful image interpolation directly in the latent space.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Jiaming Song", "Chenlin Meng", "Stefano Ermon" ], "externalIds": { "ArXiv": "2010.02502", "DBLP": "journals/corr/abs-2010-02502", "MAG": "3092442149", "CorpusId": 222140788 }, "url": "https://www.semanticscholar.org/paper/014576b866078524286802b1d0e18628520aa886", "referenceCount": 47, "citationCount": 4422, "influentialCitationCount": 877, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DiffWave: A Versatile Diffusion Model for Audio Synthesis", "abstract": "In this work, we propose DiffWave, a versatile Diffusion probabilistic model for conditional and unconditional Waveform generation. The model is non-autoregressive, and converts the white noise signal into structured waveform through a Markov chain with a constant number of steps at synthesis. It is efficiently trained by optimizing a variant of variational bound on the data likelihood. DiffWave produces high-fidelity audios in Different Waveform generation tasks, including neural vocoding conditioned on mel spectrogram, class-conditional generation, and unconditional generation. We demonstrate that DiffWave matches a strong WaveNet vocoder in terms of speech quality~(MOS: 4.44 versus 4.43), while synthesizing orders of magnitude faster. 
In particular, it significantly outperforms autoregressive and GAN-based waveform models in the challenging unconditional generation task in terms of audio quality and sample diversity from various automatic and human evaluations.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Zhifeng Kong", "Wei Ping", "Jiaji Huang", "Kexin Zhao", "Bryan Catanzaro" ], "externalIds": { "MAG": "3087665158", "DBLP": "conf/iclr/KongPHZC21", "ArXiv": "2009.09761", "CorpusId": 221818900 }, "url": "https://www.semanticscholar.org/paper/34bf13e58c7226d615afead0c0f679432502940e", "referenceCount": 56, "citationCount": 1087, "influentialCitationCount": 75, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Denoising Diffusion Probabilistic Models", "abstract": "We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional CIFAR10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art FID score of 3.17. On 256x256 LSUN, we obtain sample quality similar to ProgressiveGAN. Our implementation is available at this https URL", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Jonathan Ho", "Ajay Jain", "P. Abbeel" ], "externalIds": { "ArXiv": "2006.11239", "MAG": "3100572490", "DBLP": "journals/corr/abs-2006-11239", "CorpusId": 219955663 }, "url": "https://www.semanticscholar.org/paper/5c126ae3421f05768d8edd97ecd44b1364e2c99a", "referenceCount": 73, "citationCount": 10778, "influentialCitationCount": 2337, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Generative Modeling by Estimating Gradients of the Data Distribution", "abstract": "We introduce a new generative model where samples are produced via Langevin dynamics using gradients of the data distribution estimated with score matching. Because gradients can be ill-defined and hard to estimate when the data resides on low-dimensional manifolds, we perturb the data with different levels of Gaussian noise, and jointly estimate the corresponding scores, i.e., the vector fields of gradients of the perturbed data distribution for all noise levels. For sampling, we propose an annealed Langevin dynamics where we use gradients corresponding to gradually decreasing noise levels as the sampling process gets closer to the data manifold. Our framework allows flexible model architectures, requires no sampling during training or the use of adversarial methods, and provides a learning objective that can be used for principled model comparisons. Our models produce samples comparable to GANs on MNIST, CelebA and CIFAR-10 datasets, achieving a new state-of-the-art inception score of 8.87 on CIFAR-10. 
Additionally, we demonstrate that our models learn effective representations via image inpainting experiments.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Yang Song", "Stefano Ermon" ], "externalIds": { "MAG": "2971034910", "ArXiv": "1907.05600", "DBLP": "conf/nips/SongE19", "CorpusId": 196470871 }, "url": "https://www.semanticscholar.org/paper/965359b3008ab50dd04e171551220ec0e7f83aba", "referenceCount": 68, "citationCount": 2763, "influentialCitationCount": 330, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Deep Learning for Image Super-Resolution: A Survey", "abstract": "Image Super-Resolution (SR) is an important class of image processing techniques to enhance the resolution of images and videos in computer vision. Recent years have witnessed remarkable progress of image super-resolution using deep learning techniques. This article aims to provide a comprehensive survey on recent advances of image super-resolution using deep learning approaches. In general, we can roughly group the existing studies of SR techniques into three major categories: supervised SR, unsupervised SR, and domain-specific SR. In addition, we also cover some other important issues, such as publicly available benchmark datasets and performance evaluation metrics. Finally, we conclude this survey by highlighting several future directions and open issues which should be further addressed by the community in the future.", "year": 2019, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Zhihao Wang", "Jian Chen", "S. Hoi" ], "externalIds": { "ArXiv": "1902.06068", "MAG": "3013529009", "DBLP": "journals/corr/abs-1902-06068", "DOI": "10.1109/TPAMI.2020.2982166", "CorpusId": 62841491, "PubMed": "32217470" }, "url": "https://www.semanticscholar.org/paper/155f27879f185f1ab04107c91c2ae7cf6a910a03", "referenceCount": 234, "citationCount": 1207, "influentialCitationCount": 59, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Fréchet Audio Distance: A Metric for Evaluating Music Enhancement Algorithms", "abstract": "We propose the Fr\\'echet Audio Distance (FAD), a novel, reference-free evaluation metric for music enhancement algorithms. We demonstrate how typical evaluation metrics for speech enhancement and blind source separation can fail to accurately measure the perceived effect of a wide variety of distortions. As an alternative, we propose adapting the Fr\\'echet Inception Distance (FID) metric used to evaluate generative image models to the audio domain. FAD is validated using a wide variety of artificial distortions and is compared to the signal based metrics signal to distortion ratio (SDR), cosine distance and magnitude L2 distance. 
We show that, with a correlation coefficient of 0.52, FAD correlates more closely with human perception than either SDR, cosine distance or magnitude L2 distance, with correlation coefficients of 0.39, -0.15 and -0.01 respectively.", "year": 2018, "venue": "arXiv.org", "authors": [ "Kevin Kilgour", "Mauricio Zuluaga", "Dominik Roblek", "Matthew Sharifi" ], "externalIds": { "MAG": "2905488776", "ArXiv": "1812.08466", "DBLP": "journals/corr/abs-1812-08466", "CorpusId": 56517486 }, "url": "https://www.semanticscholar.org/paper/4766415f046760e16eb09dd7d04e3789d732dec1", "referenceCount": 31, "citationCount": 127, "influentialCitationCount": 27, "isOpenAccess": false, "fieldsOfStudy": [ "Engineering", "Computer Science", "Mathematics" ] }, { "title": "The Unreasonable Effectiveness of Deep Features as a Perceptual Metric", "abstract": "While it is nearly effortless for humans to quickly assess the perceptual similarity between two images, the underlying processes are thought to be quite complex. Despite this, the most widely used perceptual metrics today, such as PSNR and SSIM, are simple, shallow functions, and fail to account for many nuances of human perception. Recently, the deep learning community has found that features of the VGG network trained on ImageNet classification has been remarkably useful as a training loss for image synthesis. But how perceptual are these so-called \"perceptual losses\"? What elements are critical for their success? To answer these questions, we introduce a new dataset of human perceptual similarity judgments. We systematically evaluate deep features across different architectures and tasks and compare them with classic metrics. We find that deep features outperform all previous metrics by large margins on our dataset. More surprisingly, this result is not restricted to ImageNet-trained VGG features, but holds across different deep architectures and levels of supervision (supervised, self-supervised, or even unsupervised). Our results suggest that perceptual similarity is an emergent property shared across deep visual representations.", "year": 2018, "venue": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition", "authors": [ "Richard Zhang", "Phillip Isola", "Alexei A. Efros", "Eli Shechtman", "Oliver Wang" ], "externalIds": { "ArXiv": "1801.03924", "MAG": "2783879794", "DBLP": "journals/corr/abs-1801-03924", "DOI": "10.1109/CVPR.2018.00068", "CorpusId": 4766599 }, "url": "https://www.semanticscholar.org/paper/c468bbde6a22d961829e1970e6ad5795e05418d1", "referenceCount": 71, "citationCount": 8862, "influentialCitationCount": 1545, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Demystifying MMD GANs", "abstract": "We investigate the training and performance of generative adversarial networks using the Maximum Mean Discrepancy (MMD) as critic, termed MMD GANs. As our main theoretical contribution, we clarify the situation with bias in GAN loss functions raised by recent work: we show that gradient estimators used in the optimization process for both MMD GANs and Wasserstein GANs are unbiased, but learning a discriminator based on samples leads to biased gradients for the generator parameters. We also discuss the issue of kernel choice for the MMD critic, and characterize the kernel corresponding to the energy distance used for the Cramer GAN critic. Being an integral probability metric, the MMD benefits from training strategies recently developed for Wasserstein GANs. 
In experiments, the MMD GAN is able to employ a smaller critic network than the Wasserstein GAN, resulting in a simpler and faster-training algorithm with matching performance. We also propose an improved measure of GAN convergence, the Kernel Inception Distance, and show how to use it to dynamically adapt learning rates during GAN training.", "year": 2018, "venue": "International Conference on Learning Representations", "authors": [ "Mikolaj Binkowski", "Danica J. Sutherland", "M. Arbel", "A. Gretton" ], "externalIds": { "MAG": "2951480694", "DBLP": "conf/iclr/BinkowskiSAG18", "ArXiv": "1801.01401", "CorpusId": 3531856 }, "url": "https://www.semanticscholar.org/paper/9723066a5587e6267d8abfd7feefd0637a5a211c", "referenceCount": 69, "citationCount": 1192, "influentialCitationCount": 235, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Automatic differentiation in PyTorch", "abstract": "In this article, we describe an automatic differentiation module of PyTorch — a library designed to enable rapid research on machine learning models. It builds upon a few projects, most notably Lua Torch, Chainer, and HIPS Autograd [4], and provides a high performance environment with easy access to automatic differentiation of models executed on different devices (CPU and GPU). To make prototyping easier, PyTorch does not follow the symbolic approach used in many other deep learning frameworks, but focuses on differentiation of purely imperative programs, with a focus on extensibility and low overhead. Note that this preprint is a draft of certain sections from an upcoming paper covering all PyTorch features.", "year": 2017, "venue": "", "authors": [ "Adam Paszke", "Sam Gross", "Soumith Chintala", "Gregory Chanan", "E. Yang", "Zach DeVito", "Zeming Lin", "Alban Desmaison", "L. Antiga", "Adam Lerer" ], "externalIds": { "MAG": "2899771611", "CorpusId": 40027675 }, "url": "https://www.semanticscholar.org/paper/b36a5bb1707bb9c70025294b3a310138aae8327a", "referenceCount": 6, "citationCount": 14233, "influentialCitationCount": 1510, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Progressive Growing of GANs for Improved Quality, Stability, and Variation", "abstract": "We describe a new training methodology for generative adversarial networks. The key idea is to grow both the generator and discriminator progressively: starting from a low resolution, we add new layers that model increasingly fine details as training progresses. This both speeds the training up and greatly stabilizes it, allowing us to produce images of unprecedented quality, e.g., CelebA images at 1024^2. We also propose a simple way to increase the variation in generated images, and achieve a record inception score of 8.80 in unsupervised CIFAR10. Additionally, we describe several implementation details that are important for discouraging unhealthy competition between the generator and discriminator. Finally, we suggest a new metric for evaluating GAN results, both in terms of image quality and variation. As an additional contribution, we construct a higher-quality version of the CelebA dataset.", "year": 2017, "venue": "International Conference on Learning Representations", "authors": [ "Tero Karras", "Timo Aila", "S. Laine", "J. 
Lehtinen" ], "externalIds": { "MAG": "2766527293", "DBLP": "conf/iclr/KarrasALL18", "ArXiv": "1710.10196", "CorpusId": 3568073 }, "url": "https://www.semanticscholar.org/paper/744fe47157477235032f7bb3777800f9f2f45e52", "referenceCount": 66, "citationCount": 6666, "influentialCitationCount": 1046, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium", "abstract": "Generative Adversarial Networks (GANs) excel at creating realistic images with complex models for which maximum likelihood is infeasible. However, the convergence of GAN training has still not been proved. We propose a two time-scale update rule (TTUR) for training GANs with stochastic gradient descent on arbitrary GAN loss functions. TTUR has an individual learning rate for both the discriminator and the generator. Using the theory of stochastic approximation, we prove that the TTUR converges under mild assumptions to a stationary local Nash equilibrium. The convergence carries over to the popular Adam optimization, for which we prove that it follows the dynamics of a heavy ball with friction and thus prefers flat minima in the objective landscape. For the evaluation of the performance of GANs at image generation, we introduce the \"Frechet Inception Distance\" (FID) which captures the similarity of generated images to real ones better than the Inception Score. In experiments, TTUR improves learning for DCGANs and Improved Wasserstein GANs (WGAN-GP) outperforming conventional GAN training on CelebA, CIFAR-10, SVHN, LSUN Bedrooms, and the One Billion Word Benchmark.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "M. Heusel", "Hubert Ramsauer", "Thomas Unterthiner", "Bernhard Nessler", "Sepp Hochreiter" ], "externalIds": { "MAG": "2963981733", "DBLP": "conf/nips/HeuselRUNH17", "CorpusId": 326772 }, "url": "https://www.semanticscholar.org/paper/231af7dc01a166cac3b5b01ca05778238f796e41", "referenceCount": 62, "citationCount": 10842, "influentialCitationCount": 2301, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Perceptual Losses for Real-Time Style Transfer and Super-Resolution", "abstract": null, "year": 2016, "venue": "European Conference on Computer Vision", "authors": [ "Justin Johnson", "Alexandre Alahi", "Li Fei-Fei" ], "externalIds": { "MAG": "2331128040", "DBLP": "journals/corr/JohnsonAL16", "ArXiv": "1603.08155", "DOI": "10.1007/978-3-319-46475-6_43", "CorpusId": 980236 }, "url": "https://www.semanticscholar.org/paper/915c4bb289b3642489e904c65a47fa56efb60658", "referenceCount": 71, "citationCount": 9478, "influentialCitationCount": 734, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Residual Learning for Image Recognition", "abstract": "Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers - 8× deeper than VGG nets [40] but still having lower complexity. 
An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions1, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation.", "year": 2015, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Kaiming He", "X. Zhang", "Shaoqing Ren", "Jian Sun" ], "externalIds": { "DBLP": "conf/cvpr/HeZRS16", "MAG": "2949650786", "ArXiv": "1512.03385", "DOI": "10.1109/cvpr.2016.90", "CorpusId": 206594692 }, "url": "https://www.semanticscholar.org/paper/2c03df8b48bf3fa39054345bafabfeff15bfd11d", "referenceCount": 53, "citationCount": 172713, "influentialCitationCount": 28229, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "U-Net: Convolutional Networks for Biomedical Image Segmentation", "abstract": null, "year": 2015, "venue": "International Conference on Medical Image Computing and Computer-Assisted Intervention", "authors": [ "O. Ronneberger", "P. Fischer", "T. Brox" ], "externalIds": { "ArXiv": "1505.04597", "MAG": "1901129140", "DBLP": "journals/corr/RonnebergerFB15", "DOI": "10.1007/978-3-319-24574-4_28", "CorpusId": 3719281 }, "url": "https://www.semanticscholar.org/paper/6364fdaa0a0eccd823a779fcdd489173f938e91a", "referenceCount": 18, "citationCount": 66494, "influentialCitationCount": 9274, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Large-scale Classification of Fine-Art Paintings: Learning The Right Metric on The Right Feature", "abstract": "In the past few years, the number of fine-art collections that are digitized and publicly available has been growing rapidly. With the availability of such large collections of digitized artworks comes the need to develop multimedia systems to archive and retrieve this pool of data. Measuring the visual similarity between artistic items is an essential step for such multimedia systems, which can benefit more high-level multimedia tasks. In order to model this similarity between paintings, we should extract the appropriate visual features for paintings and find out the best approach to learn the similarity metric based on these features. We investigate a comprehensive list of visual features and metric learning approaches to learn an optimized similarity measure between paintings. We develop a machine that is able to make aesthetic-related semantic-level judgments, such as predicting a painting's style, genre, and artist, as well as providing similarity measures optimized based on the knowledge available in the domain of art historical interpretation. Our experiments show the value of using this similarity measure for the aforementioned prediction tasks.", "year": 2015, "venue": "arXiv.org", "authors": [ "Babak Saleh", "A. 
Elgammal" ], "externalIds": { "MAG": "2964332173", "ArXiv": "1505.00855", "DBLP": "journals/corr/SalehE15", "DOI": "10.11588/DAH.2016.2.23376", "CorpusId": 14168099 }, "url": "https://www.semanticscholar.org/paper/51fa7c573fcc98b05c2d15685d64463c40d57cff", "referenceCount": 36, "citationCount": 243, "influentialCitationCount": 37, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Unsupervised Learning using Nonequilibrium Thermodynamics", "abstract": "A central problem in machine learning involves modeling complex data-sets using highly flexible families of probability distributions in which learning, sampling, inference, and evaluation are still analytically or computationally tractable. Here, we develop an approach that simultaneously achieves both flexibility and tractability. The essential idea, inspired by non-equilibrium statistical physics, is to systematically and slowly destroy structure in a data distribution through an iterative forward diffusion process. We then learn a reverse diffusion process that restores structure in data, yielding a highly flexible and tractable generative model of the data. This approach allows us to rapidly learn, sample from, and evaluate probabilities in deep generative models with thousands of layers or time steps, as well as to compute conditional and posterior probabilities under the learned model. We additionally release an open source reference implementation of the algorithm.", "year": 2015, "venue": "International Conference on Machine Learning", "authors": [ "Jascha Narain Sohl-Dickstein", "Eric A. Weiss", "Niru Maheswaranathan", "S. Ganguli" ], "externalIds": { "MAG": "2129069237", "DBLP": "journals/corr/Sohl-DicksteinW15", "ArXiv": "1503.03585", "CorpusId": 14888175 }, "url": "https://www.semanticscholar.org/paper/2dcef55a07f8607a819c21fe84131ea269cc2e3c", "referenceCount": 60, "citationCount": 4757, "influentialCitationCount": 356, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "ImageNet Large Scale Visual Recognition Challenge", "abstract": null, "year": 2014, "venue": "International Journal of Computer Vision", "authors": [ "Olga Russakovsky", "Jia Deng", "Hao Su", "J. Krause", "S. Satheesh", "Sean Ma", "Zhiheng Huang", "A. Karpathy", "A. Khosla", "Michael S. Bernstein", "A. Berg", "Li Fei-Fei" ], "externalIds": { "ArXiv": "1409.0575", "DBLP": "journals/corr/RussakovskyDSKSMHKKBBF14", "MAG": "2546241758", "DOI": "10.1007/s11263-015-0816-y", "CorpusId": 2930547 }, "url": "https://www.semanticscholar.org/paper/e74f9b7f8eec6ba4704c206b93bc8079af3da4bd", "referenceCount": 125, "citationCount": 36698, "influentialCitationCount": 4556, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Quantum chemistry structures and properties of 134 kilo molecules", "abstract": null, "year": 2014, "venue": "Scientific Data", "authors": [ "R. Ramakrishnan", "Pavlo O. Dral", "Pavlo O. Dral", "M. Rupp", "O. V. 
Lilienfeld" ], "externalIds": { "MAG": "2080635178", "PubMedCentral": "4322582", "DOI": "10.1038/sdata.2014.22", "CorpusId": 15367821, "PubMed": "25977779" }, "url": "https://www.semanticscholar.org/paper/89655dc3c3a794cb25e055aed79424c66301d70f", "referenceCount": 36, "citationCount": 1514, "influentialCitationCount": 159, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Medicine" ] }, { "title": "Part-Based R-CNNs for Fine-Grained Category Detection", "abstract": null, "year": 2014, "venue": "European Conference on Computer Vision", "authors": [ "Ning Zhang", "Jeff Donahue", "Ross B. Girshick", "Trevor Darrell" ], "externalIds": { "MAG": "2949286765", "DBLP": "conf/eccv/ZhangDGD14", "ArXiv": "1407.3867", "DOI": "10.1007/978-3-319-10590-1_54", "CorpusId": 11710343 }, "url": "https://www.semanticscholar.org/paper/98bb60748eb8ef7a671cdd22faa87e377fd13060", "referenceCount": 50, "citationCount": 1177, "influentialCitationCount": 109, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Super-resolution: a comprehensive survey", "abstract": null, "year": 2014, "venue": "Machine Vision and Applications", "authors": [ "Kamal Nasrollahi", "T. Moeslund" ], "externalIds": { "DBLP": "journals/mva/NasrollahiM14", "MAG": "1973788353", "DOI": "10.1007/s00138-014-0623-4", "CorpusId": 253632927 }, "url": "https://www.semanticscholar.org/paper/1ef4e8b10fd29c29d452adca1c06a5c3943271c4", "referenceCount": 628, "citationCount": 607, "influentialCitationCount": 19, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Tweedie’s Formula and Selection Bias", "abstract": "We suppose that the statistician observes some large number of estimates zi, each with its own unobserved expectation parameter μi. The largest few of the zi’s are likely to substantially overestimate their corresponding μi’s, this being an example of selection bias, or regression to the mean. Tweedie’s formula, first reported by Robbins in 1956, offers a simple empirical Bayes approach for correcting selection bias. This article investigates its merits and limitations. In addition to the methodology, Tweedie’s formula raises more general questions concerning empirical Bayes theory, discussed here as “relevance” and “empirical Bayes information.” There is a close connection between applications of the formula and James–Stein estimation.", "year": 2011, "venue": "Journal of the American Statistical Association", "authors": [ "B. Efron" ], "externalIds": { "MAG": "2015810892", "DOI": "10.1198/jasa.2011.tm11181", "CorpusId": 23284154, "PubMed": "22505788" }, "url": "https://www.semanticscholar.org/paper/dd63a63ef76d23c898f122f0f757ac307b836c19", "referenceCount": 27, "citationCount": 394, "influentialCitationCount": 67, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Mathematics" ] }, { "title": "Understanding and evaluating blind deconvolution algorithms", "abstract": "Blind deconvolution is the recovery of a sharp version of a blurred image when the blur kernel is unknown. Recent algorithms have afforded dramatic progress, yet many aspects of the problem remain challenging and hard to understand. The goal of this paper is to analyze and evaluate recent blind deconvolution algorithms both theoretically and experimentally. We explain the previously reported failure of the naive MAP approach by demonstrating that it mostly favors no-blur explanations. 
On the other hand we show that since the kernel size is often smaller than the image size a MAP estimation of the kernel alone can be well constrained and accurately recover the true blur. The plethora of recent deconvolution techniques makes an experimental evaluation on ground-truth data important. We have collected blur data with ground truth and compared recent algorithms under equal settings. Additionally, our data demonstrates that the shift-invariant blur assumption made by most algorithms is often violated.", "year": 2009, "venue": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "authors": [ "Anat Levin", "Yair Weiss", "F. Durand", "W. Freeman" ], "externalIds": { "MAG": "2138204001", "DBLP": "conf/cvpr/LevinWDF09", "DOI": "10.1109/CVPR.2009.5206815", "CorpusId": 1610143 }, "url": "https://www.semanticscholar.org/paper/262aead5e5db0842807597cc0a81c6e5c265a9a8", "referenceCount": 27, "citationCount": 1282, "influentialCitationCount": 218, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Asirra: a CAPTCHA that exploits interest-aligned manual image categorization", "abstract": "We present Asirra (Figure 1), a CAPTCHA that asks users to identify cats out of a set of 12 photographs of both cats and dogs. Asirra is easy for users; user studies indicate it can be solved by humans 99.6% of the time in under 30 seconds. Barring a major advance in machine vision, we expect computers will have no better than a 1/54,000 chance of solving it. Asirra’s image database is provided by a novel, mutually beneficial partnership with Petfinder.com. In exchange for the use of their three million images, we display an “adopt me” link beneath each one, promoting Petfinder’s primary mission of finding homes for homeless animals. We describe the design of Asirra, discuss threats to its security, and report early deployment experiences. We also describe two novel algorithms for amplifying the skill gap between humans and computers that can be used on many existing CAPTCHAs.", "year": 2007, "venue": "Conference on Computer and Communications Security", "authors": [ "J. Elson", "J. Douceur", "Jon Howell", "J. Saul" ], "externalIds": { "DBLP": "conf/ccs/ElsonDHS07", "MAG": "2156749117", "DOI": "10.1145/1315245.1315291", "CorpusId": 8348576 }, "url": "https://www.semanticscholar.org/paper/8c10a7d51d8c33a3daf2c39e16f2e11bf51de55e", "referenceCount": 15, "citationCount": 560, "influentialCitationCount": 76, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Spurious Correlation: A Causal Interpretation*", "abstract": "Abstract * I am indebted to Richard M. Cyert, Paul F. Lazarsfeld, Roy Radner, and T. C Koopmans for valuable comments on earlier drafts of this paper. To test whether a correlation between two variables is genuine or spurious, additional variables and equations must be introduced, and sufficient assumptions must be made to identify the parameters of this wider system. If the two original variables are causally related in the wider system, the correlation is “genuine.”", "year": 1954, "venue": "", "authors": [ "H. 
Simon" ], "externalIds": { "MAG": "2076332451", "DOI": "10.1080/01621459.1954.10483515", "CorpusId": 123603975 }, "url": "https://www.semanticscholar.org/paper/179ef2ab12e58c0fcf3355874ca1bb5ff5f9b211", "referenceCount": 10, "citationCount": 507, "influentialCitationCount": 16, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Training-free Multi-objective Diffusion Model for 3D Molecule Generation", "abstract": null, "year": 2024, "venue": "International Conference on Learning Representations", "authors": [ "Xu Han", "Caihua Shan", "Yifei Shen", "Can Xu", "Han Yang", "Xiang Li", "Dongsheng Li" ], "externalIds": { "DBLP": "conf/iclr/HanSSXYL024", "CorpusId": 271745991 }, "url": "https://www.semanticscholar.org/paper/f8337024e0de4902fe559f70196f66002087df51", "referenceCount": 0, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Loss-Guided Diffusion Models for Plug-and-Play Controllable Generation", "abstract": "We consider guiding denoising diffusion models with general differentiable loss functions in a plug-and-play fashion, enabling controllable generation without additional training. This paradigm, termed Loss-Guided Diffusion (LGD), can easily be integrated into all diffusion models and leverage various efficient samplers. Despite the benefits, the resulting guidance term is, unfortunately, an intractable integral and needs to be approximated. Existing methods compute the guidance term based on a point estimate. However, we show that such approaches have significant errors over the scale of the approximations. To address this issue, we propose a Monte Carlo method that uses multiple samples from a suitable distribution to reduce bias. Our method is effective in various synthetic and real-world settings, including image super-resolution, text or label-conditional image generation, and controllable motion synthesis. Notably, we show how our method can be applied to control a pretrained motion diffusion model to follow certain paths and avoid obstacles that are proven challenging to prior methods.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Jiaming Song", "Qinsheng Zhang", "Hongxu Yin", "M. Mardani", "Ming-Yu Liu", "J. Kautz", "Yongxin Chen", "Arash Vahdat" ], "externalIds": { "DBLP": "conf/icml/SongZYM0KCV23", "CorpusId": 260957043 }, "url": "https://www.semanticscholar.org/paper/464df3e306a5b9a6a7131a582637aaef3de058f2", "referenceCount": 57, "citationCount": 49, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Large-scale celebfaces attributes (celeba)", "abstract": null, "year": 2018, "venue": "dataset", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Learning Multiple Layers of Features from Tiny Images", "abstract": "Groups at MIT and NYU have collected a dataset of millions of tiny colour images from the web. It is, in principle, an excellent dataset for unsupervised training of deep generative models, but previous researchers who have tried this have found it dicult to learn a good set of lters from the images. We show how to train a multi-layer generative model that learns to extract meaningful features which resemble those found in the human visual cortex. 
Using a novel parallelization algorithm to distribute the work among multiple machines connected on a network, we show how training such a model can be done in reasonable time. A second problematic aspect of the tiny images dataset is that there are no reliable class labels which makes it hard to use for object recognition experiments. We created two sets of reliable labels. The CIFAR-10 set has 6000 examples of each of 10 classes and the CIFAR-100 set has 600 examples of each of 100 non-overlapping classes. Using these labels, we show that object recognition is signicantly improved by pre-training a layer of features on a large set of unlabeled tiny images.", "year": 2009, "venue": "", "authors": [ "A. Krizhevsky" ], "externalIds": { "MAG": "2945315962", "CorpusId": 18268744 }, "url": "https://www.semanticscholar.org/paper/5d90f06bb70a0a3dced62413346235c02b1aa086", "referenceCount": 15, "citationCount": 31347, "influentialCitationCount": 7621, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Ornstein–Uhlenbeck Processes and Extensions", "abstract": null, "year": 2009, "venue": "", "authors": [ "R. Maller", "Gernot Müller", "Alexander Szimayer" ], "externalIds": { "MAG": "156433631", "DOI": "10.1007/978-3-540-71297-8_18", "CorpusId": 10631156 }, "url": "https://www.semanticscholar.org/paper/4d22f17677442019e2a8e908265d51df03658481", "referenceCount": 61, "citationCount": 72, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Additional file 6", "abstract": "Presentation of network structure Figure 1 shows a network graph comparing anodal, cathodal, and dual tDCS and their corresponding stimulation location with their control interventions for improving functional communication after stroke. Figure 2 shows a network graph comparing anodal, cathodal, and dual tDCS and their corresponding stimulation location with their control interventions for improving language function (naming nouns) after stroke and Figure 3 for improving naming verbs. Figure 4 shows a network graph comparing tDCS and its corresponding stimulation location with their control interventions regarding safety (measured by the number of dropouts and adverse events).", "year": 2009, "venue": "", "authors": [ "Dimas Yusuf", "S. Butland", "Magdalena I. Swanson", "Eugene Bolotin", "Amy Ticoll", "Warren A. Cheung", "X. Zhang", "Christopher Td Dickman", "Debra L. Fulton", "Jonathan S. Lim", "Jake Schnabl", "Oscar Hp Ramos", "M. Vasseur-Cognet", "C. N. D. Leeuw", "Elizabeth M. Simpson", "G. Ryffel", "Eric Lam", "Ralf Kist", "M. Wilson", "Raquel Marco-Ferreres", "Jan J. Brosens", "Leonardo Beccari", "Paola Bovolenta", "B. Benayoun", "Lara J. Monteiro", "H. D. Schwenen", "Lars Grøntved", "Elizabeth D. Wederell", "Susanne Mandrup", "R. Veitia", "Harini Chakravarthy", "P. Hoodless", "M. Mancarelli", "B. Torbett", "A. Banham", "Sekhar P. Reddy", "Rebecca Cullum", "Michaela Liedtke", "M. Tschan", "M. Vaz", "Angie Rizzino", "M. Zannini", "S. Frietze", "Peggy J. Farnham", "A. Eijkelenboom", "Philip J. Brown", "D. Laperrière", "D. Leprince", "T. Cristofaro", "K. Prince", "M. Putker", "L. Peso", "G. Camenisch", "Roland H. Wenger", "M. Mikuła", "Marieke Rozendaal", "Sylvie Mader", "Jerzy Ostrowski", "S. Rhodes", "Capucine Van Rechem", "Gaylor Boulay", "S. Olechnowicz", "M. Breslin", "Michael S. Lan", "Kyster K. Nanan", "Michael Wegner", "J. Hou", "R. Mullen", "S. C. Colvin", "Peter J. Noy", "Carol F. Webb", "M. Witek", "Scott Ferrell", "Juliet M. 
Daniel", "Jason Y. Park", "Scott A. Waldman", "Dan J. Peet", "Michael Taggart", "Padma-Sheela Jayaraman", "J. Karrich", "Bianca Blom", "F. Vesuna", "H. O’Geen", "Yunfu Sun", "R. Gronostajski", "M. Woodcroft", "M. Hough", "Edwin Chen", "G. N. E. Finner", "M. Karolczak-Bayatti", "J. Bailey", "Oliver Hankinson", "Venu Raman", "David P. LeBrun", "Shyam Biswal", "C. Harvey", "Jason P. DeBruyne", "J. Hogenesch", "Robert F. Hevner", "Christophe Héligon", "Xin M Luo", "M. Blank", "K. Millen", "David S. Sharlin", "Douglas Forrest", "K. Dahlman-Wright", "Chunyan Zhao", "Y. Mishima", "S. K. Sinha", "R. Chakrabarti", "Elodie Portales-Casamar", "F. Sladek", "Philip Bradley", "W. Wasserman" ], "externalIds": { "MAG": "2910924531", "CorpusId": 2833697 }, "url": "https://www.semanticscholar.org/paper/09ef66adbc7cfa3356f5996c8bc01ca2506d379b", "referenceCount": 17, "citationCount": 2203, "influentialCitationCount": 259, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Biology" ] }, { "title": "A Tutorial on Energy-Based Learning", "abstract": "Energy-Based Models (EBMs) capture dependencies between variables by associating a scalar energy to each configuration of the variab les. Inference consists in clamping the value of observed variables and finding config urations of the remaining variables that minimize the energy. Learning consists in finding an energy function in which observed configurations of the variables a re given lower energies than unobserved ones. The EBM approach provides a common theoretical framework for many learning models, including traditional discr iminative and generative approaches, as well as graph-transformer networks, co nditional random fields, maximum margin Markov networks, and several manifold learning methods. Probabilistic models must be properly normalized, which sometimes requires evaluating intractable integrals over the space of all poss ible variable configurations. Since EBMs have no requirement for proper normalization, this problem is naturally circumvented. EBMs can be viewed as a form of non-probabilistic factor graphs, and they provide considerably more flexibility in th e design of architectures and training criteria than probabilistic approaches .", "year": 2006, "venue": "", "authors": [ "Yann LeCun", "S. Chopra", "R. Hadsell", "Aurelio Ranzato", "Fu Jie Huang" ], "externalIds": { "MAG": "2161914416", "CorpusId": 8531544 }, "url": "https://www.semanticscholar.org/paper/7fc604e1a3e45cd2d2742f96d62741930a363efa", "referenceCount": 73, "citationCount": 1345, "influentialCitationCount": 118, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Monte Carlo Sampling Methods", "abstract": null, "year": 2003, "venue": "", "authors": [ "A. Shapiro" ], "externalIds": { "MAG": "1490324987", "DOI": "10.1016/S0927-0507(03)10006-0", "CorpusId": 117549937 }, "url": "https://www.semanticscholar.org/paper/5e667bc87de82a007a94ed1a36ad2d774d12037c", "referenceCount": 46, "citationCount": 1118, "influentialCitationCount": 115, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Dynamic Time Warping", "abstract": null, "year": 1984, "venue": "", "authors": [ "A. Bundy", "L. 
Wallen" ], "externalIds": { "MAG": "2337127520", "DOI": "10.1007/978-3-642-96868-6_63", "CorpusId": 58653599 }, "url": "https://www.semanticscholar.org/paper/dd8d13c142859dbf27546582c3391b62e06a6e62", "referenceCount": 1, "citationCount": 1054, "influentialCitationCount": 92, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A bound for the error in the normal approximation to the distribution of a sum of dependent random variables", "abstract": "This paper has two aims, one fairly concrete and the other more abstract. In Section 3, bounds are obtained under certain conditions for the departure of the distribution of the sum of n terms of a stationary random sequence from a normal distribution. These bounds are derived from a more abstract normal approximation theorem proved in Section 2. I regret that, in order to complete this paper in time for publication, I have been forced to submit it with many defects remaining. In particular the proof of the concrete results of Section 3 is somewhat incomplete. A well known theorem of A. Berry [1] and C-G. Esseen [2] asserts that if X1, X2, . is a sequence of independent identically distributed random variables with EXi = 0, EXV = 1, and ,B = EIXij3 < oo. then the cumulative distribution function of (1//;n) Yi=l Xi differs from the unit normal distribution by at most Kf3/ n where K is a constant, which can be taken to be 2. It seems likely, but has never been proved and will not be proved here, that a similar result holds for stationary sequences in which the dependence falls off sufficiently rapidly and the variance of(1//;n) X1.1 Xi approaches a positive constant. I. Ibragimov and Yu. Linnik ([3], pp. 423-432) prove that, under these conditions, the limiting distribution of (1/ /n) E Xi is normal with mean 0 and a certain variance G2 Perhaps the best published results on bounds for the error are those of Phillip [5]. who shows that if in addition the Xi are bounded, with exponentially decreasing dependence, then the discrepancy is roughly of the order of n-114 In Corollary 3.2 of the present paper it is proved that under these conditions the discrepancy is of the order of n 1/2(log n)2. Actually the assumption of boundedness is weakened to the finiteness of eighth moments. In Corollary 3.1 it is proved that if the assumption of exponential decrease of dependence is strengthened tormdependence, the error in the normal approximation is of the order of n1/2 The abstract normal approximation theorem of Section 2 is elementary in the sense that it uses only the basic properties of conditional expectation and the elements of analysis, including the solution of a first order linear differential equation. It is also direct, in the sense that the expectation of a fairly arbitrary", "year": 1972, "venue": "", "authors": [ "C. Stein" ], "externalIds": { "MAG": "1873595945", "CorpusId": 53492374 }, "url": "https://www.semanticscholar.org/paper/c7bf61f72cce609ce7b754e570fe1ec05ca3827b", "referenceCount": 5, "citationCount": 1263, "influentialCitationCount": 148, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "An Empirical Bayes Approach to Statistics", "abstract": null, "year": 1956, "venue": "", "authors": [ "H. 
Robbins" ], "externalIds": { "MAG": "1590636096", "DOI": "10.1007/978-1-4612-0919-5_26", "CorpusId": 26161481 }, "url": "https://www.semanticscholar.org/paper/2c0cdd08a8d3cb834da33515037eafa4bdb5faff", "referenceCount": 7, "citationCount": 1151, "influentialCitationCount": 101, "isOpenAccess": false, "fieldsOfStudy": [ "Physics" ] }, { "title": "A blur ( x ) should be similar to the ground truth noisy mel spectrogram y . Evaluation metrics", "abstract": null, "year": null, "venue": "our experiments, we evaluate each guidance method on a set of 256 samples generated by Audio-diffusion. We apply the Dynamic time warping (DTW) [44] to assess the guidance validity, and Fréchet Audio Distance (FAD)", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "where x is the input molecule and c is the target property value", "abstract": null, "year": null, "venue": "Evaluation metrics", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Learning Diverse Robot Striking Motions with Diffusion Models and Kinematically Constrained Gradient Guidance": { "paper_title": "Learning Diverse Robot Striking Motions with Diffusion Models and Kinematically Constrained Gradient Guidance", "arxiv_id": "2409.15528v1", "keyword": "diffusion model", "authors": [ "Kin Man Lee", "Sean Ye", "Qingyu Xiao", "Zixuan Wu", "Zulfiqar Zaidi", "David B. D'Ambrosio", "Pannag R. Sanketi", "Matthew Gombolay" ], "references": [ { "title": "Achieving Human Level Competitive Robot Table Tennis", "abstract": "Achieving human-level speed and performance on real world tasks is a north star for the robotics research community. This work takes a step towards that goal and presents the first learned robot agent that reaches amateur human-level performance in competitive table tennis. Table tennis is a physically demanding sport which requires human players to undergo years of training to achieve an advanced level of proficiency. In this paper, we contribute (1) a hierarchical and modular policy architecture consisting of (i) low level controllers with their detailed skill descriptors which model the agent's capabilities and help to bridge the sim-to-real gap and (ii) a high level controller that chooses the low level skills, (2) techniques for enabling zero-shot sim-to-real including an iterative approach to defining the task distribution that is grounded in the real-world and defines an automatic curriculum, and (3) real time adaptation to unseen opponents. Policy performance was assessed through 29 robot vs. human matches of which the robot won 45% (13/29). All humans were unseen players and their skill level varied from beginner to tournament level. Whilst the robot lost all matches vs. the most advanced players it won 100% matches vs. beginners and 55% matches vs. intermediate players, demonstrating solidly amateur human-level performance. Videos of the matches can be viewed at https://sites.google.com/view/competitive-robot-table-tennis", "year": 2024, "venue": "arXiv.org", "authors": [ "David B. D'Ambrosio", "Saminda Abeyruwan", "L. Graesser", "Atil Iscen", "H. B. Amor", "Alex Bewley", "Barney J. Reed", "Krista Reymann", "Leila Takayama", "Yuval Tassa", "Krzysztof Choromanski", "Erwin Coumans", "Deepali Jain", "N. 
Jaitly", "Natasha Jaques", "Satoshi Kataoka", "Yuheng Kuang", "N. Lazic", "R. Mahjourian", "Sherry Moore", "Kenneth Oslund", "Anish Shankar", "Vikas Sindhwani", "Vincent Vanhoucke", "Grace Vesom", "Peng Xu", "Pannag R. Sanketi" ], "externalIds": { "ArXiv": "2408.03906", "DBLP": "journals/corr/abs-2408-03906", "DOI": "10.48550/arXiv.2408.03906", "CorpusId": 271745252 }, "url": "https://www.semanticscholar.org/paper/a9dc0f71e67e3af597d8e4b4e9c35fb30778aaba", "referenceCount": 100, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Diffusion-Reinforcement Learning Hierarchical Motion Planning in Adversarial Multi-agent Games", "abstract": "Reinforcement Learning- (RL-)based motion planning has recently shown the potential to outperform traditional approaches from autonomous navigation to robot manipulation. In this work, we focus on a motion planning task for an evasive target in a partially observable multi-agent adversarial pursuit-evasion games (PEG). These pursuit-evasion problems are relevant to various applications, such as search and rescue operations and surveillance robots, where robots must effectively plan their actions to gather intelligence or accomplish mission tasks while avoiding detection or capture themselves. We propose a hierarchical architecture that integrates a high-level diffusion model to plan global paths responsive to environment data while a low-level RL algorithm reasons about evasive versus global path-following behavior. Our approach outperforms baselines by 51.2% by leveraging the diffusion model to guide the RL algorithm for more efficient exploration and improves the explanability and predictability.", "year": 2024, "venue": "arXiv.org", "authors": [ "Zixuan Wu", "Sean Ye", "Manisha Natarajan", "M. Gombolay" ], "externalIds": { "ArXiv": "2403.10794", "DBLP": "journals/corr/abs-2403-10794", "DOI": "10.48550/arXiv.2403.10794", "CorpusId": 268512706 }, "url": "https://www.semanticscholar.org/paper/42173bbe136835c25365b4d63c3de151d1f221be", "referenceCount": 51, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multi-Camera Asynchronous Ball Localization and Trajectory Prediction with Factor Graphs and Human Poses", "abstract": "The rapid and precise localization and prediction of a ball are critical for developing agile robots in ball sports, particularly in sports like tennis characterized by high-speed ball movements and powerful spins. The Magnus effect induced by spin adds complexity to trajectory prediction during flight and bounce dynamics upon contact with the ground. In this study, we introduce an innovative approach that combines a multi-camera system with factor graphs for real-time and asynchronous 3D tennis ball localization. Additionally, we estimate hidden states like velocity and spin for trajectory prediction. Furthermore, to enhance spin inference early in the ball’s flight, where limited observations are available, we integrate human pose data using a temporal convolutional network (TCN) to compute spin priors within the factor graph. This refinement provides more accurate spin priors at the beginning of the factor graph, leading to improved early-stage hidden state inference for prediction. Our results show the trained TCN can predict the spin priors with RMSE of 5.27 Hz. 
Integrating TCN into the factor graph reduces the prediction error of landing positions by over 63.6% compared to a baseline method that utilized an adaptive extended Kalman filter.", "year": 2024, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Qingyu Xiao", "Z. Zaidi", "Matthew C. Gombolay" ], "externalIds": { "DBLP": "conf/icra/XiaoZG24", "ArXiv": "2401.17185", "DOI": "10.1109/ICRA57147.2024.10610631", "CorpusId": 267320926 }, "url": "https://www.semanticscholar.org/paper/bd52ee6479d3aeb5d44c4e1ffa82f2216cc1b460", "referenceCount": 46, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SERL: A Software Suite for Sample-Efficient Robotic Reinforcement Learning", "abstract": "In recent years, significant progress has been made in the field of robotic reinforcement learning (RL), enabling methods that handle complex image observations, train in the real world, and incorporate auxiliary data, such as demonstrations and prior experience. However, despite these advances, robotic RL remains hard to use. It is acknowledged among practitioners that the particular implementation details of these algorithms are often just as important (if not more so) for performance as the choice of algorithm. We posit that a significant challenge to the widespread adoption of robotic RL, as well as the further development of robotic RL methods, is the comparative inaccessibility of such methods. To address this challenge, we developed a carefully implemented library containing a sample efficient off-policy deep RL method, together with methods for computing rewards and resetting the environment, a high-quality controller for a widely adopted robot, and a number of challenging example tasks. We provide this library as a resource for the community, describe its design choices, and present experimental results. Perhaps surprisingly, we find that our implementation can achieve very efficient learning, acquiring policies for PCB board assembly, cable routing, and object relocation between 25 to 50 minutes of training per policy on average, improving over state-of-the-art results reported for similar tasks in the literature. These policies achieve perfect or near-perfect success rates, extreme robustness even under perturbations, and exhibit emergent recovery and correction behaviors. We hope these promising results and our high-quality open-source implementation will provide a tool for the robotics community to facilitate further developments in robotic RL. Our code, documentation, and videos can be found at https://serl-robot.github.io/", "year": 2024, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Jianlan Luo", "Zheyuan Hu", "Charles Xu", "You Liang Tan", "Jacob Berg", "Archit Sharma", "S. 
Schaal", "Chelsea Finn", "Abhishek Gupta", "Sergey Levine" ], "externalIds": { "ArXiv": "2401.16013", "DBLP": "conf/icra/LuoH0TBSSF0L24", "DOI": "10.1109/ICRA57147.2024.10610040", "CorpusId": 267311834 }, "url": "https://www.semanticscholar.org/paper/fc4d4a25aac973b516c0bf890e36660fe228e79a", "referenceCount": 69, "citationCount": 11, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Movement Primitive Diffusion: Learning Gentle Robotic Manipulation of Deformable Objects", "abstract": "Policy learning in robot-assisted surgery (RAS) lacks data efficient and versatile methods that exhibit the desired motion quality for delicate surgical interventions. To this end, we introduce Movement Primitive Diffusion (MPD), a novel method for imitation learning (IL) in RAS that focuses on gentle manipulation of deformable objects. The approach combines the versatility of diffusion-based imitation learning (DIL) with the high-quality motion generation capabilities of Probabilistic Dynamic Movement Primitives (ProDMPs). This combination enables MPD to achieve gentle manipulation of deformable objects, while maintaining data efficiency critical for RAS applications where demonstration data is scarce. We evaluate MPD across various simulated and real world robotic tasks on both state and image observations. MPD outperforms state-of-the-art DIL methods in success rate, motion quality, and data efficiency.", "year": 2023, "venue": "IEEE Robotics and Automation Letters", "authors": [ "Paul Maria Scheikl", "Nicolas Schreiber", "Christoph Haas", "Niklas Freymuth", "Gerhard Neumann", "Rudolf Lioutikov", "F. Mathis-Ullrich" ], "externalIds": { "ArXiv": "2312.10008", "DBLP": "journals/corr/abs-2312-10008", "DOI": "10.1109/LRA.2024.3382529", "CorpusId": 266335334 }, "url": "https://www.semanticscholar.org/paper/0a8ef3526c6dbf78ad1f9aa715218bf5f314ceea", "referenceCount": 27, "citationCount": 13, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Motion Planning Diffusion: Learning and Planning of Robot Motions with Diffusion Models", "abstract": "Learning priors on trajectory distributions can help accelerate robot motion planning optimization. Given previously successful plans, learning trajectory generative models as priors for a new planning problem is highly desirable. Prior works propose several ways on utilizing this prior to bootstrapping the motion planning problem. Either sampling the prior for initializations or using the prior distribution in a maximum-a-posterior formulation for trajectory optimization. In this work, we propose learning diffusion models as priors. We then can sample directly from the posterior trajectory distribution conditioned on task goals, by leveraging the inverse denoising process of diffusion models. Furthermore, diffusion has been recently shown to effectively encode data multi-modality in high-dimensional settings, which is particularly well-suited for large trajectory dataset. To demonstrate our method efficacy, we compare our proposed method - Motion Planning Diffusion - against several baselines in simulated planar robot and 7-dof robot arm manipulator environments. To assess the generalization capabilities of our method, we test it in environments with previously unseen obstacles. Our experiments show that diffusion models are strong priors to encode high-dimensional trajectory distributions of robot motions. 
https://sites.google.com/view/mp-diffusion", "year": 2023, "venue": "IEEE/RJS International Conference on Intelligent RObots and Systems", "authors": [ "João Carvalho", "An T. Le", "Mark Baierl", "Dorothea Koert", "Jan Peters" ], "externalIds": { "DBLP": "conf/iros/Carvalho0BK023", "ArXiv": "2308.01557", "DOI": "10.1109/IROS55552.2023.10342382", "CorpusId": 260191316 }, "url": "https://www.semanticscholar.org/paper/8223da35c0fc3143f2903b5bfbfac9e4cce81d0a", "referenceCount": 54, "citationCount": 46, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Robotic Table Tennis: A Case Study into a High Speed Learning System", "abstract": "We present a deep-dive into a real-world robotic learning system that, in previous work, was shown to be capable of hundreds of table tennis rallies with a human and has the ability to precisely return the ball to desired targets. This system puts together a highly optimized perception subsystem, a high-speed low-latency robot controller, a simulation paradigm that can prevent damage in the real world and also train policies for zero-shot transfer, and automated real world environment resets that enable autonomous training and evaluation on physical robots. We complement a complete system description, including numerous design decisions that are typically not widely disseminated, with a collection of studies that clarify the importance of mitigating various sources of latency, accounting for training and deployment distribution shifts, robustness of the perception system, sensitivity to policy hyper-parameters, and choice of action space. A video demonstrating the components of the system and details of experimental results can be found at https://youtu.be/uFcnWjB42I0.", "year": 2023, "venue": "Robotics: Science and Systems", "authors": [ "David B. D'Ambrosio", "Jonathan Abelian", "Saminda Abeyruwan", "Michael Ahn", "A. Bewley", "Justin Boyd", "K. Choromanski", "O. Cortes", "Erwin Coumans", "Tianli Ding", "Wenbo Gao", "L. Graesser", "Atil Iscen", "N. Jaitly", "Deepali Jain", "Juhana Kangaspunta", "S. Kataoka", "Gus Kouretas", "Yuheng Kuang", "N. Lazic", "Corey Lynch", "R. Mahjourian", "Sherry Moore", "T. Nguyen", "Kenneth Oslund", "Barney J. Reed", "Krista Reymann", "R. Sanketi", "Anish Shankar", "P. Sermanet", "Vikas Sindhwani", "Avi Singh", "Vincent Vanhoucke", "Grace Vesom", "Peng Xu" ], "externalIds": { "DBLP": "conf/rss/DAmbrosioJSOXLS23", "ArXiv": "2309.03315", "DOI": "10.15607/RSS.2023.XIX.006", "CorpusId": 259343110 }, "url": "https://www.semanticscholar.org/paper/a89fab384f84dfed44627775a09b489c8d34d70f", "referenceCount": 102, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Agile Catching with Whole-Body MPC and Blackbox Policy Learning", "abstract": "We address a benchmark task in agile robotics: catching objects thrown at high-speed. This is a challenging task that involves tracking, intercepting, and cradling a thrown object with access only to visual observations of the object and the proprioceptive state of the robot, all within a fraction of a second. We present the relative merits of two fundamentally different solution strategies: (i) Model Predictive Control using accelerated constrained trajectory optimization, and (ii) Reinforcement Learning using zeroth-order optimization. 
We provide insights into various performance trade-offs including sample efficiency, sim-to-real transfer, robustness to distribution shifts, and whole-body multimodality via extensive on-hardware experiments. We conclude with proposals on fusing\"classical\"and\"learning-based\"techniques for agile robot control. Videos of our experiments may be found at https://sites.google.com/view/agile-catching", "year": 2023, "venue": "Conference on Learning for Dynamics & Control", "authors": [ "Saminda Abeyruwan", "A. Bewley", "Nicholas M. Boffi", "K. Choromanski", "David B. D'Ambrosio", "Deepali Jain", "Pannag R. Sanketi", "Anish Shankar", "Vikas Sindhwani", "Sumeet Singh", "J. Slotine", "Stephen Tu" ], "externalIds": { "DBLP": "journals/corr/abs-2306-08205", "ArXiv": "2306.08205", "DOI": "10.48550/arXiv.2306.08205", "CorpusId": 254240619 }, "url": "https://www.semanticscholar.org/paper/e90240641f3e34c8a1c93e9754b647bbb5adeb1b", "referenceCount": 29, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Fine-Grained Bimanual Manipulation with Low-Cost Hardware", "abstract": "Fine manipulation tasks, such as threading cable ties or slotting a battery, are notoriously difficult for robots because they require precision, careful coordination of contact forces, and closed-loop visual feedback. Performing these tasks typically requires high-end robots, accurate sensors, or careful calibration, which can be expensive and difficult to set up. Can learning enable low-cost and imprecise hardware to perform these fine manipulation tasks? We present a low-cost system that performs end-to-end imitation learning directly from real demonstrations, collected with a custom teleoperation interface. Imitation learning, however, presents its own challenges, particularly in high-precision domains: errors in the policy can compound over time, and human demonstrations can be non-stationary. To address these challenges, we develop a simple yet novel algorithm, Action Chunking with Transformers (ACT), which learns a generative model over action sequences. ACT allows the robot to learn 6 difficult tasks in the real world, such as opening a translucent condiment cup and slotting a battery with 80-90% success, with only 10 minutes worth of demonstrations. Project website: https://tonyzhaozh.github.io/aloha/", "year": 2023, "venue": "Robotics: Science and Systems", "authors": [ "Tony Zhao", "Vikash Kumar", "S. Levine", "Chelsea Finn" ], "externalIds": { "DBLP": "journals/corr/abs-2304-13705", "ArXiv": "2304.13705", "DOI": "10.48550/arXiv.2304.13705", "CorpusId": 258331658 }, "url": "https://www.semanticscholar.org/paper/91eb20f923ea3b0246868902aef4e9bea572b800", "referenceCount": 75, "citationCount": 210, "influentialCitationCount": 45, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Goal-Conditioned Imitation Learning using Score-based Diffusion Policies", "abstract": "We propose a new policy representation based on score-based diffusion models (SDMs). We apply our new policy representation in the domain of Goal-Conditioned Imitation Learning (GCIL) to learn general-purpose goal-specified policies from large uncurated datasets without rewards. Our new goal-conditioned policy architecture\"$\\textbf{BE}$havior generation with $\\textbf{S}$c$\\textbf{O}$re-based Diffusion Policies\"(BESO) leverages a generative, score-based diffusion model as its policy. 
BESO decouples the learning of the score model from the inference sampling process, and, hence allows for fast sampling strategies to generate goal-specified behavior in just 3 denoising steps, compared to 30+ steps of other diffusion based policies. Furthermore, BESO is highly expressive and can effectively capture multi-modality present in the solution space of the play data. Unlike previous methods such as Latent Plans or C-Bet, BESO does not rely on complex hierarchical policies or additional clustering for effective goal-conditioned behavior learning. Finally, we show how BESO can even be used to learn a goal-independent policy from play-data using classifier-free guidance. To the best of our knowledge this is the first work that a) represents a behavior policy based on such a decoupled SDM b) learns an SDM based policy in the domain of GCIL and c) provides a way to simultaneously learn a goal-dependent and a goal-independent policy from play-data. We evaluate BESO through detailed simulation and show that it consistently outperforms several state-of-the-art goal-conditioned imitation learning methods on challenging benchmarks. We additionally provide extensive ablation studies and experiments to demonstrate the effectiveness of our method for goal-conditioned behavior generation. Demonstrations and Code are available at https://intuitive-robots.github.io/beso-website/", "year": 2023, "venue": "Robotics: Science and Systems", "authors": [ "Moritz Reuss", "M. Li", "Xiaogang Jia", "Rudolf Lioutikov" ], "externalIds": { "ArXiv": "2304.02532", "DBLP": "conf/rss/ReussLJL23", "DOI": "10.48550/arXiv.2304.02532", "CorpusId": 257952177 }, "url": "https://www.semanticscholar.org/paper/1334a47e8f4e4ffd04ff534329d76a5e5cc16f46", "referenceCount": 57, "citationCount": 77, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Effect of Robot Skill Level and Communication in Rapid, Proximate Human-Robot Collaboration", "abstract": "As high-speed, agile robots become more commonplace, these robots will have the potential to better aid and collaborate with humans. However, due to the increased agility and functionality of these robots, close collaboration with humans can create safety concerns that alter team dynamics and degrade task performance. In this work, we aim to enable the deployment of safe and trustworthy agile robots that operate in proximity with humans. We do so by 1) Proposing a novel human-robot doubles table tennis scenario to serve as a testbed for studying agile, proximate human-robot collaboration and 2) Conducting a user-study to understand how attributes of the robot (e.g., robot competency or capacity to communicate) impact team dynamics, perceived safety, and perceived trust, and how these latent factors affect human-robot collaboration (HRC) performance. We find that robot competency significantly increases perceived trust (p < .001), extending skill-to-trust assessments in prior studies to agile, proximate HRC. Furthermore, interestingly, we find that when the robot vocalizes its intention to perform a task, it results in a significant decrease in team performance (p = .037) and perceived safety of the system (p = .009).", "year": 2023, "venue": "IEEE/ACM International Conference on Human-Robot Interaction", "authors": [ "Kin Man Lee", "Arjun Krishna", "Z. Zaidi", "Rohan R. Paleja", "Letian Chen", "Erin Hedlund-Botti", "Mariah L. Schrum", "M. 
Gombolay" ], "externalIds": { "ArXiv": "2304.03756", "DBLP": "conf/hri/LeeKZPCHSG23", "DOI": "10.1145/3568162.3577002", "CorpusId": 257430648 }, "url": "https://www.semanticscholar.org/paper/eec1a261d5b76876bda7b710af0dfd43a527dc26", "referenceCount": 82, "citationCount": 7, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Diffusion Policy: Visuomotor Policy Learning via Action Diffusion", "abstract": "This paper introduces Diffusion Policy, a new way of generating robot behavior by representing a robot's visuomotor policy as a conditional denoising diffusion process. We benchmark Diffusion Policy across 12 different tasks from 4 different robot manipulation benchmarks and find that it consistently outperforms existing state-of-the-art robot learning methods with an average improvement of 46.9%. Diffusion Policy learns the gradient of the action-distribution score function and iteratively optimizes with respect to this gradient field during inference via a series of stochastic Langevin dynamics steps. We find that the diffusion formulation yields powerful advantages when used for robot policies, including gracefully handling multimodal action distributions, being suitable for high-dimensional action spaces, and exhibiting impressive training stability. To fully unlock the potential of diffusion models for visuomotor policy learning on physical robots, this paper presents a set of key technical contributions including the incorporation of receding horizon control, visual conditioning, and the time-series diffusion transformer. We hope this work will help motivate a new generation of policy learning techniques that are able to leverage the powerful generative modeling capabilities of diffusion models. Code, data, and training details is publicly available diffusion-policy.cs.columbia.edu", "year": 2023, "venue": "Robotics: Science and Systems", "authors": [ "Cheng Chi", "S. Feng", "Yilun Du", "Zhenjia Xu", "Eric A. Cousineau", "B. Burchfiel", "Shuran Song" ], "externalIds": { "DBLP": "journals/corr/abs-2303-04137", "ArXiv": "2303.04137", "DOI": "10.48550/arXiv.2303.04137", "CorpusId": 257378658 }, "url": "https://www.semanticscholar.org/paper/bdba3bd30a49ea4c5b20b43dbd8f0eb59e9d80e2", "referenceCount": 58, "citationCount": 430, "influentialCitationCount": 117, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fast Kinodynamic Planning on the Constraint Manifold With Deep Neural Networks", "abstract": "Motion planning is a mature area of research in robotics with many well-established methods based on optimization or sampling the state space, suitable for solving kinematic motion planning. However, when dynamic motions under constraints are needed and computation time is limited, fast kinodynamic planning on the constraint manifold is indispensable. In recent years, learning-based solutions have become alternatives to classical approaches, but they still lack comprehensive handling of complex constraints, such as planning on a lower dimensional manifold of the task space while considering the robot's dynamics. This article introduces a novel learning-to-plan framework that exploits the concept of constraint manifold, including dynamics, and neural planning methods. Our approach generates plans satisfying an arbitrary set of constraints and computes them in a short constant time, namely the inference time of a neural network. 
This allows the robot to plan and replan reactively, making our approach suitable for dynamic environments. We validate our approach on two simulated tasks and in a demanding real-world scenario, where we use a Kuka LBR Iiwa 14 robotic arm to perform the hitting movement in robotic air hockey.", "year": 2023, "venue": "IEEE Transactions on robotics", "authors": [ "Piotr Kicki", "Puze Liu", "Davide Tateo", "Haitham Bou-Ammar", "K. Walas", "Piotr Skrzypczy'nski", "Jan Peters" ], "externalIds": { "DBLP": "journals/trob/KickiLTBWSP24", "ArXiv": "2301.04330", "DOI": "10.1109/TRO.2023.3326922", "CorpusId": 255595544 }, "url": "https://www.semanticscholar.org/paper/a8ba90983d8b63a9ea5bd11cfeaaabc70fae7b17", "referenceCount": 88, "citationCount": 9, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Utilizing Human Feedback for Primitive Optimization in Wheelchair Tennis", "abstract": "Agile robotics presents a difficult challenge with robots moving at high speeds requiring precise and low-latency sensing and control. Creating agile motion that accomplishes the task at hand while being safe to execute is a key requirement for agile robots to gain human trust. This requires designing new approaches that are flexible and maintain knowledge over world constraints. In this paper, we consider the problem of building a flexible and adaptive controller for a challenging agile mobile manipulation task of hitting ground strokes on a wheelchair tennis robot. We propose and evaluate an extension to work done on learning striking behaviors using a probabilistic movement primitive (ProMP) framework by (1) demonstrating the safe execution of learned primitives on an agile mobile manipulator setup, and (2) proposing an online primitive refinement procedure that utilizes evaluative feedback from humans on the executed trajectories.", "year": 2022, "venue": "arXiv.org", "authors": [ "Arjun Krishna", "Z. Zaidi", "Letian Chen", "Rohan R. Paleja", "Esmaeil Seraj", "M. Gombolay" ], "externalIds": { "ArXiv": "2212.14403", "DBLP": "journals/corr/abs-2212-14403", "DOI": "10.48550/arXiv.2212.14403", "CorpusId": 254223823 }, "url": "https://www.semanticscholar.org/paper/270f233ebb2d94af2d2561bc5f47dc44612c8236", "referenceCount": 26, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Is Conditional Generative Modeling all you need for Decision-Making?", "abstract": "Recent improvements in conditional generative modeling have made it possible to generate high-quality images from language descriptions alone. We investigate whether these methods can directly address the problem of sequential decision-making. We view decision-making not through the lens of reinforcement learning (RL), but rather through conditional generative modeling. To our surprise, we find that our formulation leads to policies that can outperform existing offline RL approaches across standard benchmarks. By modeling a policy as a return-conditional diffusion model, we illustrate how we may circumvent the need for dynamic programming and subsequently eliminate many of the complexities that come with traditional offline RL. We further demonstrate the advantages of modeling policies as conditional diffusion models by considering two other conditioning variables: constraints and skills. 
Conditioning on a single constraint or skill during training leads to behaviors at test-time that can satisfy several constraints together or demonstrate a composition of skills. Our results illustrate that conditional generative modeling is a powerful tool for decision-making.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Anurag Ajay", "Yilun Du", "Abhi Gupta", "J. Tenenbaum", "T. Jaakkola", "Pulkit Agrawal" ], "externalIds": { "DBLP": "journals/corr/abs-2211-15657", "ArXiv": "2211.15657", "DOI": "10.48550/arXiv.2211.15657", "CorpusId": 254044710 }, "url": "https://www.semanticscholar.org/paper/f19dfc360088922cf1d423c538662aae8d542c28", "referenceCount": 71, "citationCount": 227, "influentialCitationCount": 50, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Diffusion Posterior Sampling for General Noisy Inverse Problems", "abstract": "Diffusion models have been recently studied as powerful generative inverse problem solvers, owing to their high quality reconstructions and the ease of combining existing iterative solvers. However, most works focus on solving simple linear inverse problems in noiseless settings, which significantly under-represents the complexity of real-world problems. In this work, we extend diffusion solvers to efficiently handle general noisy (non)linear inverse problems via approximation of the posterior sampling. Interestingly, the resulting posterior sampling scheme is a blended version of diffusion sampling with the manifold constrained gradient without a strict measurement consistency projection step, yielding a more desirable generative path in noisy settings compared to the previous studies. Our method demonstrates that diffusion models can incorporate various measurement noise statistics such as Gaussian and Poisson, and also efficiently handle noisy nonlinear inverse problems such as Fourier phase retrieval and non-uniform deblurring. Code available at https://github.com/DPS2022/diffusion-posterior-sampling", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Hyungjin Chung", "Jeongsol Kim", "Michael T. McCann", "M. Klasky", "J. C. Ye" ], "externalIds": { "DBLP": "conf/iclr/ChungKMKY23", "ArXiv": "2209.14687", "DOI": "10.48550/arXiv.2209.14687", "CorpusId": 252596252 }, "url": "https://www.semanticscholar.org/paper/61e46884567be7cad12e999365b16a8d3414b678", "referenceCount": 45, "citationCount": 434, "influentialCitationCount": 128, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Fast Lifelong Adaptive Inverse Reinforcement Learning from Demonstrations", "abstract": "Learning from Demonstration (LfD) approaches empower end-users to teach robots novel tasks via demonstrations of the desired behaviors, democratizing access to robotics. However, current LfD frameworks are not capable of fast adaptation to heterogeneous human demonstrations nor the large-scale deployment in ubiquitous robotics applications. In this paper, we propose a novel LfD framework, Fast Lifelong Adaptive Inverse Reinforcement learning (FLAIR). 
Our approach (1) leverages learned strategies to construct policy mixtures for fast adaptation to new demonstrations, allowing for quick end-user personalization, (2) distills common knowledge across demonstrations, achieving accurate task inference; and (3) expands its model only when needed in lifelong deployments, maintaining a concise set of prototypical strategies that can approximate all behaviors via policy mixtures. We empirically validate that FLAIR achieves adaptability (i.e., the robot adapts to heterogeneous, user-specific task preferences), efficiency (i.e., the robot achieves sample-efficient adaptation), and scalability (i.e., the model grows sublinearly with the number of demonstrations while maintaining high performance). FLAIR surpasses benchmarks across three control tasks with an average 57% improvement in policy returns and an average 78% fewer episodes required for demonstration modeling using policy mixtures. Finally, we demonstrate the success of FLAIR in a table tennis task and find users rate FLAIR as having higher task (p<.05) and personalization (p<.05) performance.", "year": 2022, "venue": "Conference on Robot Learning", "authors": [ "Letian Chen", "Sravan Jayanthi", "Rohan R. Paleja", "Daniel Martin", "Viacheslav Zakharov", "M. Gombolay" ], "externalIds": { "DBLP": "journals/corr/abs-2209-11908", "ArXiv": "2209.11908", "DOI": "10.48550/arXiv.2209.11908", "CorpusId": 252531579 }, "url": "https://www.semanticscholar.org/paper/d3ab9181f93fdd4cc05c756dbc485cb2a085e84b", "referenceCount": 64, "citationCount": 12, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Diffusion Policies as an Expressive Policy Class for Offline Reinforcement Learning", "abstract": "Offline reinforcement learning (RL), which aims to learn an optimal policy using a previously collected static dataset, is an important paradigm of RL. Standard RL methods often perform poorly in this regime due to the function approximation errors on out-of-distribution actions. While a variety of regularization methods have been proposed to mitigate this issue, they are often constrained by policy classes with limited expressiveness that can lead to highly suboptimal solutions. In this paper, we propose representing the policy as a diffusion model, a recent class of highly-expressive deep generative models. We introduce Diffusion Q-learning (Diffusion-QL) that utilizes a conditional diffusion model to represent the policy. In our approach, we learn an action-value function and we add a term maximizing action-values into the training loss of the conditional diffusion model, which results in a loss that seeks optimal actions that are near the behavior policy. We show the expressiveness of the diffusion model-based policy, and the coupling of the behavior cloning and policy improvement under the diffusion model both contribute to the outstanding performance of Diffusion-QL. We illustrate the superiority of our method compared to prior works in a simple 2D bandit example with a multimodal behavior policy. We then show that our method can achieve state-of-the-art performance on the majority of the D4RL benchmark tasks.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Zhendong Wang", "Jonathan J. 
Hunt", "Mingyuan Zhou" ], "externalIds": { "DBLP": "conf/iclr/WangHZ23", "ArXiv": "2208.06193", "DOI": "10.48550/arXiv.2208.06193", "CorpusId": 251554821 }, "url": "https://www.semanticscholar.org/paper/2cbea7615ebecea2c414d8fbad47d5d258a5c3b4", "referenceCount": 45, "citationCount": 213, "influentialCitationCount": 39, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Classifier-Free Diffusion Guidance", "abstract": "Classifier guidance is a recently introduced method to trade off mode coverage and sample fidelity in conditional diffusion models post training, in the same spirit as low temperature sampling or truncation in other types of generative models. Classifier guidance combines the score estimate of a diffusion model with the gradient of an image classifier and thereby requires training an image classifier separate from the diffusion model. It also raises the question of whether guidance can be performed without a classifier. We show that guidance can be indeed performed by a pure generative model without such a classifier: in what we call classifier-free guidance, we jointly train a conditional and an unconditional diffusion model, and we combine the resulting conditional and unconditional score estimates to attain a trade-off between sample quality and diversity similar to that obtained using classifier guidance.", "year": 2022, "venue": "arXiv.org", "authors": [ "Jonathan Ho" ], "externalIds": { "ArXiv": "2207.12598", "DBLP": "journals/corr/abs-2207-12598", "DOI": "10.48550/arXiv.2207.12598", "CorpusId": 249145348 }, "url": "https://www.semanticscholar.org/paper/af9f365ed86614c800f082bd8eb14be76072ad16", "referenceCount": 25, "citationCount": 2396, "influentialCitationCount": 313, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "i-Sim2Real: Reinforcement Learning of Robotic Policies in Tight Human-Robot Interaction Loops", "abstract": "Sim-to-real transfer is a powerful paradigm for robotic reinforcement learning. The ability to train policies in simulation enables safe exploration and large-scale data collection quickly at low cost. However, prior works in sim-to-real transfer of robotic policies typically do not involve any human-robot interaction because accurately simulating human behavior is an open problem. In this work, our goal is to leverage the power of simulation to train robotic policies that are proficient at interacting with humans upon deployment. But there is a chicken and egg problem -- how to gather examples of a human interacting with a physical robot so as to model human behavior in simulation without already having a robot that is able to interact with a human? Our proposed method, Iterative-Sim-to-Real (i-S2R), attempts to address this. i-S2R bootstraps from a simple model of human behavior and alternates between training in simulation and deploying in the real world. In each iteration, both the human behavior model and the policy are refined. For all training we apply a new evolutionary search algorithm called Blackbox Gradient Sensing (BGS). We evaluate our method on a real world robotic table tennis setting, where the objective for the robot is to play cooperatively with a human player for as long as possible. Table tennis is a high-speed, dynamic task that requires the two players to react quickly to each other's moves, making for a challenging test bed for research on human-robot interaction. 
We present results on an industrial robotic arm that is able to cooperatively play table tennis with human players, achieving rallies of 22 successive hits on average and 150 at best. Further, for 80% of players, rally lengths are 70% to 175% longer compared to the sim-to-real plus fine-tuning (S2R+FT) baseline. For videos of our system in action, please see https://sites.google.com/view/is2r.", "year": 2022, "venue": "Conference on Robot Learning", "authors": [ "Saminda Abeyruwan", "L. Graesser", "David B. D'Ambrosio", "Avi Singh", "Anish Shankar", "A. Bewley", "Deepali Jain", "K. Choromanski", "Pannag R. Sanketi" ], "externalIds": { "DBLP": "conf/corl/AbeyruwanGDSSBJ22", "ArXiv": "2207.06572", "DOI": "10.48550/arXiv.2207.06572", "CorpusId": 250526228 }, "url": "https://www.semanticscholar.org/paper/79e007e57ccaf0d5616456603c815ad7b22c17d1", "referenceCount": 78, "citationCount": 37, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Improving Diffusion Models for Inverse Problems using Manifold Constraints", "abstract": "Recently, diffusion models have been used to solve various inverse problems in an unsupervised manner with appropriate modifications to the sampling process. However, the current solvers, which recursively apply a reverse diffusion step followed by a projection-based measurement consistency step, often produce suboptimal results. By studying the generative sampling path, here we show that current solvers throw the sample path off the data manifold, and hence the error accumulates. To address this, we propose an additional correction term inspired by the manifold constraint, which can be used synergistically with the previous solvers to make the iterations close to the manifold. The proposed manifold constraint is straightforward to implement within a few lines of code, yet boosts the performance by a surprisingly large margin. With extensive experiments, we show that our method is superior to the previous methods both theoretically and empirically, producing promising results in many applications such as image inpainting, colorization, and sparse-view computed tomography. Code available https://github.com/HJ-harry/MCG_diffusion", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Hyungjin Chung", "Byeongsu Sim", "Dohoon Ryu", "J. C. Ye" ], "externalIds": { "DBLP": "conf/nips/ChungSRY22", "ArXiv": "2206.00941", "DOI": "10.48550/arXiv.2206.00941", "CorpusId": 249282628 }, "url": "https://www.semanticscholar.org/paper/b3f5cf32178bcbed91aa5303b70963c6463f48a2", "referenceCount": 63, "citationCount": 282, "influentialCitationCount": 35, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Planning with Diffusion for Flexible Behavior Synthesis", "abstract": "Model-based reinforcement learning methods often use learning only for the purpose of estimating an approximate dynamics model, offloading the rest of the decision-making work to classical trajectory optimizers. While conceptually simple, this combination has a number of empirical shortcomings, suggesting that learned models may not be well-suited to standard trajectory optimization. In this paper, we consider what it would look like to fold as much of the trajectory optimization pipeline as possible into the modeling problem, such that sampling from the model and planning with it become nearly identical. 
The core of our technical approach lies in a diffusion probabilistic model that plans by iteratively denoising trajectories. We show how classifier-guided sampling and image inpainting can be reinterpreted as coherent planning strategies, explore the unusual and useful properties of diffusion-based planning methods, and demonstrate the effectiveness of our framework in control settings that emphasize long-horizon decision-making and test-time flexibility.", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "Michael Janner", "Yilun Du", "J. Tenenbaum", "S. Levine" ], "externalIds": { "ArXiv": "2205.09991", "DBLP": "journals/corr/abs-2205-09991", "DOI": "10.48550/arXiv.2205.09991", "CorpusId": 248965046 }, "url": "https://www.semanticscholar.org/paper/3ebdd3db0dd91069fa0cd31cbf8308b60b1b565e", "referenceCount": 76, "citationCount": 383, "influentialCitationCount": 81, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "What Matters in Learning from Offline Human Demonstrations for Robot Manipulation", "abstract": "Imitating human demonstrations is a promising approach to endow robots with various manipulation capabilities. While recent advances have been made in imitation learning and batch (offline) reinforcement learning, a lack of open-source human datasets and reproducible learning methods make assessing the state of the field difficult. In this paper, we conduct an extensive study of six offline learning algorithms for robot manipulation on five simulated and three real-world multi-stage manipulation tasks of varying complexity, and with datasets of varying quality. Our study analyzes the most critical challenges when learning from offline human data for manipulation. Based on the study, we derive a series of lessons including the sensitivity to different algorithmic design choices, the dependence on the quality of the demonstrations, and the variability based on the stopping criteria due to the different objectives in training and evaluation. We also highlight opportunities for learning from human datasets, such as the ability to learn proficient policies on challenging, multi-stage tasks beyond the scope of current reinforcement learning methods, and the ability to easily scale to natural, real-world manipulation scenarios where only raw sensory signals are available. We have open-sourced our datasets and all algorithm implementations to facilitate future research and fair comparisons in learning from human demonstration data. Codebase, datasets, trained models, and more available at https://arise-initiative.github.io/robomimic-web/", "year": 2021, "venue": "Conference on Robot Learning", "authors": [ "Ajay Mandlekar", "Danfei Xu", "J. Wong", "Soroush Nasiriany", "Chen Wang", "Rohun Kulkarni", "Li Fei-Fei", "S. Savarese", "Yuke Zhu", "Roberto Mart'in-Mart'in" ], "externalIds": { "ArXiv": "2108.03298", "DBLP": "journals/corr/abs-2108-03298", "CorpusId": 236956615 }, "url": "https://www.semanticscholar.org/paper/3032844d6ac6882ccb03e7a2c22a0026b210ac05", "referenceCount": 90, "citationCount": 312, "influentialCitationCount": 41, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ILVR: Conditioning Method for Denoising Diffusion Probabilistic Models", "abstract": "Denoising diffusion probabilistic models (DDPM) have shown remarkable performance in unconditional image generation. 
However, due to the stochasticity of the generative process in DDPM, it is challenging to generate images with the desired semantics. In this work, we propose Iterative Latent Variable Refinement (ILVR), a method to guide the generative process in DDPM to generate high-quality images based on a given reference image. Here, the refinement of the generative process in DDPM enables a single DDPM to sample images from various sets directed by the reference image. The proposed ILVR method generates high-quality images while controlling the generation. The controllability of our method allows adaptation of a single DDPM without any additional learning in various image generation tasks, such as generation from various downsampling factors, multi-domain image translation, paint-to-image, and editing with scribbles.", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Jooyoung Choi", "Sungwon Kim", "Yonghyun Jeong", "Youngjune Gwon", "Sungroh Yoon" ], "externalIds": { "DBLP": "journals/corr/abs-2108-02938", "ArXiv": "2108.02938", "DOI": "10.1109/iccv48922.2021.01410", "CorpusId": 236950721 }, "url": "https://www.semanticscholar.org/paper/cda3fbbac6734b603bee363b0938e9baa924aa78", "referenceCount": 59, "citationCount": 550, "influentialCitationCount": 54, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Efficient and Reactive Planning for High Speed Robot Air Hockey", "abstract": "Highly dynamic robotic tasks require high-speed and reactive robots. These tasks are particularly challenging due to the physical constraints, hardware limitations, and the high uncertainty of dynamics and sensor measures. To face these issues, it’s crucial to design robotics agents that generate precise and fast trajectories and react immediately to environmental changes. Air hockey is an example of this kind of task. Due to the environment’s characteristics, it is possible to formalize the problem and derive clean mathematical solutions. For these reasons, this environment is perfect for pushing to the limit the performance of currently available general-purpose robotic manipulators. Using two Kuka Iiwa 14, we show how to design a policy for general-purpose robotic manipulators for the air hockey game. We demonstrate that a real robot arm can perform fast-hitting movements and that the two robots can play against each other on a medium-size air hockey table in simulation.", "year": 2021, "venue": "IEEE/RJS International Conference on Intelligent RObots and Systems", "authors": [ "Puze Liu", "Davide Tateo", "Haitham Bou-Ammar", "Jan Peters" ], "externalIds": { "DBLP": "journals/corr/abs-2107-06140", "ArXiv": "2107.06140", "DOI": "10.1109/IROS51168.2021.9636263", "CorpusId": 235828810 }, "url": "https://www.semanticscholar.org/paper/6264a8d654dd5e7bc01b773380258218ca3d14f8", "referenceCount": 19, "citationCount": 16, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Challenges of real-world reinforcement learning: definitions, benchmarks and analysis", "abstract": null, "year": 2021, "venue": "Machine-mediated learning", "authors": [ "Cosmin Paduraru", "D. 
Mankowitz", "Gabriel Dulac-Arnold", "Jerry Li", "Nir Levine", "Sven Gowal", "Todd Hester" ], "externalIds": { "MAG": "3121342653", "DBLP": "journals/ml/Dulac-ArnoldLML21", "DOI": "10.1007/s10994-021-05961-4", "CorpusId": 234868359 }, "url": "https://www.semanticscholar.org/paper/5f1adc14a77fb61aa463fac728397bd32e00b617", "referenceCount": 144, "citationCount": 355, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Score-Based Generative Modeling through Stochastic Differential Equations", "abstract": "Creating noise from data is easy; creating data from noise is generative modeling. We present a stochastic differential equation (SDE) that smoothly transforms a complex data distribution to a known prior distribution by slowly injecting noise, and a corresponding reverse-time SDE that transforms the prior distribution back into the data distribution by slowly removing the noise. Crucially, the reverse-time SDE depends only on the time-dependent gradient field (\\aka, score) of the perturbed data distribution. By leveraging advances in score-based generative modeling, we can accurately estimate these scores with neural networks, and use numerical SDE solvers to generate samples. We show that this framework encapsulates previous approaches in score-based generative modeling and diffusion probabilistic modeling, allowing for new sampling procedures and new modeling capabilities. In particular, we introduce a predictor-corrector framework to correct errors in the evolution of the discretized reverse-time SDE. We also derive an equivalent neural ODE that samples from the same distribution as the SDE, but additionally enables exact likelihood computation, and improved sampling efficiency. In addition, we provide a new way to solve inverse problems with score-based models, as demonstrated with experiments on class-conditional generation, image inpainting, and colorization. Combined with multiple architectural improvements, we achieve record-breaking performance for unconditional image generation on CIFAR-10 with an Inception score of 9.89 and FID of 2.20, a competitive likelihood of 2.99 bits/dim, and demonstrate high fidelity generation of 1024 x 1024 images for the first time from a score-based generative model.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Yang Song", "Jascha Narain Sohl-Dickstein", "Diederik P. Kingma", "Abhishek Kumar", "Stefano Ermon", "Ben Poole" ], "externalIds": { "DBLP": "journals/corr/abs-2011-13456", "ArXiv": "2011.13456", "MAG": "3110257065", "CorpusId": 227209335 }, "url": "https://www.semanticscholar.org/paper/633e2fbfc0b21e959a244100937c5853afca4853", "referenceCount": 66, "citationCount": 4108, "influentialCitationCount": 956, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Sample-efficient Reinforcement Learning in Robotic Table Tennis", "abstract": "Reinforcement learning (RL) has achieved some impressive recent successes in various computer games and simulations. Most of these successes are based on having large numbers of episodes from which the agent can learn. In typical robotic applications, however, the number of feasible attempts is very limited. In this paper we present a sample-efficient RL algorithm applied to the example of a table tennis robot. In table tennis every stroke is different, with varying placement, speed and spin. 
An accurate return therefore has to be found depending on a high-dimensional continuous state space. To make learning in few trials possible the method is embedded into our robot system. In this way we can use a one-step environment. The state space depends on the ball at hitting time (position, velocity, spin) and the action is the racket state (orientation, velocity) at hitting. An actor-critic based deterministic policy gradient algorithm was developed for accelerated learning. Our approach performs competitively both in a simulation and on the real robot in a number of challenging scenarios. Accurate results are obtained without pre-training in under 200 episodes of training. The video presenting our experiments is available at https://youtu.be/uRAtdoL6Wpw.", "year": 2020, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Jonas Tebbe", "Lukas Krauch", "Yapeng Gao", "A. Zell" ], "externalIds": { "DBLP": "conf/icra/TebbeKGZ21", "MAG": "3103793864", "ArXiv": "2011.03275", "DOI": "10.1109/ICRA48506.2021.9560764", "CorpusId": 226277996 }, "url": "https://www.semanticscholar.org/paper/d0b69a1885781e38de0f2cb81651fb852799ba3a", "referenceCount": 51, "citationCount": 28, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning from Suboptimal Demonstration via Self-Supervised Reward Regression", "abstract": "Learning from Demonstration (LfD) seeks to democratize robotics by enabling non-roboticist end-users to teach robots to perform a task by providing a human demonstration. However, modern LfD techniques, such as inverse reinforcement learning (IRL), assume users provide at least stochastically optimal demonstrations. This assumption fails to hold in all but the most isolated, controlled scenarios, reducing the ability to achieve the goal of empowering real end-users. Recent attempts to learn from sub-optimal demonstration leverage pairwise rankings through Preference-based Reinforcement Learning (PbRL) to infer a more optimal policy than the demonstration. However, we show that these approaches make incorrect assumptions and, consequently, suffer from brittle, degraded performance. In this paper, we overcome the limitations of prior work by developing a novel computational technique that infers an idealized reward function from suboptimal demonstration and bootstraps suboptimal demonstrations to synthesize optimality-parameterized training data for training our reward function. We empirically validate we can learn an idealized reward function with $\\sim0.95$ correlation with the ground truth reward versus only $\\sim 0.75$ for prior work. We can then train policies achieving $\\sim 200\\%$ improvement over the suboptimal demonstration and $\\sim 90\\%$ improvement over prior work. Finally, we present a real-world implementation for teaching a robot to hit a topspin shot in table tennis better than user demonstration.", "year": 2020, "venue": "Conference on Robot Learning", "authors": [ "Letian Chen", "Rohan R. Paleja", "M. 
Gombolay" ], "externalIds": { "DBLP": "journals/corr/abs-2010-11723", "ArXiv": "2010.11723", "MAG": "3093784762", "CorpusId": 225039754 }, "url": "https://www.semanticscholar.org/paper/0eb1a4c84bf6c2c96decfe53c1e9899c2fb0b7ce", "referenceCount": 48, "citationCount": 93, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Denoising Diffusion Probabilistic Models", "abstract": "We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional CIFAR10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art FID score of 3.17. On 256x256 LSUN, we obtain sample quality similar to ProgressiveGAN. Our implementation is available at this https URL", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Jonathan Ho", "Ajay Jain", "P. Abbeel" ], "externalIds": { "ArXiv": "2006.11239", "MAG": "3100572490", "DBLP": "journals/corr/abs-2006-11239", "CorpusId": 219955663 }, "url": "https://www.semanticscholar.org/paper/5c126ae3421f05768d8edd97ecd44b1364e2c99a", "referenceCount": 73, "citationCount": 10778, "influentialCitationCount": 2337, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Learning to Play Table Tennis From Scratch Using Muscular Robots", "abstract": "Dynamic tasks such as table tennis are relatively easy to learn for humans, but pose significant challenges to robots. Such tasks require accurate control of fast movements and precise timing in the presence of imprecise state estimation of the flying ball and the robot. Reinforcement learning (RL) has shown promise in learning complex control tasks from data. However, applying step-based RL to dynamic tasks on real systems is safety-critical as RL requires exploring and failing safely for millions of time steps in high-speed and high-acceleration regimes. This article demonstrates that using robot arms driven by pneumatic artificial muscles (PAMs) enables safe end-to-end learning of table tennis using model-free RL. In particular, we learn from scratch for thousands of trials while a stochastic policy acts on the low-level controls of the real system. The robot returns and smashes real balls with $5 $ ms−1 and $12 $ ms−1 on average, respectively, to a desired landing point. Additionally, we present hybrid sim and real training (HYSR), a practical procedure that avoids training with real balls by virtually replaying recorded ball trajectories and applying actions to the real robot. To the best of authors’ knowledge, this work pioneers (i) failsafe learning of a safety-critical dynamic task using anthropomorphic robot arms, (ii) learning a precision-demanding problem with a PAM-driven system that is inherently hard to control as well as (iii) train a robot to play table tennis without real balls.", "year": 2020, "venue": "IEEE Transactions on robotics", "authors": [ "Dieter Büchler", "Simon Guist", "R. Calandra", "Vincent Berenz", "B. Scholkopf", "J. 
Peters" ], "externalIds": { "MAG": "3037785630", "DBLP": "journals/corr/abs-2006-05935", "ArXiv": "2006.05935", "DOI": "10.1109/TRO.2022.3176207", "CorpusId": 219558297 }, "url": "https://www.semanticscholar.org/paper/46146c301e3a0ea0454680d5d2bec75e6496d4d2", "referenceCount": 34, "citationCount": 72, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Recent Advances in Robot Learning from Demonstration", "abstract": "In the context of robotics and automation, learning from demonstration (LfD) is the paradigm in which robots acquire new skills by learning to imitate an expert. The choice of LfD over other robot learning methods is compelling when ideal behavior can be neither easily scripted (as is done in traditional robot programming) nor easily defined as an optimization problem, but can be demonstrated. While there have been multiple surveys of this field in the past, there is a need for a new one given the considerable growth in the number of publications in recent years. This review aims to provide an overview of the collection of machine-learning methods used to enable a robot to learn from and imitate a teacher. We focus on recent advancements in the field and present an updated taxonomy and characterization of existing methods. We also discuss mature and emerging application areas for LfD and highlight the significant challenges that remain to be overcome both in theory and in practice.", "year": 2020, "venue": "Annu. Rev. Control. Robotics Auton. Syst.", "authors": [ "H. Ravichandar", "Athanasios S. Polydoros", "Sonia Chernova", "A. Billard" ], "externalIds": { "MAG": "2994446013", "DBLP": "journals/arcras/RavichandarPCB20", "DOI": "10.1146/annurev-control-100819-063206", "CorpusId": 208958394 }, "url": "https://www.semanticscholar.org/paper/f441e637980a8b427474dbdc0141f38dd78bb831", "referenceCount": 214, "citationCount": 525, "influentialCitationCount": 16, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Learning Techniques for Inverse Problems in Imaging", "abstract": "Recent work in machine learning shows that deep neural networks can be used to solve a wide variety of inverse problems arising in computational imaging. We explore the central prevailing themes of this emerging area and present a taxonomy that can be used to categorize different problems and reconstruction methods. Our taxonomy is organized along two central axes: (1) whether or not a forward model is known and to what extent it is used in training and testing, and (2) whether or not the learning is supervised or unsupervised, i.e., whether or not the training relies on access to matched ground truth image and measurement pairs. We also discuss the tradeoffs associated with these different reconstruction approaches, caveats and common failure modes, plus open problems and avenues for future work.", "year": 2020, "venue": "IEEE Journal on Selected Areas in Information Theory", "authors": [ "Greg Ongie", "A. Jalal", "Christopher A. Metzler", "Richard Baraniuk", "A. Dimakis", "R. 
Willett" ], "externalIds": { "MAG": "3021094251", "DBLP": "journals/jsait/OngieJMBDW20", "ArXiv": "2005.06001", "DOI": "10.1109/JSAIT.2020.2991563", "CorpusId": 218613748 }, "url": "https://www.semanticscholar.org/paper/b3e430ce15a956694d45203e69c537486025309f", "referenceCount": 187, "citationCount": 468, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Robotic Table Tennis with Model-Free Reinforcement Learning", "abstract": "We propose a model-free algorithm for learning efficient policies capable of returning table tennis balls by controlling robot joints at a rate of 100Hz. We demonstrate that evolutionary search (ES) methods acting on CNN-based policy architectures for non-visual inputs and convolving across time learn compact controllers leading to smooth motions. Furthermore, we show that with appropriately tuned curriculum learning on the task and rewards, policies are capable of developing multi-modal styles, specifically forehand and backhand stroke, whilst achieving 80% return rate on a wide range of ball throws. We observe that multi-modality does not require any architectural priors, such as multi-head architectures or hierarchical policies.", "year": 2020, "venue": "IEEE/RJS International Conference on Intelligent RObots and Systems", "authors": [ "Wenbo Gao", "L. Graesser", "K. Choromanski", "Xingyou Song", "N. Lazic", "Pannag R. Sanketi", "Vikas Sindhwani", "N. Jaitly" ], "externalIds": { "MAG": "3014969391", "DBLP": "conf/iros/GaoGCSLSSJ20", "ArXiv": "2003.14398", "DOI": "10.1109/IROS45743.2020.9341191", "CorpusId": 214728040 }, "url": "https://www.semanticscholar.org/paper/0f6be23c9e36d6d32d7550a7ba91a10ef5ffebed", "referenceCount": 46, "citationCount": 37, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Soft Actor-Critic Algorithms and Applications", "abstract": "Model-free deep reinforcement learning (RL) algorithms have been successfully applied to a range of challenging sequential decision making and control tasks. However, these methods typically suffer from two major challenges: high sample complexity and brittleness to hyperparameters. Both of these challenges limit the applicability of such methods to real-world domains. In this paper, we describe Soft Actor-Critic (SAC), our recently introduced off-policy actor-critic algorithm based on the maximum entropy RL framework. In this framework, the actor aims to simultaneously maximize expected return and entropy. That is, to succeed at the task while acting as randomly as possible. We extend SAC to incorporate a number of modifications that accelerate training and improve stability with respect to the hyperparameters, including a constrained formulation that automatically tunes the temperature hyperparameter. We systematically evaluate SAC on a range of benchmark tasks, as well as real-world challenging tasks such as locomotion for a quadrupedal robot and robotic manipulation with a dexterous hand. With these improvements, SAC achieves state-of-the-art performance, outperforming prior on-policy and off-policy methods in sample-efficiency and asymptotic performance. Furthermore, we demonstrate that, in contrast to other off-policy algorithms, our approach is very stable, achieving similar performance across different random seeds. 
These results suggest that SAC is a promising candidate for learning in real-world robotics tasks.", "year": 2018, "venue": "arXiv.org", "authors": [ "Tuomas Haarnoja", "Aurick Zhou", "Kristian Hartikainen", "G. Tucker", "Sehoon Ha", "Jie Tan", "Vikash Kumar", "Henry Zhu", "Abhishek Gupta", "P. Abbeel", "S. Levine" ], "externalIds": { "DBLP": "journals/corr/abs-1812-05905", "MAG": "2904246096", "ArXiv": "1812.05905", "CorpusId": 55703664 }, "url": "https://www.semanticscholar.org/paper/12c0751b4f51ed833172a713b7e32390032ead93", "referenceCount": 49, "citationCount": 2036, "influentialCitationCount": 475, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Sim-to-Real Transfer of Robotic Control with Dynamics Randomization", "abstract": "Simulations are attractive environments for training agents as they provide an abundant source of data and alleviate certain safety concerns during the training process. But the behaviours developed by agents in simulation are often specific to the characteristics of the simulator. Due to modeling error, strategies that are successful in simulation may not transfer to their real world counterparts. In this paper, we demonstrate a simple method to bridge this “reality gap”. By randomizing the dynamics of the simulator during training, we are able to develop policies that are capable of adapting to very different dynamics, including ones that differ significantly from the dynamics on which the policies were trained. This adaptivity enables the policies to generalize to the dynamics of the real world without any training on the physical system. Our approach is demonstrated on an object pushing task using a robotic arm. Despite being trained exclusively in simulation, our policies are able to maintain a similar level of performance when deployed on a real robot, reliably moving an object to a desired location from random initial configurations. We explore the impact of various design decisions and show that the resulting policies are robust to significant calibration error.", "year": 2017, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "X. B. Peng", "Marcin Andrychowicz", "Wojciech Zaremba", "P. Abbeel" ], "externalIds": { "MAG": "2949612618", "DBLP": "conf/icra/PengAZA18", "ArXiv": "1710.06537", "DOI": "10.1109/ICRA.2018.8460528", "CorpusId": 3707478 }, "url": "https://www.semanticscholar.org/paper/0af8cdb71ce9e5bf37ad2a11f05af293cfe62172", "referenceCount": 40, "citationCount": 1197, "influentialCitationCount": 67, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Using probabilistic movement primitives for striking movements", "abstract": "Due to its strong requirements in motor abilities, robot table tennis is an important test bed for new robot learning approaches. Learning approaches have to generalize a complex hitting behavior from relatively few demonstrated trajectories, which neither cover all ball trajectories nor all desired hitting directions. Therefore, past approaches that only modeled a deterministic mean behavior without capturing the variability of the movement have been fairly limited. Recent work on capturing trajectory distributions using probabilistic movement representations opens important new possibilities for robot table tennis. In this paper, we present two new methods to adapt probabilistic movement primitives. 
First we present a method to adapt a probability distribution of hitting movements learned in joint space to have a desired end effector position, velocity and orientation. Subsequently, we present a method to find the initial time and duration of the movement primitive in order to intercept a moving object like the table tennis ball. The resulting methods rely on simple operations from probability theory. Providing a more principled approach to solve some of the challenges of robot table tennis compared to previous approaches. Additionally, the presented method has the potential of generalizing to many other motor tasks.", "year": 2016, "venue": "IEEE-RAS International Conference on Humanoid Robots", "authors": [ "S. Gómez-González", "G. Neumann", "B. Scholkopf", "J. Peters" ], "externalIds": { "MAG": "2568487251", "DBLP": "conf/humanoids/Gomez-GonzalezN16", "DOI": "10.1109/HUMANOIDS.2016.7803322", "CorpusId": 30771833 }, "url": "https://www.semanticscholar.org/paper/73b1b19280594d9837dbeda477d7efe94269d7c6", "referenceCount": 13, "citationCount": 25, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Generative Adversarial Imitation Learning", "abstract": "Consider learning a policy from example expert behavior, without interaction with the expert or access to reinforcement signal. One approach is to recover the expert's cost function with inverse reinforcement learning, then extract a policy from that cost function with reinforcement learning. This approach is indirect and can be slow. We propose a new general framework for directly extracting a policy from data, as if it were obtained by reinforcement learning following inverse reinforcement learning. We show that a certain instantiation of our framework draws an analogy between imitation learning and generative adversarial networks, from which we derive a model-free imitation learning algorithm that obtains significant performance gains over existing model-free methods in imitating complex behaviors in large, high-dimensional environments.", "year": 2016, "venue": "Neural Information Processing Systems", "authors": [ "Jonathan Ho", "Stefano Ermon" ], "externalIds": { "MAG": "2949080919", "DBLP": "journals/corr/HoE16", "ArXiv": "1606.03476", "CorpusId": 16153365 }, "url": "https://www.semanticscholar.org/paper/4ab53de69372ec2cd2d90c126b6a100165dc8ed1", "referenceCount": 33, "citationCount": 2735, "influentialCitationCount": 513, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Probabilistic Movement Primitives", "abstract": "Movement Primitives (MP) are a well-established approach for representing modular and re-usable robot movement generators. Many state-of-the-art robot learning successes are based MPs, due to their compact representation of the inherently continuous and high dimensional robot movements. A major goal in robot learning is to combine multiple MPs as building blocks in a modular control architecture to solve complex tasks. To this effect, a MP representation has to allow for blending between motions, adapting to altered task variables, and co-activating multiple MPs in parallel. We present a probabilistic formulation of the MP concept that maintains a distribution over trajectories. Our probabilistic approach allows for the derivation of new operations which are essential for implementing all aforementioned properties in one framework. 
In order to use such a trajectory distribution for robot movement control, we analytically derive a stochastic feedback controller which reproduces the given trajectory distribution. We evaluate and compare our approach to existing methods on several simulated as well as real robot scenarios.", "year": 2013, "venue": "Neural Information Processing Systems", "authors": [ "A. Paraschos", "Christian Daniel", "Jan Peters", "G. Neumann" ], "externalIds": { "MAG": "2140801763", "DBLP": "conf/nips/ParaschosDPN13", "CorpusId": 13083455 }, "url": "https://www.semanticscholar.org/paper/3a81cfb4a7a880b7cf8979f6067732e961aceb7c", "referenceCount": 24, "citationCount": 574, "influentialCitationCount": 54, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Leveraging multiple simulators for crossing the reality gap", "abstract": "We propose a novel approach for transferring evolved control systems from a simulated environment to a real robot. Multiple dynamic simulation systems are simultaneously employed to provide a valid range of simulation variance that can be exploited to generate robust controllers in a purely virtual environment. These controllers can then be directly transferred to a physical robot.", "year": 2012, "venue": "International Conference on Control, Automation, Robotics and Vision", "authors": [ "A. Boeing", "T. Bräunl" ], "externalIds": { "DBLP": "conf/icarcv/BoeingB12", "MAG": "2081329272", "DOI": "10.1109/ICARCV.2012.6485313", "CorpusId": 16932583 }, "url": "https://www.semanticscholar.org/paper/a90f5627ba1abaec6aecebea66658491c0765ef2", "referenceCount": 20, "citationCount": 40, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "Learning to select and generalize striking movements in robot table tennis", "abstract": "Learning new motor tasks from physical interactions is an important goal for both robotics and machine learning. However, when moving beyond basic skills, most monolithic machine learning approaches fail to scale. For more complex skills, methods that are tailored for the domain of skill learning are needed. In this paper, we take the task of learning table tennis as an example and present a new framework that allows a robot to learn cooperative table tennis from physical interaction with a human. The robot first learns a set of elementary table tennis hitting movements from a human table tennis teacher by kinesthetic teach-in, which is compiled into a set of motor primitives represented by dynamical systems. The robot subsequently generalizes these movements to a wider range of situations using our mixture of motor primitives approach. The resulting policy enables the robot to select appropriate motor primitives as well as to generalize between them. Finally, the robot plays with a human table tennis partner and learns online to improve its behavior. 
We show that the resulting setup is capable of playing table tennis using an anthropomorphic robot arm.", "year": 2012, "venue": "AAAI Fall Symposium: Robots Learning Interactively from Human Teachers", "authors": [ "Katharina Muelling", "Jens Kober", "Oliver Kroemer", "Jan Peters" ], "externalIds": { "DBLP": "journals/ijrr/MullingKKP13", "MAG": "2399574992", "DOI": "10.1177/0278364912472380", "CorpusId": 10852076 }, "url": "https://www.semanticscholar.org/paper/1695dbabf8e905db0b391ff522c323db5fc8b958", "referenceCount": 59, "citationCount": 446, "influentialCitationCount": 22, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Learning table tennis with a Mixture of Motor Primitives", "abstract": "Table tennis is a sufficiently complex motor task for studying complete skill learning systems. It consists of several elementary motions and requires fast movements, accurate control, and online adaptation. To represent the elementary movements needed for robot table tennis, we rely on dynamic systems motor primitives (DMP). While such DMPs have been successfully used for learning a variety of simple motor tasks, they only represent single elementary actions. In order to select and generalize among different striking movements, we present a new approach, called Mixture of Motor Primitives that uses a gating network to activate appropriate motor primitives. The resulting policy enables us to select among the appropriate motor primitives as well as to generalize between them. In order to obtain a fully learned robot table tennis setup, we also address the problem of predicting the necessary context information, i.e., the hitting point in time and space where we want to hit the ball. We show that the resulting setup was capable of playing rudimentary table tennis using an anthropomorphic robot arm.", "year": 2010, "venue": "IEEE-RAS International Conference on Humanoid Robots", "authors": [ "Katharina Muelling", "Jens Kober", "Jan Peters" ], "externalIds": { "DBLP": "conf/humanoids/MullingKP10", "MAG": "2144576818", "DOI": "10.1109/ICHR.2010.5686298", "CorpusId": 14390613 }, "url": "https://www.semanticscholar.org/paper/ffa89e2d70c7b12e42b12923ebc45a46fb7798a9", "referenceCount": 26, "citationCount": 111, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Robot Programming by Demonstration", "abstract": null, "year": 2010, "venue": "Simulation, Modeling, and Programming for Autonomous Robots", "authors": [ "K. Narayanan", "Luis-Felipe Posada", "F. Hoffmann", "T. Bertram" ], "externalIds": { "MAG": "1586824591", "DBLP": "conf/simpar/NarayananPHB10", "DOI": "10.1007/978-3-642-17319-6_28", "CorpusId": 8024583 }, "url": "https://www.semanticscholar.org/paper/5edc84dcb6ead31aa858b3ba4f81a58242f761ec", "referenceCount": 29, "citationCount": 874, "influentialCitationCount": 48, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Crossing the reality gap in evolutionary robotics by promoting transferable controllers", "abstract": "The reality gap, that often makes controllers evolved in simulation inefficient once transferred onto the real system, remains a critical issue in Evolutionary Robotics (ER); it prevents ER application to real-world problems. 
We hypothesize that this gap mainly stems from a conflict between the efficiency of the solutions in simulation and their transferability from simulation to reality: best solutions in simulation often rely on bad simulated phenomena (e.g. the most dynamic ones). This hypothesis leads to a multi-objective formulation of ER in which two main objectives are optimized via a Pareto-based Multi-Objective Evolutionary Algorithm: (1) the fitness and (2) the transferability. To evaluate this second objective, a simulation-to-reality disparity value is approximated for each controller. The proposed method is applied to the evolution of walking controllers for a real 8-DOF quadrupedal robot. It successfully finds efficient and well-transferable controllers with only a few experiments in reality.", "year": 2010, "venue": "Annual Conference on Genetic and Evolutionary Computation", "authors": [ "S. Koos", "Jean-Baptiste Mouret", "S. Doncieux" ], "externalIds": { "DBLP": "conf/gecco/KoosMD10", "MAG": "2089087399", "DOI": "10.1145/1830483.1830505", "CorpusId": 4490512 }, "url": "https://www.semanticscholar.org/paper/a5a7206fb201e6068509d16c1a5ff61d613ca159", "referenceCount": 23, "citationCount": 131, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A survey of robot learning from demonstration", "abstract": null, "year": 2009, "venue": "Robotics Auton. Syst.", "authors": [ "B. Argall", "S. Chernova", "M. Veloso", "Brett Browning" ], "externalIds": { "MAG": "1986014385", "DBLP": "journals/ras/ArgallCVB09", "DOI": "10.1016/j.robot.2008.10.024", "CorpusId": 1045325 }, "url": "https://www.semanticscholar.org/paper/4e5dfb0b1e54412e799eb0e86d552956cc3a5f54", "referenceCount": 114, "citationCount": 3681, "influentialCitationCount": 174, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Robot Reinforcement Learning on the Constraint Manifold", "abstract": ": Reinforcement learning in robotics is extremely challenging due to many practical issues, including safety, mechanical constraints, and wear and tear. Typically, these issues are not considered in the machine learning literature. One crucial problem in applying reinforcement learning in the real world is Safe Ex-ploration, which requires physical and safety constraints satisfaction throughout the learning process. To explore in such a safety-critical environment, leveraging known information such as robot models and constraints is beneficial to provide more robust safety guarantees. Exploiting this knowledge, we propose a novel method to learn robotics tasks in simulation efficiently while satisfying the constraints during the learning process.", "year": 2021, "venue": "Conference on Robot Learning", "authors": [ "Puze Liu", "Davide Tateo", "Haitham Bou-Ammar", "Jan Peters" ], "externalIds": { "DBLP": "conf/corl/LiuTB021", "CorpusId": 235651619 }, "url": "https://www.semanticscholar.org/paper/54d5947f9f1c63104a6b1de912cc278ef1eb9d32", "referenceCount": 40, "citationCount": 36, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Locomotion Skills for Cassie: Iterative Design and Sim-to-Real", "abstract": ": Deep reinforcement learning (DRL) is a promising approach for developing legged locomotion skills. However, current work commonly describes DRL as being a one-shot process, where the state, action and reward are assumed to be well defined and are directly used by an RL algorithm to obtain policies. 
In this paper, we describe and document an iterative design approach, which reflects the multiple design iterations of the reward that are often (if not always) needed in practice. Throughout the process, transfer learning is achieved via Deterministic Action Stochastic State (DASS) tuples, representing the deterministic policy actions associated with states visited by the stochastic policy. We demonstrate the transfer of policies learned in simulation to the physical robot without dynamics randomization. We also identify several key components that are critical for sim-to-real transfer in our setting.", "year": 2019, "venue": "Conference on Robot Learning", "authors": [ "Zhaoming Xie", "Patrick Clary", "Jeremy Dao", "Pedro Morais", "Jonanthan Hurst", "M. V. D. Panne" ], "externalIds": { "DBLP": "conf/corl/XieCDMHP19", "MAG": "3029641972", "CorpusId": 218875900 }, "url": "https://www.semanticscholar.org/paper/719068eb8b8c9ab8552ec3e82c1b1088a9eacdce", "referenceCount": 37, "citationCount": 132, "influentialCitationCount": 8, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Robust Rewards with Adverserial Inverse Reinforcement Learning", "abstract": null, "year": 2018, "venue": "International Conference on Learning Representations", "authors": [ "Justin Fu", "Katie Luo", "Sergey Levine" ], "externalIds": { "DBLP": "conf/iclr/FuLL18", "CorpusId": 263870625 }, "url": "https://www.semanticscholar.org/paper/941ba185f01b1a0a27453fd178aa5f010510ee8b", "referenceCount": 0, "citationCount": 172, "influentialCitationCount": 50, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GENERATIVE ADVERSARIAL NETS", "abstract": "Estimating individualized treatment effects (ITE) is a challenging task due to the need for an individual’s potential outcomes to be learned from biased data and without having access to the counterfactuals. We propose a novel method for inferring ITE based on the Generative Adversarial Nets (GANs) framework. Our method, termed Generative Adversarial Nets for inference of Individualized Treatment Effects (GANITE), is motivated by the possibility that we can capture the uncertainty in the counterfactual distributions by attempting to learn them using a GAN. We generate proxies of the counterfactual outcomes using a counterfactual generator, G, and then pass these proxies to an ITE generator, I, in order to train it. By modeling both of these using the GAN framework, we are able to infer based on the factual data, while still accounting for the unseen counterfactuals. 
We test our method on three real-world datasets (with both binary and multiple treatments) and show that GANITE outperforms state-of-the-art methods.", "year": 2018, "venue": "", "authors": [ "Individualized Treat", "Jinsung Yoon" ], "externalIds": { "CorpusId": 10319744 }, "url": "https://www.semanticscholar.org/paper/c68796f833a7151f0a63d1d1608dc902b4fdc9b6", "referenceCount": 24, "citationCount": 28002, "influentialCitationCount": 3321, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "“Diffusion based multi-agent adversarial tracking,”", "abstract": null, "year": null, "venue": "2023 IEEE International Symposium on Multi-Robot & Multi-Agent Systems", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Bayesian computation with generative diffusion models by Multilevel Monte Carlo": { "paper_title": "Bayesian computation with generative diffusion models by Multilevel Monte Carlo", "arxiv_id": "2409.15511v1", "keyword": "diffusion model", "authors": [ "Abdul-Lateef Haji-Ali", "Marcelo Pereyra", "Luke Shaw", "Konstantinos Zygalakis" ], "references": [ { "title": "Aurora: A Foundation Model of the Atmosphere", "abstract": "Deep learning foundation models are revolutionizing many facets of science by leveraging vast amounts of data to learn general-purpose representations that can be adapted to tackle diverse downstream tasks. Foundation models hold the promise to also transform our ability to model our planet and its subsystems by exploiting the vast expanse of Earth system data. Here we introduce Aurora, a large-scale foundation model of the atmosphere trained on over a million hours of diverse weather and climate data. Aurora leverages the strengths of the foundation modelling approach to produce operational forecasts for a wide variety of atmospheric prediction problems, including those with limited training data, heterogeneous variables, and extreme events. In under a minute, Aurora produces 5-day global air pollution predictions and 10-day high-resolution weather forecasts that outperform state-of-the-art classical simulation tools and the best specialized deep learning models. Taken together, these results indicate that foundation models can transform environmental forecasting.", "year": 2024, "venue": "arXiv.org", "authors": [ "Cristian Bodnar", "W. Bruinsma", "Ana Lucic", "Megan Stanley", "Johannes Brandstetter", "P. Garvan", "Maik Riechert", "Jonathan A. Weyn", "Haiyu Dong", "Anna Vaughan", "Jayesh K. Gupta", "Kit Thambiratnam", "Alex Archibald", "E. Heider", "Max Welling", "Richard E. Turner", "P. Perdikaris" ], "externalIds": { "ArXiv": "2405.13063", "DBLP": "journals/corr/abs-2405-13063", "DOI": "10.48550/arXiv.2405.13063", "CorpusId": 269983273 }, "url": "https://www.semanticscholar.org/paper/16befb80c7af86b58e8d962fccbfe2bf41b84ae8", "referenceCount": 0, "citationCount": 20, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Physics" ] }, { "title": "GenCast: Diffusion-based ensemble forecasting for medium-range weather", "abstract": "Weather forecasts are fundamentally uncertain, so predicting the range of probable weather scenarios is crucial for important decisions, from warning the public about hazardous weather, to planning renewable energy use. 
Here, we introduce GenCast, a probabilistic weather model with greater skill and speed than the top operational medium-range weather forecast in the world, the European Centre for Medium-Range Forecasts (ECMWF)'s ensemble forecast, ENS. Unlike traditional approaches, which are based on numerical weather prediction (NWP), GenCast is a machine learning weather prediction (MLWP) method, trained on decades of reanalysis data. GenCast generates an ensemble of stochastic 15-day global forecasts, at 12-hour steps and 0.25 degree latitude-longitude resolution, for over 80 surface and atmospheric variables, in 8 minutes. It has greater skill than ENS on 97.4% of 1320 targets we evaluated, and better predicts extreme weather, tropical cyclones, and wind power production. This work helps open the next chapter in operational weather forecasting, where critical weather-dependent decisions are made with greater accuracy and efficiency.", "year": 2023, "venue": "arXiv.org", "authors": [ "Ilan Price", "Alvaro Sanchez-Gonzalez", "Ferran Alet", "T. Ewalds", "Andrew El-Kadi", "Jacklynn Stott", "Shakir Mohamed", "Peter W. Battaglia", "Remi Lam", "Matthew Willson" ], "externalIds": { "DBLP": "journals/corr/abs-2312-15796", "ArXiv": "2312.15796", "DOI": "10.48550/arXiv.2312.15796", "CorpusId": 266551167 }, "url": "https://www.semanticscholar.org/paper/f6e7e8b96b8ce1f32973318b3b88bc54f6eb4ab7", "referenceCount": 65, "citationCount": 62, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Physics" ] }, { "title": "Nearly d-Linear Convergence Bounds for Diffusion Models via Stochastic Localization", "abstract": "Denoising diffusions are a powerful method to generate approximate samples from high-dimensional data distributions. Recent results provide polynomial bounds on their convergence rate, assuming $L^2$-accurate scores. Until now, the tightest bounds were either superlinear in the data dimension or required strong smoothness assumptions. We provide the first convergence bounds which are linear in the data dimension (up to logarithmic factors) assuming only finite second moments of the data distribution. We show that diffusion models require at most $\\tilde O(\\frac{d \\log^2(1/\\delta)}{\\varepsilon^2})$ steps to approximate an arbitrary distribution on $\\mathbb{R}^d$ corrupted with Gaussian noise of variance $\\delta$ to within $\\varepsilon^2$ in KL divergence. Our proof extends the Girsanov-based methods of previous works. We introduce a refined treatment of the error from discretizing the reverse SDE inspired by stochastic localization.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Joe Benton", "Valentin De Bortoli", "A. Doucet", "George Deligiannidis" ], "externalIds": { "DBLP": "conf/iclr/BentonBDD24", "ArXiv": "2308.03686", "CorpusId": 260682793 }, "url": "https://www.semanticscholar.org/paper/451f022af13697d943d2f31b8d885e6b7274abb8", "referenceCount": 65, "citationCount": 53, "influentialCitationCount": 13, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Multilevel Diffusion: Infinite Dimensional Score-Based Diffusion Models for Image Generation", "abstract": "Score-based diffusion models (SBDM) have recently emerged as state-of-the-art approaches for image generation. Existing SBDMs are typically formulated in a finite-dimensional setting, where images are considered as tensors of a finite size. 
This paper develops SBDMs in the infinite-dimensional setting, that is, we model the training data as functions supported on a rectangular domain. Besides the quest for generating images at ever higher resolution our primary motivation is to create a well-posed infinite-dimensional learning problem so that we can discretize it consistently on multiple resolution levels. We thereby hope to obtain diffusion models that generalize across different resolution levels and improve the efficiency of the training process. We demonstrate how to overcome two shortcomings of current SBDM approaches in the infinite-dimensional setting. First, we modify the forward process to ensure that the latent distribution is well-defined in the infinite-dimensional setting using the notion of trace class operators. Second, we illustrate that approximating the score function with an operator network, in our case Fourier neural operators (FNOs), is beneficial for multilevel training. After deriving the forward process in the infinite-dimensional setting and reverse processes for finite approximations, we show their well-posedness, derive adequate discretizations, and investigate the role of the latent distributions. We provide first promising numerical results on two datasets, MNIST and material structures. In particular, we show that multilevel training is feasible within this framework.", "year": 2023, "venue": "arXiv.org", "authors": [ "Paul Hagemann", "Lars Ruthotto", "G. Steidl", "Ni Yang" ], "externalIds": { "DBLP": "journals/corr/abs-2303-04772", "ArXiv": "2303.04772", "DOI": "10.48550/arXiv.2303.04772", "CorpusId": 257405098 }, "url": "https://www.semanticscholar.org/paper/95db688b65f532950f5f3a969760692ca12107d7", "referenceCount": 78, "citationCount": 19, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Q-Diffusion: Quantizing Diffusion Models", "abstract": "Diffusion models have achieved great success in image synthesis through iterative noise estimation using deep neural networks. However, the slow inference, high memory consumption, and computation intensity of the noise estimation model hinder the efficient adoption of diffusion models. Although post-training quantization (PTQ) is considered a go-to compression method for other tasks, it does not work out-of-the-box on diffusion models. We propose a novel PTQ method specifically tailored towards the unique multi-timestep pipeline and model architecture of the diffusion models, which compresses the noise estimation network to accelerate the generation process. We identify the key difficulty of diffusion model quantization as the changing output distributions of noise estimation networks over multiple time steps and the bimodal activation distribution of the shortcut layers within the noise estimation network. We tackle these challenges with timestep-aware calibration and split shortcut quantization in this work. Experimental results show that our proposed method is able to quantize full-precision unconditional diffusion models into 4-bit while maintaining comparable performance (small FID change of at most 2.34 compared to >100 for traditional PTQ) in a training-free manner. 
Our approach can also be applied to text-guided image generation, where we can run stable diffusion in 4-bit weights with high generation quality for the first time.", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Xiuyu Li", "Long Lian", "Yijia Liu", "Hua Yang", "Zhen Dong", "Daniel Kang", "Shanghang Zhang", "Kurt Keutzer" ], "externalIds": { "DBLP": "conf/iccv/LiLLYDKZK23", "ArXiv": "2302.04304", "DOI": "10.1109/ICCV51070.2023.01608", "CorpusId": 256697129 }, "url": "https://www.semanticscholar.org/paper/489ab1945feb21f17b3efbcf40726c8cbb52bb75", "referenceCount": 71, "citationCount": 75, "influentialCitationCount": 18, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Diffusion Posterior Sampling for General Noisy Inverse Problems", "abstract": "Diffusion models have been recently studied as powerful generative inverse problem solvers, owing to their high quality reconstructions and the ease of combining existing iterative solvers. However, most works focus on solving simple linear inverse problems in noiseless settings, which significantly under-represents the complexity of real-world problems. In this work, we extend diffusion solvers to efficiently handle general noisy (non)linear inverse problems via approximation of the posterior sampling. Interestingly, the resulting posterior sampling scheme is a blended version of diffusion sampling with the manifold constrained gradient without a strict measurement consistency projection step, yielding a more desirable generative path in noisy settings compared to the previous studies. Our method demonstrates that diffusion models can incorporate various measurement noise statistics such as Gaussian and Poisson, and also efficiently handle noisy nonlinear inverse problems such as Fourier phase retrieval and non-uniform deblurring. Code available at https://github.com/DPS2022/diffusion-posterior-sampling", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Hyungjin Chung", "Jeongsol Kim", "Michael T. McCann", "M. Klasky", "J. C. Ye" ], "externalIds": { "DBLP": "conf/iclr/ChungKMKY23", "ArXiv": "2209.14687", "DOI": "10.48550/arXiv.2209.14687", "CorpusId": 252596252 }, "url": "https://www.semanticscholar.org/paper/61e46884567be7cad12e999365b16a8d3414b678", "referenceCount": 45, "citationCount": 434, "influentialCitationCount": 128, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Learned Reconstruction Methods With Convergence Guarantees: A survey of concepts and applications", "abstract": "In recent years, deep learning has achieved remarkable empirical success for image reconstruction. This has catalyzed an ongoing quest for the precise characterization of the correctness and reliability of data-driven methods in critical use cases, for instance, in medical imaging. Notwithstanding the excellent performance and efficacy of deep learning-based methods, concerns have been raised regarding the approaches’ stability, or lack thereof, with serious practical implications. Significant advances have been made in recent years to unravel the inner workings of data-driven image recovery methods, challenging their widely perceived black-box nature. In this article, we specify relevant notions of convergence for data-driven image reconstruction, which forms the basis of a survey of learned methods with mathematically rigorous reconstruction guarantees. 
An example that is highlighted is the role of input-convex neural networks (ICNNs), offering the possibility to combine the power of deep learning with classical convex regularization theory for devising methods that are provably convergent. This survey article is aimed at both methodological researchers seeking to advance the frontiers of our understanding of data-driven image reconstruction methods as well as practitioners by providing an accessible description of useful convergence concepts and by placing some of the existing empirical practices on a solid mathematical foundation.", "year": 2022, "venue": "IEEE Signal Processing Magazine", "authors": [ "Subhadip Mukherjee", "A. Hauptmann", "O. Öktem", "M. Pereyra", "C. Schönlieb" ], "externalIds": { "DBLP": "journals/spm/MukherjeeHOPS23", "ArXiv": "2206.05431", "DOI": "10.1109/MSP.2022.3207451", "CorpusId": 255417311 }, "url": "https://www.semanticscholar.org/paper/3f142ab28706f73ec59d991b35ee964aa7b16299", "referenceCount": 63, "citationCount": 43, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "gDDIM: Generalized denoising diffusion implicit models", "abstract": "Our goal is to extend the denoising diffusion implicit model (DDIM) to general diffusion models~(DMs) besides isotropic diffusions. Instead of constructing a non-Markov noising process as in the original DDIM, we examine the mechanism of DDIM from a numerical perspective. We discover that the DDIM can be obtained by using some specific approximations of the score when solving the corresponding stochastic differential equation. We present an interpretation of the accelerating effects of DDIM that also explains the advantages of a deterministic sampling scheme over the stochastic one for fast sampling. Building on this insight, we extend DDIM to general DMs, coined generalized DDIM (gDDIM), with a small but delicate modification in parameterizing the score network. We validate gDDIM in two non-isotropic DMs: Blurring diffusion model (BDM) and Critically-damped Langevin diffusion model (CLD). We observe more than 20 times acceleration in BDM. In the CLD, a diffusion model by augmenting the diffusion process with velocity, our algorithm achieves an FID score of 2.26, on CIFAR10, with only 50 number of score function evaluations~(NFEs) and an FID score of 2.86 with only 27 NFEs. Code is available at https://github.com/qsh-zh/gDDIM", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Qinsheng Zhang", "Molei Tao", "Yongxin Chen" ], "externalIds": { "ArXiv": "2206.05564", "DBLP": "journals/corr/abs-2206-05564", "DOI": "10.48550/arXiv.2206.05564", "CorpusId": 249626510 }, "url": "https://www.semanticscholar.org/paper/670bab7b71be5e432b0dc60f406a6115cf6c0633", "referenceCount": 50, "citationCount": 91, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Denoising Diffusion Restoration Models", "abstract": "Many interesting tasks in image restoration can be cast as linear inverse problems. A recent family of approaches for solving these problems uses stochastic algorithms that sample from the posterior distribution of natural images given the measurements. However, efficient solutions often require problem-specific supervised training to model the posterior, whereas unsupervised methods that are not problem-specific typically rely on inefficient iterative methods. 
This work addresses these issues by introducing Denoising Diffusion Restoration Models (DDRM), an efficient, unsupervised posterior sampling method. Motivated by variational inference, DDRM takes advantage of a pre-trained denoising diffusion generative model for solving any linear inverse problem. We demonstrate DDRM's versatility on several image datasets for super-resolution, deblurring, inpainting, and colorization under various amounts of measurement noise. DDRM outperforms the current leading unsupervised methods on the diverse ImageNet dataset in reconstruction quality, perceptual quality, and runtime, being 5x faster than the nearest competitor. DDRM also generalizes well for natural images out of the distribution of the observed ImageNet training set.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Bahjat Kawar", "Michael Elad", "Stefano Ermon", "Jiaming Song" ], "externalIds": { "ArXiv": "2201.11793", "DBLP": "journals/corr/abs-2201-11793", "CorpusId": 246411364 }, "url": "https://www.semanticscholar.org/paper/3d3c5fcbc40aadccceda58d3d9c5cd00588ea0b7", "referenceCount": 70, "citationCount": 552, "influentialCitationCount": 73, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Palette: Image-to-Image Diffusion Models", "abstract": "This paper develops a unified framework for image-to-image translation based on conditional diffusion models and evaluates this framework on four challenging image-to-image translation tasks, namely colorization, inpainting, uncropping, and JPEG restoration. Our simple implementation of image-to-image diffusion models outperforms strong GAN and regression baselines on all tasks, without task-specific hyper-parameter tuning, architecture customization, or any auxiliary loss or sophisticated new techniques needed. We uncover the impact of an L2 vs. L1 loss in the denoising diffusion objective on sample diversity, and demonstrate the importance of self-attention in the neural architecture through empirical studies. Importantly, we advocate a unified evaluation protocol based on ImageNet, with human evaluation and sample quality scores (FID, Inception Score, Classification Accuracy of a pre-trained ResNet-50, and Perceptual Distance against original images). We expect this standardized evaluation protocol to play a role in advancing image-to-image translation research. Finally, we show that a generalist, multi-task diffusion model performs as well or better than task-specific specialist counterparts. Check out https://diffusion-palette.github.io/ for an overview of the results and code.", "year": 2021, "venue": "International Conference on Computer Graphics and Interactive Techniques", "authors": [ "Chitwan Saharia", "William Chan", "Huiwen Chang", "Chris A. Lee", "Jonathan Ho", "Tim Salimans", "David J. Fleet", "Mohammad Norouzi" ], "externalIds": { "DBLP": "journals/corr/abs-2111-05826", "ArXiv": "2111.05826", "DOI": "10.1145/3528233.3530757", "CorpusId": 243938678 }, "url": "https://www.semanticscholar.org/paper/37c9c4e7648f639c0b36f150fc6c6c90b3682f4a", "referenceCount": 114, "citationCount": 1199, "influentialCitationCount": 71, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Image Super-Resolution via Iterative Refinement", "abstract": "We present SR3, an approach to image Super-Resolution via Repeated Refinement. SR3 adapts denoising diffusion probabilistic models (Ho et al. 2020), (Sohl-Dickstein et al. 
2015) to image-to-image translation, and performs super-resolution through a stochastic iterative denoising process. Output images are initialized with pure Gaussian noise and iteratively refined using a U-Net architecture that is trained on denoising at various noise levels, conditioned on a low-resolution input image. SR3 exhibits strong performance on super-resolution tasks at different magnification factors, on faces and natural images. We conduct human evaluation on a standard 8× face super-resolution task on CelebA-HQ for which SR3 achieves a fool rate close to 50%, suggesting photo-realistic outputs, while GAN baselines do not exceed a fool rate of 34%. We evaluate SR3 on a 4× super-resolution task on ImageNet, where SR3 outperforms baselines in human evaluation and classification accuracy of a ResNet-50 classifier trained on high-resolution images. We further show the effectiveness of SR3 in cascaded image generation, where a generative model is chained with super-resolution models to synthesize high-resolution images with competitive FID scores on the class-conditional 256×256 ImageNet generation challenge.", "year": 2021, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Chitwan Saharia", "Jonathan Ho", "William Chan", "Tim Salimans", "David J. Fleet", "Mohammad Norouzi" ], "externalIds": { "DBLP": "journals/corr/abs-2104-07636", "ArXiv": "2104.07636", "DOI": "10.1109/TPAMI.2022.3204461", "CorpusId": 233241040, "PubMed": "36094974" }, "url": "https://www.semanticscholar.org/paper/8a1ea7b6e7e834d146ad782be5d63f57f806a9cc", "referenceCount": 74, "citationCount": 1305, "influentialCitationCount": 130, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Medicine" ] }, { "title": "Score-Based Generative Modeling through Stochastic Differential Equations", "abstract": "Creating noise from data is easy; creating data from noise is generative modeling. We present a stochastic differential equation (SDE) that smoothly transforms a complex data distribution to a known prior distribution by slowly injecting noise, and a corresponding reverse-time SDE that transforms the prior distribution back into the data distribution by slowly removing the noise. Crucially, the reverse-time SDE depends only on the time-dependent gradient field (\\aka, score) of the perturbed data distribution. By leveraging advances in score-based generative modeling, we can accurately estimate these scores with neural networks, and use numerical SDE solvers to generate samples. We show that this framework encapsulates previous approaches in score-based generative modeling and diffusion probabilistic modeling, allowing for new sampling procedures and new modeling capabilities. In particular, we introduce a predictor-corrector framework to correct errors in the evolution of the discretized reverse-time SDE. We also derive an equivalent neural ODE that samples from the same distribution as the SDE, but additionally enables exact likelihood computation, and improved sampling efficiency. In addition, we provide a new way to solve inverse problems with score-based models, as demonstrated with experiments on class-conditional generation, image inpainting, and colorization. 
Combined with multiple architectural improvements, we achieve record-breaking performance for unconditional image generation on CIFAR-10 with an Inception score of 9.89 and FID of 2.20, a competitive likelihood of 2.99 bits/dim, and demonstrate high fidelity generation of 1024 x 1024 images for the first time from a score-based generative model.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Yang Song", "Jascha Narain Sohl-Dickstein", "Diederik P. Kingma", "Abhishek Kumar", "Stefano Ermon", "Ben Poole" ], "externalIds": { "DBLP": "journals/corr/abs-2011-13456", "ArXiv": "2011.13456", "MAG": "3110257065", "CorpusId": 227209335 }, "url": "https://www.semanticscholar.org/paper/633e2fbfc0b21e959a244100937c5853afca4853", "referenceCount": 66, "citationCount": 4108, "influentialCitationCount": 956, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Methods for Pruning Deep Neural Networks", "abstract": "This paper presents a survey of methods for pruning deep neural networks. It begins by categorising over 150 studies based on the underlying approach used and then focuses on three categories: methods that use magnitude based pruning, methods that utilise clustering to identify redundancy, and methods that use sensitivity analysis to assess the effect of pruning. Some of the key influencing studies within these categories are presented to highlight the underlying approaches and results achieved. Most studies present results which are distributed in the literature as new architectures, algorithms and data sets have developed with time, making comparison across different studies difficult. The paper therefore provides a resource for the community that can be used to quickly compare the results from many different methods on a variety of data sets, and a range of architectures, including AlexNet, ResNet, DenseNet and VGG. The resource is illustrated by comparing the results published for pruning AlexNet and ResNet50 on ImageNet and ResNet56 and VGG16 on the CIFAR10 data to reveal which pruning methods work well in terms of retaining accuracy whilst achieving good compression rates. The paper concludes by identifying some research gaps and promising directions for future research.", "year": 2020, "venue": "IEEE Access", "authors": [ "S. Vadera", "Salem Ameen" ], "externalIds": { "DBLP": "journals/access/VaderaA22", "MAG": "3096215947", "ArXiv": "2011.00241", "DOI": "10.1109/ACCESS.2022.3182659", "CorpusId": 226226764 }, "url": "https://www.semanticscholar.org/paper/0fa13a5ef36168ff3fd08b03fd30f1f935d6a18a", "referenceCount": 256, "citationCount": 78, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Denoising Diffusion Implicit Models", "abstract": "Denoising diffusion probabilistic models (DDPMs) have achieved high quality image generation without adversarial training, yet they require simulating a Markov chain for many steps to produce a sample. To accelerate sampling, we present denoising diffusion implicit models (DDIMs), a more efficient class of iterative implicit probabilistic models with the same training procedure as DDPMs. In DDPMs, the generative process is defined as the reverse of a Markovian diffusion process. We construct a class of non-Markovian diffusion processes that lead to the same training objective, but whose reverse process can be much faster to sample from. 
We empirically demonstrate that DDIMs can produce high quality samples $10 \\times$ to $50 \\times$ faster in terms of wall-clock time compared to DDPMs, allow us to trade off computation for sample quality, and can perform semantically meaningful image interpolation directly in the latent space.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Jiaming Song", "Chenlin Meng", "Stefano Ermon" ], "externalIds": { "ArXiv": "2010.02502", "DBLP": "journals/corr/abs-2010-02502", "MAG": "3092442149", "CorpusId": 222140788 }, "url": "https://www.semanticscholar.org/paper/014576b866078524286802b1d0e18628520aa886", "referenceCount": 47, "citationCount": 4422, "influentialCitationCount": 877, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Denoising Diffusion Probabilistic Models", "abstract": "We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional CIFAR10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art FID score of 3.17. On 256x256 LSUN, we obtain sample quality similar to ProgressiveGAN. Our implementation is available at this https URL", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Jonathan Ho", "Ajay Jain", "P. Abbeel" ], "externalIds": { "ArXiv": "2006.11239", "MAG": "3100572490", "DBLP": "journals/corr/abs-2006-11239", "CorpusId": 219955663 }, "url": "https://www.semanticscholar.org/paper/5c126ae3421f05768d8edd97ecd44b1364e2c99a", "referenceCount": 73, "citationCount": 10778, "influentialCitationCount": 2337, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Generative Modeling by Estimating Gradients of the Data Distribution", "abstract": "We introduce a new generative model where samples are produced via Langevin dynamics using gradients of the data distribution estimated with score matching. Because gradients can be ill-defined and hard to estimate when the data resides on low-dimensional manifolds, we perturb the data with different levels of Gaussian noise, and jointly estimate the corresponding scores, i.e., the vector fields of gradients of the perturbed data distribution for all noise levels. For sampling, we propose an annealed Langevin dynamics where we use gradients corresponding to gradually decreasing noise levels as the sampling process gets closer to the data manifold. Our framework allows flexible model architectures, requires no sampling during training or the use of adversarial methods, and provides a learning objective that can be used for principled model comparisons. Our models produce samples comparable to GANs on MNIST, CelebA and CIFAR-10 datasets, achieving a new state-of-the-art inception score of 8.87 on CIFAR-10. 
Additionally, we demonstrate that our models learn effective representations via image inpainting experiments.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Yang Song", "Stefano Ermon" ], "externalIds": { "MAG": "2971034910", "ArXiv": "1907.05600", "DBLP": "conf/nips/SongE19", "CorpusId": 196470871 }, "url": "https://www.semanticscholar.org/paper/965359b3008ab50dd04e171551220ec0e7f83aba", "referenceCount": 68, "citationCount": 2763, "influentialCitationCount": 330, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Multilevel Nested Simulation for Efficient Risk Estimation", "abstract": "We investigate the problem of computing a nested expectation of the form $\\mathbb{P}[\\mathbb{E}[X|Y] \\!\\geq\\!0]\\!=\\!\\mathbb{E}[\\textrm{H}(\\mathbb{E}[X|Y])]$ where $\\textrm{H}$ is the Heaviside function. This nested expectation appears, for example, when estimating the probability of a large loss from a financial portfolio. We present a method that combines the idea of using Multilevel Monte Carlo (MLMC) for nested expectations with the idea of adaptively selecting the number of samples in the approximation of the inner expectation, as proposed by (Broadie et al., 2011). We propose and analyse an algorithm that adaptively selects the number of inner samples on each MLMC level and prove that the resulting MLMC method with adaptive sampling has an $\\mathcal{O}\\left( \\varepsilon^{-2}|\\log\\varepsilon|^2 \\right)$ complexity to achieve a root mean-squared error $\\varepsilon$. The theoretical analysis is verified by numerical experiments on a simple model problem. We also present a stochastic root-finding algorithm that, combined with our adaptive methods, can be used to compute other risk measures such as Value-at-Risk (VaR) and Conditional Value-at-Risk (CVaR), with the latter being achieved with $\\mathcal{O}\\left(\\varepsilon^{-2}\\right)$ complexity.", "year": 2018, "venue": "SIAM/ASA J. Uncertain. Quantification", "authors": [ "M. Giles", "A. Haji-Ali" ], "externalIds": { "DBLP": "journals/juq/GilesH19", "MAG": "2962965244", "ArXiv": "1802.05016", "DOI": "10.1137/18M1173186", "CorpusId": 125273380 }, "url": "https://www.semanticscholar.org/paper/48035f74a807b428eb511de5572cdbf26bfc7d69", "referenceCount": 18, "citationCount": 45, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics", "Economics" ] }, { "title": "Deep Unsupervised Learning using Nonequilibrium Thermodynamics", "abstract": "A central problem in machine learning involves modeling complex data-sets using highly flexible families of probability distributions in which learning, sampling, inference, and evaluation are still analytically or computationally tractable. Here, we develop an approach that simultaneously achieves both flexibility and tractability. The essential idea, inspired by non-equilibrium statistical physics, is to systematically and slowly destroy structure in a data distribution through an iterative forward diffusion process. We then learn a reverse diffusion process that restores structure in data, yielding a highly flexible and tractable generative model of the data. This approach allows us to rapidly learn, sample from, and evaluate probabilities in deep generative models with thousands of layers or time steps, as well as to compute conditional and posterior probabilities under the learned model. 
We additionally release an open source reference implementation of the algorithm.", "year": 2015, "venue": "International Conference on Machine Learning", "authors": [ "Jascha Narain Sohl-Dickstein", "Eric A. Weiss", "Niru Maheswaranathan", "S. Ganguli" ], "externalIds": { "MAG": "2129069237", "DBLP": "journals/corr/Sohl-DicksteinW15", "ArXiv": "1503.03585", "CorpusId": 14888175 }, "url": "https://www.semanticscholar.org/paper/2dcef55a07f8607a819c21fe84131ea269cc2e3c", "referenceCount": 60, "citationCount": 4757, "influentialCitationCount": 356, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Multilevel Monte Carlo Path Simulation", "abstract": "We show that multigrid ideas can be used to reduce the computational complexity of estimating an expected value arising from a stochastic differential equation using Monte Carlo path simulations. In the simplest case of a Lipschitz payoff and an Euler discretisation, the computational cost to achieve an accuracy of $O(\\varepsilon)$ is reduced from $O(\\varepsilon^{-3})$ to $O(\\varepsilon^{-2} (\\log \\varepsilon)^2)$. The analysis is supported by numerical results showing significant computational savings.", "year": 2008, "venue": "Operational Research", "authors": [ "M. Giles" ], "externalIds": { "MAG": "2163715525", "DBLP": "journals/ior/Giles08", "DOI": "10.1287/opre.1070.0496", "CorpusId": 3000492 }, "url": "https://www.semanticscholar.org/paper/9011df67b7e93b0727162b0778517e068cfbf902", "referenceCount": 32, "citationCount": 1476, "influentialCitationCount": 311, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "The Bayesian choice : from decision-theoretic foundations to computational implementation", "abstract": "Winner of the 2004 DeGroot Prize This paperback edition, a reprint of the 2001 edition, is a graduate-level textbook that introduces Bayesian statistics and decision theory. It covers both the basic ideas of statistical theory, and also some of the more modern and advanced topics of Bayesian statistics such as complete class theorems, the Stein effect, Bayesian model choice, hierarchical and empirical Bayes modeling, Monte Carlo integration including Gibbs sampling, and other MCMC techniques. It was awarded the 2004 DeGroot Prize by the International Society for Bayesian Analysis (ISBA) for setting \"a new standard for modern textbooks dealing with Bayesian methods, especially those using MCMC techniques, and that it is a worthy successor to DeGroot's and Berger's earlier texts\".", "year": 2007, "venue": "", "authors": [ "C. Robert" ], "externalIds": { "MAG": "1598928703", "CorpusId": 50937448 }, "url": "https://www.semanticscholar.org/paper/9a1227f2fc93d22e13c649e97cfffafc97f3127e", "referenceCount": 2, "citationCount": 1344, "influentialCitationCount": 150, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Pattern Recognition and Machine Learning", "abstract": null, "year": 2006, "venue": "Technometrics", "authors": [ "Radford M. 
Neal" ], "externalIds": { "MAG": "1663973292", "DBLP": "journals/jei/BishopN07", "DOI": "10.1007/978-0-387-45528-0", "CorpusId": 31993898 }, "url": "https://www.semanticscholar.org/paper/668b1277fbece28c4841eeab1c97e4ebd0079700", "referenceCount": 361, "citationCount": 36225, "influentialCitationCount": 4245, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Statistical and Computational Inverse Problems", "abstract": "Classification Without Interaction”), and 13 (“Two-Way Crossed Classification With Interaction”). Every chapter contains two or more numerical example with the exception of Chapters 14 (“Three-Way and Higher-Order Crossed Classifications”) and 17 (“General r-Way Nested Classification”), which only contain one example each. Examples appear in the estimation, confidence interval, and hypothesis testing sections. Distribution of estimators is only discussed for the models in Chapters 11 and 15 (“Two-Way Nested Classification”). Chapters 11, 13, 15, and 16 (“Three-Way Nested Classification”) contain information on design considerations involving unbalanced experiments. The appendixes contain basic theoretical and methodological results useful in the development of unbalanced random models as well as information on the capabilities of widely available software. Packages discussed are SAS, SPSS, BMDP, S–PLUS, GENSTAT, and BUGS. The book is well organized and focused. It contains extensive coverage on crossed and nested unbalanced models. Because of the number of topics, the depth of coverage is occasionally limited. This is only a minor issue, since there are always a substantial number of references given. The organization of the book and the presentation of the material make difficult subject matter easier to follow. The main drawback to the book is that it deals only with completely random univariate models. Given the volume of information in the book, however, this is understandable. The authors point out this shortcoming in the Preface and suggest that a future work covering these topics may be forthcoming. For the application-oriented practitioner, a small disadvantage is that a number of the estimation approaches discussed, while interesting, cannot be found in the more commonly used statistical software packages. Regardless, the book makes an excellent resource for anyone working with unbalanced random models.", "year": 2006, "venue": "Technometrics", "authors": [ "F. Liang", "Jianhua Huang" ], "externalIds": { "MAG": "41328014", "DBLP": "journals/technometrics/LiangH06", "DOI": "10.1198/tech.2006.s351", "CorpusId": 43268482 }, "url": "https://www.semanticscholar.org/paper/4edb0aa5f6ee8268f6b278858005c0e6e9127120", "referenceCount": 4, "citationCount": 1406, "influentialCitationCount": 167, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Multilevel Monte Carlo Methods", "abstract": null, "year": 2001, "venue": "Large-Scale Scientific Computing", "authors": [ "S. Heinrich" ], "externalIds": { "MAG": "1835030294", "DBLP": "conf/lssc/Heinrich01", "DOI": "10.1007/3-540-45346-6_5", "CorpusId": 8539292 }, "url": "https://www.semanticscholar.org/paper/d512f19f7cdf5994df388d8bc5c5a828c311b870", "referenceCount": 20, "citationCount": 817, "influentialCitationCount": 118, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Reverse-time diffusion equation models", "abstract": null, "year": 1982, "venue": "", "authors": [ "B. 
Anderson" ], "externalIds": { "MAG": "1991111872", "DOI": "10.1016/0304-4149(82)90051-5", "CorpusId": 3897405 }, "url": "https://www.semanticscholar.org/paper/c7a5128b45edb4db9105ec5167210b887617ddf2", "referenceCount": 17, "citationCount": 625, "influentialCitationCount": 71, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "I 2 SB: Image-to-image Schr¨odinger bridge", "abstract": null, "year": null, "venue": "Proceedings of the 40th International Conference on Machine Learning", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Progressive distillation for fast sampling of diffusion models", "abstract": null, "year": null, "venue": "International Conference on Learning Representations", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Hand-object reconstruction via interaction-aware graph attention mechanism": { "paper_title": "Hand-object reconstruction via interaction-aware graph attention mechanism", "arxiv_id": "2409.17629v1", "keyword": "graph neural network", "authors": [ "Taeyun Woo", "Tae-Kyun Kim", "Jinah Park" ], "references": [ { "title": "DexFuncGrasp: A Robotic Dexterous Functional Grasp Dataset Constructed from a Cost-Effective Real-Simulation Annotation System", "abstract": "Robot grasp dataset is the basis of designing the robot's grasp generation model. Compared with the building grasp dataset for Low-DOF grippers, it is harder for High-DOF dexterous robot hand. Most current datasets meet the needs of generating stable grasps, but they are not suitable for dexterous hands to complete human-like functional grasp, such as grasp the handle of a cup or pressing the button of a flashlight, so as to enable robots to complete subsequent functional manipulation action autonomously, and there is no dataset with functional grasp pose annotations at present. This paper develops a unique Cost-Effective Real-Simulation Annotation System by leveraging natural hand's actions. The system is able to capture a functional grasp of a dexterous hand in a simulated environment assisted by human demonstration in real world. By using this system, dexterous grasp data can be collected efficiently as well as cost-effective. Finally, we construct the first dexterous functional grasp dataset with rich pose annotations. A Functional Grasp Synthesis Model is also provided to validate the effectiveness of the proposed system and dataset. 
Our project page is: https://hjlllll.github.io/DFG/.", "year": 2024, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Jinglue Hang", "Xiangbo Lin", "Tianqiang Zhu", "Xuanheng Li", "Rina Wu", "Xiaohong Ma", "Yi Sun" ], "externalIds": { "DBLP": "conf/aaai/HangLZLWM024", "DOI": "10.1609/aaai.v38i9.28897", "CorpusId": 268692447 }, "url": "https://www.semanticscholar.org/paper/27957676bc0623bda8343660ad6a4d00404739a5", "referenceCount": 22, "citationCount": 3, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Physically Plausible Realistic Grip-Lift Interaction Based on Hand Kinematics in VR", "abstract": "Immersive technology, refers to various novel ways of creating and interacting with applications and experiences, e.g., virtual reality (VR), has been used in various simulations and training where preparing real/physical settings is not ideal or possible, or where the use of virtual contents is otherwise beneficial. Realizing realistic interactions with virtual content is crucial for a quality experience and the effectiveness of such simulation and training. In this paper, we propose a kinematics-based realistic hand interaction method to enable a physically plausible grip-lifting experience in VR. The method reflects three kinematic characteristics of the hand: the force at contact points, finger flexion, and the speed of hand/finger motion, and we developed a grip-lift interaction prototype using the proposed method. To examine the sense of realism and hand poses during the grip-lift interaction, we conducted a human subjects experiment using the prototype, resulting in positive effects on the perceived realism and usefulness of the interaction. Grip-lifting is a fundamental interaction technique that is involved in most embodied interaction scenarios. Our method would contribute to the design and development of realistic virtual experiences, of which we will discuss the implications and potential based on our findings.", "year": 2023, "venue": "Electronics", "authors": [ "Hyeon-Woo Nam", "Chanhee Kim", "Kangsoo Kim", "Jong-Il Park" ], "externalIds": { "DOI": "10.3390/electronics12132794", "CorpusId": 259725362 }, "url": "https://www.semanticscholar.org/paper/8d6425752084def4dbff74005e1e0dcd0f23a6f0", "referenceCount": 31, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Coarse-to-Fine Hand–Object Pose Estimation with Interaction-Aware Graph Convolutional Network", "abstract": "The analysis of hand–object poses from RGB images is important for understanding and imitating human behavior and acts as a key factor in various applications. In this paper, we propose a novel coarse-to-fine two-stage framework for hand–object pose estimation, which explicitly models hand–object relations in 3D pose refinement rather than in the process of converting 2D poses to 3D poses. Specifically, in the coarse stage, 2D heatmaps of hand and object keypoints are obtained from RGB image and subsequently fed into pose regressor to derive coarse 3D poses. As for the fine stage, an interaction-aware graph convolutional network called InterGCN is introduced to perform pose refinement by fully leveraging the hand–object relations in 3D context. One major challenge in 3D pose refinement lies in the fact that relations between hand and object change dynamically according to different HOI scenarios. 
In response to this issue, we leverage both general and interaction-specific relation graphs to significantly enhance the capacity of the network to cover variations of HOI scenarios for successful 3D pose refinement. Extensive experiments demonstrate state-of-the-art performance of our approach on benchmark hand–object datasets.", "year": 2021, "venue": "Italian National Conference on Sensors", "authors": [ "Maomao Zhang", "Ao Li", "Honglei Liu", "Minghui Wang" ], "externalIds": { "DBLP": "journals/sensors/ZhangLLW21a", "PubMedCentral": "8662406", "DOI": "10.3390/s21238092", "CorpusId": 244940328, "PubMed": "34884096" }, "url": "https://www.semanticscholar.org/paper/de645b9b998a2f9aa2f78fe9a37b2fb9b8f3f4a4", "referenceCount": 36, "citationCount": 4, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Deeper Insights into Graph Convolutional Networks for Semi-Supervised Learning", "abstract": "\n \n Many interesting problems in machine learning are being revisited with new deep learning tools. For graph-based semi-supervised learning, a recent important development is graph convolutional networks (GCNs), which nicely integrate local vertex features and graph topology in the convolutional layers. Although the GCN model compares favorably with other state-of-the-art methods, its mechanisms are not clear and it still requires considerable amount of labeled data for validation and model selection. In this paper, we develop deeper insights into the GCN model and address its fundamental limits. First, we show that the graph convolution of the GCN model is actually a special form of Laplacian smoothing, which is the key reason why GCNs work, but it also brings potential concerns of over-smoothing with many convolutional layers. Second, to overcome the limits of the GCN model with shallow architectures, we propose both co-training and self-training approaches to train GCNs. Our approaches significantly improve GCNs in learning with very few labels, and exempt them from requiring additional labels for validation. Extensive experiments on benchmarks have verified our theory and proposals.\n \n", "year": 2018, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Qimai Li", "Zhichao Han", "Xiao-Ming Wu" ], "externalIds": { "MAG": "2951648099", "DBLP": "conf/aaai/LiHW18", "ArXiv": "1801.07606", "DOI": "10.1609/aaai.v32i1.11604", "CorpusId": 11118105 }, "url": "https://www.semanticscholar.org/paper/36652428740cd30d245d55889f01a7fb04a91c93", "referenceCount": 33, "citationCount": 2470, "influentialCitationCount": 198, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Embodied hands", "abstract": "Humans move their hands and bodies together to communicate and solve tasks. Capturing and replicating such coordinated activity is critical for virtual characters that behave realistically. Surprisingly, most methods treat the 3D modeling and tracking of bodies and hands separately. Here we formulate a model of hands and bodies interacting together and fit it to full-body 4D sequences. When scanning or capturing the full body in 3D, hands are small and often partially occluded, making their shape and pose hard to recover. To cope with low-resolution, occlusion, and noise, we develop a new model called MANO (hand Model with Articulated and Non-rigid defOrmations). MANO is learned from around 1000 high-resolution 3D scans of hands of 31 subjects in a wide variety of hand poses. 
The model is realistic, low-dimensional, captures non-rigid shape changes with pose, is compatible with standard graphics packages, and can fit any human hand. MANO provides a compact mapping from hand poses to pose blend shape corrections and a linear manifold of pose synergies. We attach MANO to a standard parameterized 3D body shape model (SMPL), resulting in a fully articulated body and hand model (SMPL+H). We illustrate SMPL+H by fitting complex, natural, activities of subjects captured with a 4D scanner. The fitting is fully automatic and results in full body models that move naturally with detailed hand motions and a realism not seen before in full body performance capture. The models and data are freely available for research purposes at http://mano.is.tue.mpg.de.", "year": 2017, "venue": "ACM Transactions on Graphics", "authors": [ "J. Romero", "Dimitrios Tzionas", "Michael J. Black" ], "externalIds": { "ArXiv": "2201.02610", "DBLP": "journals/tog/0002TB17", "MAG": "2768683308", "DOI": "10.1145/3130800.3130883", "CorpusId": 245838515 }, "url": "https://www.semanticscholar.org/paper/fab5008ca5ca2d328395c1138885618fc209904d", "referenceCount": 83, "citationCount": 356, "influentialCitationCount": 46, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Semi-Supervised Classification with Graph Convolutional Networks", "abstract": "We present a scalable approach for semi-supervised learning on graph-structured data that is based on an efficient variant of convolutional neural networks which operate directly on graphs. We motivate the choice of our convolutional architecture via a localized first-order approximation of spectral graph convolutions. Our model scales linearly in the number of graph edges and learns hidden layer representations that encode both local graph structure and features of nodes. In a number of experiments on citation networks and on a knowledge graph dataset we demonstrate that our approach outperforms related methods by a significant margin.", "year": 2016, "venue": "International Conference on Learning Representations", "authors": [ "Thomas Kipf", "M. Welling" ], "externalIds": { "ArXiv": "1609.02907", "MAG": "2519887557", "DBLP": "journals/corr/KipfW16", "CorpusId": 3144218 }, "url": "https://www.semanticscholar.org/paper/36eff562f65125511b5dfab68ce7f7a943c27478", "referenceCount": 38, "citationCount": 25291, "influentialCitationCount": 6216, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Adam: A Method for Stochastic Optimization", "abstract": "We introduce Adam, an algorithm for first-order gradient-based optimization of stochastic objective functions, based on adaptive estimates of lower-order moments. The method is straightforward to implement, is computationally efficient, has little memory requirements, is invariant to diagonal rescaling of the gradients, and is well suited for problems that are large in terms of data and/or parameters. The method is also appropriate for non-stationary objectives and problems with very noisy and/or sparse gradients. The hyper-parameters have intuitive interpretations and typically require little tuning. Some connections to related algorithms, on which Adam was inspired, are discussed. We also analyze the theoretical convergence properties of the algorithm and provide a regret bound on the convergence rate that is comparable to the best known results under the online convex optimization framework. 
Empirical results demonstrate that Adam works well in practice and compares favorably to other stochastic optimization methods. Finally, we discuss AdaMax, a variant of Adam based on the infinity norm.", "year": 2014, "venue": "International Conference on Learning Representations", "authors": [ "Diederik P. Kingma", "Jimmy Ba" ], "externalIds": { "MAG": "2964121744", "DBLP": "journals/corr/KingmaB14", "ArXiv": "1412.6980", "CorpusId": 6628106 }, "url": "https://www.semanticscholar.org/paper/a6cb366736791bcccc5c8639de5a8f9636bf87e8", "referenceCount": 26, "citationCount": 139990, "influentialCitationCount": 22063, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] } ] }, "Heterogeneous Hyper-Graph Neural Networks for Context-aware Human Activity Recognition": { "paper_title": "Heterogeneous Hyper-Graph Neural Networks for Context-aware Human Activity Recognition", "arxiv_id": "2409.17483v1", "keyword": "graph neural network", "authors": [ "Wen Ge", "Guanyi Mou", "Emmanuel O. Agu", "Kyumin Lee" ], "references": [ { "title": "HAR-GCNN: Deep Graph CNNs for Human Activity Recognition From Highly Unlabeled Mobile Sensor Data", "abstract": "The problem of human activity recognition from mobile sensor data applies to multiple domains, such as health monitoring, personal fitness, daily life logging, and senior care. A critical challenge for training human activity recognition models is data quality. Acquiring balanced datasets containing accurate activity labels requires humans to correctly annotate and potentially interfere with the subjects’ normal activities in real-time. Despite the likelihood of incorrect annotation or lack thereof, there is often an inherent chronology to human behavior. For example, we take a shower after we exercise. This implicit chronology can be used to learn unknown labels and classify future activities. In this work, we propose HAR-GCCN, a deep graph CNN model that leverages the correlation between chronologically adjacent sensor measurements to predict the correct labels for unclassified activities that have at least one activity label. We propose a new training strategy enforcing that the model predicts the missing activity labels by leveraging the known ones. HAR-GCCN shows superior performance relative to previously used baseline methods, improving classification accuracy by about 25% and up to 68% on different datasets.", "year": 2022, "venue": "2022 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)", "authors": [ "Abduallah A. Mohamed", "F. Lejarza", "Stephanie Cahail", "C. Claudel", "Edison Thomaz" ], "externalIds": { "DBLP": "journals/corr/abs-2203-03087", "DOI": "10.1109/PerComWorkshops53856.2022.9767259", "CorpusId": 247292383 }, "url": "https://www.semanticscholar.org/paper/bc6d519d134cb2cdc6dca5fa6e6bdb4f255c3e0e", "referenceCount": 15, "citationCount": 15, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "CRUFT: Context Recognition under Uncertainty using Fusion and Temporal Learning", "abstract": "Human context recognition (HCR), which involves determining a user’s current situation (or context), has long been an important task in context-aware systems. With the widespread ownership of smartphones, HCR methods that utilize signals from its built-in sensors have recently received increased attention. 
We propose Context Recognition under label Uncertainty using Fusion and Temporal Learning (CRUFT), a novel method to recognize a diverse set of smartphone user contexts, including long-term human activities, short-term human activities, and phone placement (pocket or bag in which the smartphone is carried). Context recognition is formulated as a multi-label classification task. CRUFT uses both handcrafted features and auto-learned deep learning features extracted from raw time-series data in two separate arms. The handcrafted arm includes a Multi-Layer Perceptron (MLP), while the raw data arm utilizes a Convolutional Neural Network (CNN) along with a Bi-Directional Long Short Term Memory (Bi-LSTM) model that exploits temporal correlations in the input stream. As smartphone sensor readings, assigned timestamps, and labels can be wrong sometimes, CRUFT integrates an uncertainty module. CRUFT outperforms the state-of-the-art baselines achieving 94.25% in overall Balanced Accuracy (BA), which improves the best performing baseline by 2.7%. Our detailed analyses demonstrate the non-trivial contributions of each component in CRUFT.", "year": 2020, "venue": "International Conference on Machine Learning and Applications", "authors": [ "Wenhang Ge", "E. Agu" ], "externalIds": { "DBLP": "conf/icmla/GeA20", "DOI": "10.1109/ICMLA51294.2020.00122", "CorpusId": 232062829 }, "url": "https://www.semanticscholar.org/paper/ab6f6a0e6a64d0127f579d6382b4cbbdc750bae4", "referenceCount": 15, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Moodable: On feasibility of instantaneous depression assessment using machine learning on voice samples with retrospectively harvested smartphone and social media data", "abstract": null, "year": 2020, "venue": "Smart Health", "authors": [ "Ada Dogrucu", "Alex Perucic", "Anabella Isaro", "Damon Ball", "E. Toto", "Elke A. Rundensteiner", "E. Agu", "Rachel Davis-Martin", "E. Boudreaux" ], "externalIds": { "MAG": "3016839817", "DOI": "10.1016/j.smhl.2020.100118", "CorpusId": 218795430 }, "url": "https://www.semanticscholar.org/paper/16a3eddd2f68bf9575d2725663a05e63e8d5be6c", "referenceCount": 43, "citationCount": 34, "influentialCitationCount": 8, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology" ] }, { "title": "LightGCN: Simplifying and Powering Graph Convolution Network for Recommendation", "abstract": "Graph Convolution Network (GCN) has become new state-of-the-art for collaborative filtering. Nevertheless, the reasons of its effectiveness for recommendation are not well understood. Existing work that adapts GCN to recommendation lacks thorough ablation analyses on GCN, which is originally designed for graph classification tasks and equipped with many neural network operations. However, we empirically find that the two most common designs in GCNs -- feature transformation and nonlinear activation -- contribute little to the performance of collaborative filtering. Even worse, including them adds to the difficulty of training and degrades recommendation performance. In this work, we aim to simplify the design of GCN to make it more concise and appropriate for recommendation. We propose a new model named LightGCN, including only the most essential component in GCN -- neighborhood aggregation -- for collaborative filtering. 
Specifically, LightGCN learns user and item embeddings by linearly propagating them on the user-item interaction graph, and uses the weighted sum of the embeddings learned at all layers as the final embedding. Such simple, linear, and neat model is much easier to implement and train, exhibiting substantial improvements (about 16.0% relative improvement on average) over Neural Graph Collaborative Filtering (NGCF) -- a state-of-the-art GCN-based recommender model -- under exactly the same experimental setting. Further analyses are provided towards the rationality of the simple LightGCN from both analytical and empirical perspectives.", "year": 2020, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "Xiangnan He", "Kuan Deng", "Xiang Wang", "Yan Li", "Yongdong Zhang", "Meng Wang" ], "externalIds": { "ArXiv": "2002.02126", "MAG": "3004578093", "DBLP": "journals/corr/abs-2002-02126", "DOI": "10.1145/3397271.3401063", "CorpusId": 211043589 }, "url": "https://www.semanticscholar.org/paper/3024f58826a5bce3378af94f677e8fb90cbb49e0", "referenceCount": 54, "citationCount": 2705, "influentialCitationCount": 574, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Heterogeneous Graph Neural Network", "abstract": "Representation learning in heterogeneous graphs aims to pursue a meaningful vector representation for each node so as to facilitate downstream applications such as link prediction, personalized recommendation, node classification, etc. This task, however, is challenging not only because of the demand to incorporate heterogeneous structural (graph) information consisting of multiple types of nodes and edges, but also due to the need for considering heterogeneous attributes or contents (e.g., text or image) associated with each node. Despite a substantial amount of effort has been made to homogeneous (or heterogeneous) graph embedding, attributed graph embedding as well as graph neural networks, few of them can jointly consider heterogeneous structural (graph) information as well as heterogeneous contents information of each node effectively. In this paper, we propose HetGNN, a heterogeneous graph neural network model, to resolve this issue. Specifically, we first introduce a random walk with restart strategy to sample a fixed size of strongly correlated heterogeneous neighbors for each node and group them based upon node types. Next, we design a neural network architecture with two modules to aggregate feature information of those sampled neighboring nodes. The first module encodes \"deep\" feature interactions of heterogeneous contents and generates content embedding for each node. The second module aggregates content (attribute) embeddings of different neighboring groups (types) and further combines them by considering the impacts of different groups to obtain the ultimate node embedding. Finally, we leverage a graph context loss and a mini-batch gradient descent procedure to train the model in an end-to-end manner. Extensive experiments on several datasets demonstrate that HetGNN can outperform state-of-the-art baselines in various graph mining tasks, i.e., link prediction, recommendation, node classification & clustering and inductive node classification & clustering.", "year": 2019, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Chuxu Zhang", "Dongjin Song", "Chao Huang", "A. Swami", "N. 
Chawla" ], "externalIds": { "DBLP": "conf/kdd/ZhangSHSC19", "MAG": "2965857891", "DOI": "10.1145/3292500.3330961", "CorpusId": 198952485 }, "url": "https://www.semanticscholar.org/paper/e3d662bbd0e5539fe22a85f3518f960595b9914e", "referenceCount": 39, "citationCount": 1106, "influentialCitationCount": 104, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Neural Graph Collaborative Filtering", "abstract": "Learning vector representations (aka. embeddings) of users and items lies at the core of modern recommender systems. Ranging from early matrix factorization to recently emerged deep learning based methods, existing efforts typically obtain a user's (or an item's) embedding by mapping from pre-existing features that describe the user (or the item), such as ID and attributes. We argue that an inherent drawback of such methods is that, the collaborative signal, which is latent in user-item interactions, is not encoded in the embedding process. As such, the resultant embeddings may not be sufficient to capture the collaborative filtering effect. In this work, we propose to integrate the user-item interactions - more specifically the bipartite graph structure - into the embedding process. We develop a new recommendation framework Neural Graph Collaborative Filtering (NGCF), which exploits the user-item graph structure by propagating embeddings on it. This leads to the expressive modeling of high-order connectivity in user-item graph, effectively injecting the collaborative signal into the embedding process in an explicit manner. We conduct extensive experiments on three public benchmarks, demonstrating significant improvements over several state-of-the-art models like HOP-Rec [39] and Collaborative Memory Network [5]. Further analysis verifies the importance of embedding propagation for learning better user and item representations, justifying the rationality and effectiveness of NGCF. Codes are available at https://github.com/xiangwang1223/neural_graph_collaborative_filtering.", "year": 2019, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "Xiang Wang", "Xiangnan He", "Meng Wang", "Fuli Feng", "Tat-Seng Chua" ], "externalIds": { "MAG": "2945827670", "ArXiv": "1905.08108", "DBLP": "conf/sigir/Wang0WFC19", "DOI": "10.1145/3331184.3331267", "CorpusId": 150380651 }, "url": "https://www.semanticscholar.org/paper/c5f5f179d80a3bf9b4f29750283a87eaca42e91b", "referenceCount": 56, "citationCount": 2392, "influentialCitationCount": 484, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Revisiting User Mobility and Social Relationships in LBSNs: A Hypergraph Embedding Approach", "abstract": "Location Based Social Networks (LBSNs) have been widely used as a primary data source to study the impact of mobility and social relationships on each other. Traditional approaches manually define features to characterize users' mobility homophily and social proximity, and show that mobility and social features can help friendship and location prediction tasks, respectively. However, these hand-crafted features not only require tedious human efforts, but also are difficult to generalize. In this paper, by revisiting user mobility and social relationships based on a large-scale LBSN dataset collected over a long-term period, we propose LBSN2Vec, a hypergraph embedding approach designed specifically for LBSN data for automatic feature learning. 
Specifically, LBSN data intrinsically forms a hypergraph including both user-user edges (friendships) and user-time-POI-semantic hyperedges (check-ins). Based on this hypergraph, we first propose a random-walk-with-stay scheme to jointly sample user check-ins and social relationships, and then learn node embeddings from the sampled (hyper)edges by preserving n-wise node proximity (n = 2 or 4). Our evaluation results show that LBSN2Vec both consistently and significantly outperforms the state-of-the-art graph embedding methods on both friendship and location prediction tasks, with an average improvement of 32.95% and 25.32%, respectively. Moreover, using LBSN2Vec, we discover the asymmetric impact of mobility and social relationships on predicting each other, which can serve as guidelines for future research on friendship and location prediction in LBSNs.", "year": 2019, "venue": "The Web Conference", "authors": [ "Dingqi Yang", "Bingqing Qu", "Jie Yang", "P. Cudré-Mauroux" ], "externalIds": { "MAG": "2913696439", "DBLP": "conf/www/YangQYC19", "DOI": "10.1145/3308558.3313635", "CorpusId": 86810496 }, "url": "https://www.semanticscholar.org/paper/7793d888d82625dc34432110173d5a5d43f5c983", "referenceCount": 44, "citationCount": 245, "influentialCitationCount": 31, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Hypergraph Convolution and Hypergraph Attention", "abstract": null, "year": 2019, "venue": "Pattern Recognition", "authors": [ "S. Bai", "Feihu Zhang", "Philip H. S. Torr" ], "externalIds": { "DBLP": "journals/pr/BaiZT21", "ArXiv": "1901.08150", "MAG": "2911251106", "DOI": "10.1016/j.patcog.2020.107637", "CorpusId": 59222740 }, "url": "https://www.semanticscholar.org/paper/6ed647b67c93901f302fa016a5355db8eebc7888", "referenceCount": 79, "citationCount": 484, "influentialCitationCount": 38, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Graph Convolutional Neural Networks for Human Activity Purpose Imputation", "abstract": "Automatic location tracking of people has recently become a viable source for mobility and movement data. Such data are used in a wide range of applications, from city and transport planning to individual recommendations and schedule optimization. For many of these uses, it is of high interest to know why a person visited at a given location at a certain point in time. We use multiple personalized graphs to model human mobility behavior and to embed a large variety of spatio-temporal information and structure in the graphs’ weights and connections. Taking these graphs as input for graph convolutional neural networks (GCNs) allows us to build models that can exploit the structural information inherent in human mobility. We use GPS travel survey data to build person specific mobility graphs and use GCNs to predict the purpose of a user’s visit at a certain location. Our results show that GCNs are suitable to exploit the structure embedded in the mobility graphs.", "year": 2018, "venue": "Neural Information Processing Systems", "authors": [ "Henry Martin", "D. Bucher", "Esra Suel", "Pengxiang Zhao", "F. Pérez-Cruz", "M. 
Raubal" ], "externalIds": { "MAG": "2937099112", "DOI": "10.3929/ETHZ-B-000310251", "CorpusId": 70348863 }, "url": "https://www.semanticscholar.org/paper/4820326b4b3e9ae016e5e58166fc61923c684d37", "referenceCount": 22, "citationCount": 25, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Hypergraph Neural Networks", "abstract": "In this paper, we present a hypergraph neural networks (HGNN) framework for data representation learning, which can encode high-order data correlation in a hypergraph structure. Confronting the challenges of learning representation for complex data in real practice, we propose to incorporate such data structure in a hypergraph, which is more flexible on data modeling, especially when dealing with complex data. In this method, a hyperedge convolution operation is designed to handle the data correlation during representation learning. In this way, traditional hypergraph learning procedure can be conducted using hyperedge convolution operations efficiently. HGNN is able to learn the hidden layer representation considering the high-order data structure, which is a general framework considering the complex data correlations. We have conducted experiments on citation network classification and visual object recognition tasks and compared HGNN with graph convolutional networks and other traditional methods. Experimental results demonstrate that the proposed HGNN method outperforms recent state-of-theart methods. We can also reveal from the results that the proposed HGNN is superior when dealing with multi-modal data compared with existing methods.", "year": 2018, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Yifan Feng", "Haoxuan You", "Zizhao Zhang", "R. Ji", "Yue Gao" ], "externalIds": { "ArXiv": "1809.09401", "DBLP": "journals/corr/abs-1809-09401", "MAG": "2892880750", "DOI": "10.1609/AAAI.V33I01.33013558", "CorpusId": 52825543 }, "url": "https://www.semanticscholar.org/paper/510d98681e5e85fb1265513728f16e2543ae1b4b", "referenceCount": 35, "citationCount": 1034, "influentialCitationCount": 161, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Evaluating the specifications of built-in accelerometers in smartphones on fall detection performance", "abstract": "ABSTRACT In the daily life of the elderly, falling is a common accident and results in severe consequences. Hence, considerable research has been conducted on fall detection for the elderly. Smartphones, which have motion sensors and transmitting modules, have been applied as fall detectors. However, the performance of human activity recognition using various smartphones is usually lower than those using a dedicated smartphone due to different specification values of built-in smartphone accelerometers. This presents a challenge in the development of technology for using smartphones as fall detectors. In this study, we compared the specifications of built-in accelerometers which are essential for fall detection in four smartphones from different manufacturers. To test fall detection performance on various smartphones, 10 young healthy volunteers were included to wear the four smartphones and perform daily activities and fall event trials. Then the accuracies of a fall detection algorithm performed on four different smartphones were calculated and compared. The results show that the same algorithm has different accuracies when performed using smartphones from four different manufacturers. 
Also, a smartphone with insufficient accelerometer specifications exhibits low accuracy in fall detection. Based on our results, we suggest the effect of specifications of built-in accelerometers in smartphones should be considered when developing a fall detection system for multiple models of smartphones. To maintain high accuracy, a smartphone-based fall detection algorithm should only be used in cases in which built-in accelerometers meet certain specifications.", "year": 2018, "venue": "", "authors": [ "Kuang-Hsuan Chen", "Yu-Wei Hsu", "Jing-Jung Yang", "F. Jaw" ], "externalIds": { "MAG": "2746591362", "DOI": "10.1080/10739149.2017.1363054", "CorpusId": 116337473 }, "url": "https://www.semanticscholar.org/paper/748851e52e41783dbedb40abca892259cd02a8d6", "referenceCount": 24, "citationCount": 14, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Engineering" ] }, { "title": "Context Recognition In-the-Wild", "abstract": "Automatic recognition of behavioral context (location, activities, body-posture etc.) can serve health monitoring, aging care, and many other domains. Recognizing context in-the-wild is challenging because of great variability in behavioral patterns, and it requires a complex mapping from sensor features to predicted labels. Data collected in-the-wild may be unbalanced and incomplete, with cases of missing labels or missing sensors. We propose using the multiple layer perceptron (MLP) as a multi-task model for context recognition. Based on features from multi-modal sensors, the model simultaneously predicts many diverse context labels. We analyze the advantages of the model's hidden layers, which are shared among all sensors and all labels, and provide insight to the behavioral patterns that these hidden layers may capture. We demonstrate how recognition of new labels can be improved when utilizing a model that was trained for an initial set of labels, and show how to train the model to withstand missing sensors. We evaluate context recognition on the previously published ExtraSensory Dataset, which was collected in-the-wild. Compared to previously suggested models, the MLP improves recognition, even with fewer parameters than a linear model. The ability to train a good model using data that has incomplete, unbalanced labeling and missing sensors encourages further research with uncontrolled, in-the-wild behavior.", "year": 2018, "venue": "Proceedings of the ACM on Interactive Mobile Wearable and Ubiquitous Technologies", "authors": [ "Yonatan Vaizman", "Nadir Weibel", "Gert R. G. Lanckriet" ], "externalIds": { "DBLP": "journals/imwut/VaizmanWL17", "MAG": "2783920628", "DOI": "10.1145/3161192", "CorpusId": 4055038 }, "url": "https://www.semanticscholar.org/paper/f9fd5b189081bed3feea2cda00c0369a088767e6", "referenceCount": 23, "citationCount": 60, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LightGBM: A Highly Efficient Gradient Boosting Decision Tree", "abstract": "Gradient Boosting Decision Tree (GBDT) is a popular machine learning algorithm, and has quite a few effective implementations such as XGBoost and pGBRT. Although many engineering optimizations have been adopted in these implementations, the efficiency and scalability are still unsatisfactory when the feature dimension is high and data size is large. A major reason is that for each feature, they need to scan all the data instances to estimate the information gain of all possible split points, which is very time consuming. 
To tackle this problem, we propose two novel techniques: \\emph{Gradient-based One-Side Sampling} (GOSS) and \\emph{Exclusive Feature Bundling} (EFB). With GOSS, we exclude a significant proportion of data instances with small gradients, and only use the rest to estimate the information gain. We prove that, since the data instances with larger gradients play a more important role in the computation of information gain, GOSS can obtain quite accurate estimation of the information gain with a much smaller data size. With EFB, we bundle mutually exclusive features (i.e., they rarely take nonzero values simultaneously), to reduce the number of features. We prove that finding the optimal bundling of exclusive features is NP-hard, but a greedy algorithm can achieve quite good approximation ratio (and thus can effectively reduce the number of features without hurting the accuracy of split point determination by much). We call our new GBDT implementation with GOSS and EFB \\emph{LightGBM}. Our experiments on multiple public datasets show that, LightGBM speeds up the training process of conventional GBDT by up to over 20 times while achieving almost the same accuracy.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Guolin Ke", "Qi Meng", "Thomas Finley", "Taifeng Wang", "Wei Chen", "Weidong Ma", "Qiwei Ye", "Tie-Yan Liu" ], "externalIds": { "DBLP": "conf/nips/KeMFWCMYL17", "MAG": "2753094203", "CorpusId": 3815895 }, "url": "https://www.semanticscholar.org/paper/497e4b08279d69513e4d2313a7fd9a55dfb73273", "referenceCount": 32, "citationCount": 8514, "influentialCitationCount": 931, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A survey of energy-efficient context recognition systems using wearable sensors for healthcare applications", "abstract": null, "year": 2017, "venue": "Pervasive and Mobile Computing", "authors": [ "Tifenn Rault", "A. Bouabdallah", "Y. Challal", "Frédéric Marin" ], "externalIds": { "MAG": "2510206847", "DBLP": "journals/percom/RaultBCM17", "DOI": "10.1016/j.pmcj.2016.08.003", "CorpusId": 207399600 }, "url": "https://www.semanticscholar.org/paper/a54078447ae5a895c45fe16c051dd7e5ade177b7", "referenceCount": 89, "citationCount": 109, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Heterogeneous hypergraph embedding for document recommendation", "abstract": null, "year": 2016, "venue": "Neurocomputing", "authors": [ "Y. Zhu", "Ziyu Guan", "Shulong Tan", "Haifeng Liu", "Deng Cai", "Xiaofei He" ], "externalIds": { "DBLP": "journals/ijon/ZhuGTLCH16", "MAG": "2476125329", "DOI": "10.1016/j.neucom.2016.07.030", "CorpusId": 30834156 }, "url": "https://www.semanticscholar.org/paper/bbe6c4e006c0fa327fccbf3814bb3506accb3f68", "referenceCount": 60, "citationCount": 63, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Recognizing Detailed Human Context in the Wild from Smartphones and Smartwatches", "abstract": "The ability to automatically recognize a person’s behavioral context can contribute to health monitoring, aging care, and many other domains. Validating context recognition in the wild is crucial to promote practical applications that work in real-life settings. The authors collected more than 300,000 minutes of sensor data with context labels from 60 subjects. 
Unlike previous studies, these subjects used their own personal phone, in any way that was convenient to them, and engaged in their routine in their natural environments. Unscripted behavior and unconstrained phone usage resulted in situations that were harder to recognize. The authors demonstrate how fusion of multimodal sensors is important for resolving such cases. They present a baseline system and encourage researchers to use their public dataset to compare methods and improve context recognition in the wild.", "year": 2016, "venue": "IEEE pervasive computing", "authors": [ "Yonatan Vaizman", "Katherine Ellis", "Gert R. G. Lanckriet" ], "externalIds": { "DBLP": "journals/pervasive/VaizmanEL17", "ArXiv": "1609.06354", "MAG": "2953092962", "DOI": "10.1109/MPRV.2017.3971131", "CorpusId": 8728742 }, "url": "https://www.semanticscholar.org/paper/a85a5307c1214b9accea49e3d27bf3daa24335dc", "referenceCount": 53, "citationCount": 254, "influentialCitationCount": 37, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Semi-Supervised Classification with Graph Convolutional Networks", "abstract": "We present a scalable approach for semi-supervised learning on graph-structured data that is based on an efficient variant of convolutional neural networks which operate directly on graphs. We motivate the choice of our convolutional architecture via a localized first-order approximation of spectral graph convolutions. Our model scales linearly in the number of graph edges and learns hidden layer representations that encode both local graph structure and features of nodes. In a number of experiments on citation networks and on a knowledge graph dataset we demonstrate that our approach outperforms related methods by a significant margin.", "year": 2016, "venue": "International Conference on Learning Representations", "authors": [ "Thomas Kipf", "M. Welling" ], "externalIds": { "ArXiv": "1609.02907", "MAG": "2519887557", "DBLP": "journals/corr/KipfW16", "CorpusId": 3144218 }, "url": "https://www.semanticscholar.org/paper/36eff562f65125511b5dfab68ce7f7a943c27478", "referenceCount": 38, "citationCount": 25291, "influentialCitationCount": 6216, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Physical Human Activity Recognition Using Wearable Sensors", "abstract": "This paper presents a review of different classification techniques used to recognize human activities from wearable inertial sensor data. Three inertial sensor units were used in this study and were worn by healthy subjects at key points of upper/lower body limbs (chest, right thigh and left ankle). Three main steps describe the activity recognition process: sensors’ placement, data pre-processing and data classification. Four supervised classification techniques namely, k-Nearest Neighbor (k-NN), Support Vector Machines (SVM), Gaussian Mixture Models (GMM), and Random Forest (RF) as well as three unsupervised classification techniques namely, k-Means, Gaussian mixture models (GMM) and Hidden Markov Model (HMM), are compared in terms of correct classification rate, F-measure, recall, precision, and specificity. Raw data and extracted features are used separately as inputs of each classifier. The feature selection is performed using a wrapper approach based on the RF algorithm. 
Based on our experiments, the results obtained show that the k-NN classifier provides the best performance compared to other supervised classification algorithms, whereas the HMM classifier is the one that gives the best results among unsupervised classification algorithms. This comparison highlights which approach gives better performance in both supervised and unsupervised contexts. It should be noted that the obtained results are limited to the context of this study, which concerns the classification of the main daily living human activities using three wearable accelerometers placed at the chest, right shank and left ankle of the subject.", "year": 2015, "venue": "Italian National Conference on Sensors", "authors": [ "Ferhat Attal", "S. Mohammed", "Mariam Dedabrishvili", "Faicel Chamroukhi", "L. Oukhellou", "Y. Amirat" ], "externalIds": { "MAG": "2195342085", "DBLP": "journals/sensors/AttalMDCOA15", "PubMedCentral": "4721778", "DOI": "10.3390/s151229858", "CorpusId": 16144132, "PubMed": "26690450" }, "url": "https://www.semanticscholar.org/paper/bbac65dc51363ce4d4ee95a48fef98e49da407cf", "referenceCount": 97, "citationCount": 667, "influentialCitationCount": 37, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Medicine", "Computer Science" ] }, { "title": "Undistracted driving: a mobile phone that doesn't distract", "abstract": "Distracted driving is a major problem that leads to unnecessary accidents and human casualties everywhere in the world. The ubiquity of mobile phones is one cause of distracted driving. In United States alone, operating mobile phones while driving has been cited as a factor in crashes that have led to 995 deaths and 24,000 injuries in 2009. To mitigate the problem of distracted driving caused by mobile phones, we propose using context-awareness to implement burden-shifting, time-shifting, and activity-based sharing. Although the first two concepts have been introduced before in the research literature and the latter two are novel, none of these concepts have yet been explored in the context of mobile phones and driving. We present our initial interaction designs for these concepts on the Android platform.", "year": 2011, "venue": "Workshop on Mobile Computing Systems and Applications", "authors": [ "J. Lindqvist", "Jason I. Hong" ], "externalIds": { "DBLP": "conf/wmcsa/LindqvistH11", "MAG": "2019993146", "DOI": "10.1145/2184489.2184504", "CorpusId": 10264849 }, "url": "https://www.semanticscholar.org/paper/4bac2ccd42a32c020d82cc0f352e3385ab4a2c04", "referenceCount": 28, "citationCount": 49, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deploying personalized mobile services in an agent-based environment", "abstract": null, "year": 2007, "venue": "Expert systems with applications", "authors": [ "Wei-Po Lee" ], "externalIds": { "DBLP": "journals/eswa/Lee07a", "MAG": "2064530114", "DOI": "10.1016/j.eswa.2006.02.009", "CorpusId": 28822817 }, "url": "https://www.semanticscholar.org/paper/5d806bad5cee631dc86398f067925506f43a5b33", "referenceCount": 41, "citationCount": 80, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Conceptual Framework and a Toolkit for Supporting the Rapid Prototyping of Context-Aware Applications", "abstract": "Computing devices and applications are now used beyond the desktop, in diverse environments, and this trend toward ubiquitous computing is accelerating. 
One challenge that remains in this emerging research field is the ability to enhance the behavior of any application by informing it of the context of its use. By context, we refer to any information that characterizes a situation related to the interaction between humans, applications, and the surrounding environment. Context-aware applications promise richer and easier interaction, but the current state of research in this field is still far removed from that vision. This is due to 3 main problems: (a) the notion of context is still ill defined, (b) there is a lack of conceptual models and methods to help drive the design of context-aware applications, and (c) no tools are available to jump-start the development of context-aware applications. In this anchor article, we address these 3 problems in turn. We first define context, identify categories of contextual information, and characterize context-aware application behavior. Though the full impact of context-aware computing requires understanding very subtle and high-level notions of context, we are focusing our efforts on the pieces of context that can be inferred automatically from sensors in a physical environment. We then present a conceptual framework that separates the acquisition and representation of context from the delivery and reaction to context by a context-aware application. We have built a toolkit, the Context Toolkit, that instantiates this conceptual framework and supports the rapid development of a rich space of context-aware applications. We illustrate the usefulness of the conceptual framework by describing a number of context-aware applications that have been prototyped using the Context Toolkit. We also demonstrate how such a framework can support the investigation of important research challenges in the area of context-aware computing.", "year": 2001, "venue": "Hum. Comput. Interact.", "authors": [ "A. Dey", "G. Abowd", "D. Salber" ], "externalIds": { "DBLP": "journals/hhci/DeyAS01", "MAG": "2163419627", "DOI": "10.1207/S15327051HCI16234_02", "CorpusId": 61859089 }, "url": "https://www.semanticscholar.org/paper/9680aada952d2f00546b891a2a0aacbbb0adaf8f", "referenceCount": 78, "citationCount": 3250, "influentialCitationCount": 183, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Towards a Better Understanding of Context and Context-Awareness", "abstract": null, "year": 1999, "venue": "HUC", "authors": [ "G. Abowd", "A. Dey", "P. Brown", "N. Davies", "Mark T. 
Smith", "Pete Steggles" ], "externalIds": { "MAG": "1882088395", "DBLP": "conf/huc/AbowdDBDSS99", "DOI": "10.1007/3-540-48157-5_29", "CorpusId": 10242279 }, "url": "https://www.semanticscholar.org/paper/5749b3590147259baeff87f91d250bf07938c60e", "referenceCount": 28, "citationCount": 5240, "influentialCitationCount": 289, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Adversarial deep feature extraction network for user independent har", "abstract": null, "year": 2022, "venue": "PerCom", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Context recognition in-thewild: Unified model for multi-modal sensors and multi-label classification", "abstract": null, "year": 2018, "venue": "ACM IMWUT, 2018.", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "An extensible modular recognition concept that makes ar practical", "abstract": null, "year": 2010, "venue": "AAAI", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "applicable license agreement with IEEE. Restrictions apply", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "AUGUR, A flexible and efficient optimization algorithm for identification of optimal adsorption sites": { "paper_title": "AUGUR, A flexible and efficient optimization algorithm for identification of optimal adsorption sites", "arxiv_id": "2409.16204v1", "keyword": "graph neural network", "authors": [ "Ioannis Kouroudis", "Poonam", "Neel Misciaci", "Felix Mayr", "Leon Müller", "Zhaosu Gu", "Alessio Gagliardi" ], "references": [ { "title": "Mechanistic Insights into ZIF-8 Encapsulation of Atom-Precise Pt(M) Carbonyl Clusters", "abstract": null, "year": 2023, "venue": "Chemistry of Materials", "authors": [ "Kathrin L. Kollmannsberger", "Poonam", "Cristiana Cesari", "Rachit Khare", "T. Kratky", "M. Boniface", "Ondřej Tomanec", "J. Michalička", "E. Mosconi", "A. Gagliardi", "S. Günther", "W. Kaiser", "T. Lunkenbein", "S. Zacchini", "J. Warnan", "R. A. Fischer" ], "externalIds": { "DOI": "10.1021/acs.chemmater.3c00807", "CorpusId": 259882021 }, "url": "https://www.semanticscholar.org/paper/c9773cd7f2dc73a6bfac613ffea16414b0b3d77b", "referenceCount": 46, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Utilizing Data-Driven Optimization to Automate the Parametrization of Kinetic Monte Carlo Models.", "abstract": "Kinetic Monte Carlo (kMC) simulations are a popular tool to investigate the dynamic behavior of stochastic systems. However, one major limitation is their relatively high computational costs. In the last three decades, significant effort has been put into developing methodologies to make kMC more efficient, resulting in an enhanced runtime efficiency. Nevertheless, kMC models remain computationally expensive. This is in particular an issue in complex systems with several unknown input parameters where often most of the simulation time is required for finding a suitable parametrization. 
A potential route for automating the parametrization of kinetic Monte Carlo models arises from coupling kMC with a data-driven approach. In this work, we equip kinetic Monte Carlo simulations with a feedback loop consisting of Gaussian Processes (GPs) and Bayesian optimization (BO) to enable a systematic and data-efficient input parametrization. We utilize the results from fast-converging kMC simulations to construct a database for training a cheap-to-evaluate surrogate model based on Gaussian processes. Combining the surrogate model with a system-specific acquisition function enables us to apply Bayesian optimization for the guided prediction of suitable input parameters. Thus, the amount of trial simulation runs can be considerably reduced facilitating an efficient utilization of arbitrary kMC models. We showcase the effectiveness of our methodology for a physical process of growing industrial relevance: the space-charge layer formation in solid-state electrolytes as it occurs in all-solid-state batteries. Our data-driven approach requires only 1-2 iterations to reconstruct the input parameters from different baseline simulations within the training data set. Moreover, we show that the methodology is even capable of accurately extrapolating into regions outside the training data set which are computationally expensive for direct kMC simulation. Concluding, we demonstrate the high accuracy of the underlying surrogate model via a full parameter space investigation eventually making the original kMC simulation obsolete.", "year": 2023, "venue": "Journal of Physical Chemistry A", "authors": [ "I. Kouroudis", "Manuel Gößwein", "A. Gagliardi" ], "externalIds": { "DOI": "10.1021/acs.jpca.3c02482", "CorpusId": 259375069, "PubMed": "37421601" }, "url": "https://www.semanticscholar.org/paper/bd3782d8c2b37da867c5373b4615b40458e11826", "referenceCount": 48, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Machine-learning driven global optimization of surface adsorbate geometries", "abstract": null, "year": 2023, "venue": "npj Computational Materials", "authors": [ "Hyunwook Jung", "Lena Sauerland", "Sina Stocker", "K. Reuter", "Johannes T. Margraf" ], "externalIds": { "DOI": "10.1038/s41524-023-01065-w", "CorpusId": 259254085 }, "url": "https://www.semanticscholar.org/paper/edbb4aa2e8ed4bbcf585ad2bbab5bdf8229e98b6", "referenceCount": 68, "citationCount": 25, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Uncertainty Estimation for Molecules: Desiderata and Methods", "abstract": "Graph Neural Networks (GNNs) are promising surrogates for quantum mechanical calculations as they establish unprecedented low errors on collections of molecular dynamics (MD) trajectories. Thanks to their fast inference times they promise to accelerate computational chemistry applications. Unfortunately, despite low in-distribution (ID) errors, such GNNs might be horribly wrong for out-of-distribution (OOD) samples. Uncertainty estimation (UE) may aid in such situations by communicating the model's certainty about its prediction. Here, we take a closer look at the problem and identify six key desiderata for UE in molecular force fields, three 'physics-informed' and three 'application-focused' ones. To overview the field, we survey existing methods from the field of UE and analyze how they fit to the set desiderata. By our analysis, we conclude that none of the previous works satisfies all criteria. 
To fill this gap, we propose Localized Neural Kernel (LNK) a Gaussian Process (GP)-based extension to existing GNNs satisfying the desiderata. In our extensive experimental evaluation, we test four different UE with three different backbones and two datasets. In out-of-equilibrium detection, we find LNK yielding up to 2.5 and 2.1 times lower errors in terms of AUC-ROC score than dropout or evidential regression-based methods while maintaining high predictive performance.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Tom Wollschlager", "Nicholas Gao", "Bertrand Charpentier", "Mohamed Amine Ketata", "Stephan Gunnemann" ], "externalIds": { "ArXiv": "2306.14916", "DBLP": "conf/icml/WollschlagerGCK23", "DOI": "10.48550/arXiv.2306.14916", "CorpusId": 259262450 }, "url": "https://www.semanticscholar.org/paper/0a929d160fafe89ef760925ac48e5ebbb287ce11", "referenceCount": 87, "citationCount": 7, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Physics", "Mathematics" ] }, { "title": "Fast evaluation of the adsorption energy of organic molecules on metals via graph neural networks", "abstract": null, "year": 2023, "venue": "Nature Computational Science", "authors": [ "Sergio Pablo‐García", "S. Morandi", "Rodrigo A Vargas-Hernández", "K. Jorner", "Žarko Ivković", "N. López", "Alán Aspuru-Guzik" ], "externalIds": { "DBLP": "journals/ncs/Pablo-GarciaMVJ23", "PubMedCentral": "10766545", "DOI": "10.1038/s43588-023-00437-y", "CorpusId": 258458194, "PubMed": "38177837" }, "url": "https://www.semanticscholar.org/paper/4935902e122150e459beda9e937f7d05aa0081b3", "referenceCount": 84, "citationCount": 20, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Xsorb: A software for identifying the most stable adsorption configuration and energy of a molecule on a crystal surface", "abstract": null, "year": 2023, "venue": "Computer Physics Communications", "authors": [ "Enrico Pedretti", "P. Restuccia", "M. Righi" ], "externalIds": { "ArXiv": "2304.14353", "DBLP": "journals/cphysics/PedrettiRR23", "DOI": "10.1016/j.cpc.2023.108827", "CorpusId": 258352655 }, "url": "https://www.semanticscholar.org/paper/56ecef1344291c97c15b1d3d40d37e6e82959647", "referenceCount": 53, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Physics" ] }, { "title": "Rapid Data‐Efficient Optimization of Perovskite Nanocrystal Syntheses through Machine Learning Algorithm Fusion", "abstract": "With the demand for renewable energy and efficient devices rapidly increasing, a need arises to find and optimize novel (nano)materials. With sheer limitless possibilities for material combinations and synthetic procedures, obtaining novel, highly functional materials has been a tedious trial and error process. Recently, machine learning has emerged as a powerful tool to help optimize syntheses; however, most approaches require a substantial amount of input data, limiting their pertinence. Here, three well‐known machine‐learning models are merged with Bayesian optimization into one to optimize the synthesis of CsPbBr3 nanoplatelets with limited data demand. The algorithm can accurately predict the photoluminescence emission maxima of nanoplatelet dispersions using only the three precursor ratios as input parameters. This allows us to fabricate previously unobtainable seven and eight monolayer‐thick nanoplatelets. 
Moreover, the algorithm dramatically improves the homogeneity of 2–6‐monolayer‐thick nanoplatelet dispersions, as evidenced by narrower and more symmetric photoluminescence spectra. Decisively, only 200 total syntheses are required to achieve this vast improvement, highlighting how rapidly material properties can be optimized. The algorithm is highly versatile and can incorporate additional synthetic parameters. Accordingly, it is readily applicable to other less‐explored nanocrystal syntheses and can help rapidly identify and improve exciting compositions’ quality.", "year": 2023, "venue": "Advances in Materials", "authors": [ "Carola Lampe", "I. Kouroudis", "M. Harth", "Stefan Martin", "A. Gagliardi", "A. Urban" ], "externalIds": { "DOI": "10.1002/adma.202208772", "CorpusId": 256079922, "PubMed": "36681859" }, "url": "https://www.semanticscholar.org/paper/cb856f940e6479b73366ee9990a949cda5354e92", "referenceCount": 4, "citationCount": 14, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Exploring the Conformers of an Organic Molecule on a Metal Cluster with Bayesian Optimization", "abstract": "Finding low-energy conformers of organic molecules is a complex problem due to the flexibilities of the molecules and the high dimensionality of the search space. When such molecules are on nanoclusters, the search complexity is exacerbated by constraints imposed by the presence of the cluster and other surrounding molecules. To address this challenge, we modified our previously developed active learning molecular conformer search method based on Bayesian optimization and density functional theory. Especially, we have developed and tested strategies to avoid steric clashes between a molecule and a cluster. In this work, we chose a cysteine molecule on a well-studied gold–thiolate cluster as a model system to test and demonstrate our method. We found that cysteine conformers in a cluster inherit the hydrogen bond types from isolated conformers. However, the energy rankings and spacings between the conformers are reordered.", "year": 2023, "venue": "Journal of Chemical Information and Modeling", "authors": [ "Lincan Fang", "Xiaomi Guo", "M. Todorović", "P. Rinke", "Xi Chen" ], "externalIds": { "PubMedCentral": "9930108", "DBLP": "journals/jcisd/FangGTRC23", "DOI": "10.1021/acs.jcim.2c01120", "CorpusId": 255847764, "PubMed": "36642891" }, "url": "https://www.semanticscholar.org/paper/3a36e18d79e9f22eba856ab734d75b7efdab0633", "referenceCount": 34, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "AdsorbML: a leap in efficiency for adsorption energy calculations using generalizable machine learning potentials", "abstract": null, "year": 2022, "venue": "npj Computational Materials", "authors": [ "Janice Lan", "Aini Palizhati", "Muhammed Shuaibi", "Brandon M. Wood", "Brook Wander", "Abhishek Das", "M. Uyttendaele", "C. L. Zitnick", "Zachary W. 
Ulissi" ], "externalIds": { "ArXiv": "2211.16486", "DOI": "10.1038/s41524-023-01121-5", "CorpusId": 254069765 }, "url": "https://www.semanticscholar.org/paper/1255a652b0014e616dbc525d5abd4cf4b10d3fd4", "referenceCount": 61, "citationCount": 18, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Computer Science" ] }, { "title": "ZnO nanostructured materials and their potential applications: progress, challenges and perspectives", "abstract": "Extensive research in nanotechnology has been conducted to investigate new behaviours and properties of materials with nanoscale dimensions. ZnO NPs owing to their distinct physical and chemical properties have gained considerable importance and are hence investigated to a detailed degree for exploitation of these properties. This communication, at the outset, elaborates the various chemical methods of preparation of ZnO NPs, viz., the mechanochemical process, controlled precipitation, sol–gel method, vapour transport method, solvothermal and hydrothermal methods, and methods using emulsion and micro-emulsion environments. The paper further describes the green methods employing the use of plant extracts, in particular, for the synthesis of ZnO NPs. The modifications of ZnO with organic (carboxylic acid, silanes) and inorganic (metal oxides) compounds and polymer matrices have then been described. The multitudinous applications of ZnO NPs across a variety of fields such as the rubber industry, pharmaceutical industry, cosmetics, textile industry, opto-electronics and agriculture have been presented. Elaborative narratives on the photocatalytic and a variety of biomedical applications of ZnO have also been included. The ecotoxic impacts of ZnO NPs have additionally been briefly highlighted. Finally, efforts have been made to examine the current challenges and future scope of the synthetic modes and applications of ZnO NPs.", "year": 2022, "venue": "Nanoscale Advances", "authors": [ "Sauvik Raha", "M. Ahmaruzzaman" ], "externalIds": { "PubMedCentral": "9419838", "DOI": "10.1039/d1na00880c", "CorpusId": 247367861, "PubMed": "36133407" }, "url": "https://www.semanticscholar.org/paper/ee4c48620e99e841b662a13bcf7765734e007d19", "referenceCount": 0, "citationCount": 189, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Uncertainty-aware molecular dynamics from Bayesian active learning for phase transformations and thermal transport in SiC", "abstract": null, "year": 2022, "venue": "npj Computational Materials", "authors": [ "Yu Xie", "Jonathan Vandermause", "Senja Ramakers", "N. Protik", "A. Johansson", "B. Kozinsky" ], "externalIds": { "ArXiv": "2203.03824", "DOI": "10.1038/s41524-023-00988-8", "CorpusId": 247315468 }, "url": "https://www.semanticscholar.org/paper/61736fcb3f03f47cc3da9ea06561862c43e5730d", "referenceCount": 98, "citationCount": 40, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Physics" ] }, { "title": "Machine Learning and Optoelectronic Materials Discovery: A Growing Synergy.", "abstract": "Novel optoelectronic materials have the potential to revolutionize the ongoing green transition by both providing more efficient photovoltaic (PV) devices and lowering energy consumption of devices like LEDs and sensors. The lead candidate materials for these applications are both organic semiconductors and more recently perovskites. 
This Perspective illustrates how novel machine learning techniques can help explore these materials, from speeding up ab initio calculations toward experimental guidance. Furthermore, based on existing work, perspectives around machine-learned molecular dynamics potentials, physically informed neural networks, and generative methods are outlined.", "year": 2022, "venue": "Journal of Physical Chemistry Letters", "authors": [ "Felix Mayr", "M. Harth", "I. Kouroudis", "M. Rinderle", "A. Gagliardi" ], "externalIds": { "DOI": "10.1021/acs.jpclett.1c04223", "CorpusId": 247023514, "PubMed": "35188778" }, "url": "https://www.semanticscholar.org/paper/a5f7b106200794827a018a2bf1da2589e122ab60", "referenceCount": 79, "citationCount": 12, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Bayesian optimization of nanoporous materials", "abstract": "Nanoporous materials (NPMs) could be used to store, capture, and sense many different gases. Given an adsorption task, we often wish to search a library of NPMs for the one...", "year": 2021, "venue": "Molecular Systems Design & Engineering", "authors": [ "Aryan Deshwal", "Cory M. Simon", "J. Doppa" ], "externalIds": { "MAG": "3176594414", "DOI": "10.33774/chemrxiv-2021-4624n", "CorpusId": 237923782 }, "url": "https://www.semanticscholar.org/paper/c4dbfe12b45ac4ee3f2a1a08eb4ba51668ebac1e", "referenceCount": 144, "citationCount": 42, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Accelerating materials discovery with Bayesian optimization and graph deep learning", "abstract": null, "year": 2021, "venue": "Materials Today", "authors": [ "Yunxing Zuo", "Mingde Qin", "Chi Chen", "Weike Ye", "Xiang-Guo Li", "Jian Luo", "S. Ong" ], "externalIds": { "ArXiv": "2104.10242", "DOI": "10.1016/j.mattod.2021.08.012", "CorpusId": 233324364 }, "url": "https://www.semanticscholar.org/paper/5efb625e2bf00931ac0912e3ffafad7ff0df66e0", "referenceCount": 63, "citationCount": 65, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Physics" ] }, { "title": "DockOnSurf: A Python Code for the High-Throughput Screening of Flexible Molecules Adsorbed on Surfaces", "abstract": "We present the open-source python package DockOnSurf which automates the generation and optimization of low-energy adsorption configurations of molecules on extended surfaces and nanoparticles. DockOnSurf is especially geared toward handling polyfunctional flexible adsorbates. The use of this high-throughput workflow allows us to carry out the screening of adsorbate-surface configurations in a systematic, customizable, and traceable way, while keeping the focus on the chemically relevant structures. The screening strategy consists in splitting the exploration of the adsorbate-surface configurational space into chemically meaningful domains, that is, by choosing among different conformers to adsorb, surface adsorption sites, adsorbate anchoring points, and orientations and allowing dissociation of (acidic) protons. We demonstrate the performance of the main features based on varying examples, ranging from CO adsorption on a gold nanoparticle to sorbitol adsorption on hematite. 
Through the use of the presented program, we aim to foster efficiency, traceability, and ease of use in research within tribology, catalysis, nanoscience, and surface science in general.", "year": 2021, "venue": "Journal of Chemical Information and Modeling", "authors": [ "Carles Martí", "Sarah Blanck", "Ruben Staub", "S. Loehlé", "C. Michel", "S. Steinmann" ], "externalIds": { "MAG": "3170409048", "DBLP": "journals/jcisd/MartiBSL0S21", "DOI": "10.1021/acs.jcim.1c00256", "CorpusId": 235608540, "PubMed": "34160214" }, "url": "https://www.semanticscholar.org/paper/3e551821439dc9ba474a8d376e5f625cc23ce8f7", "referenceCount": 75, "citationCount": 15, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Materials Science", "Medicine" ] }, { "title": "Detecting stable adsorbates of (1S)-camphor on Cu(111) with Bayesian optimization", "abstract": "Identifying the atomic structure of organic–inorganic interfaces is challenging with current research tools. Interpreting the structure of complex molecular adsorbates from microscopy images can be difficult, and using atomistic simulations to find the most stable structures is limited to partial exploration of the potential energy surface due to the high-dimensional phase space. In this study, we present the recently developed Bayesian Optimization Structure Search (BOSS) method as an efficient solution for identifying the structure of non-planar adsorbates. We apply BOSS with density-functional theory simulations to detect the stable adsorbate structures of (1S)-camphor on the Cu(111) surface. We identify the optimal structure among eight unique types of stable adsorbates, in which camphor chemisorbs via oxygen (global minimum) or physisorbs via hydrocarbons to the Cu(111) surface. This study demonstrates that new cross-disciplinary tools, such as BOSS, facilitate the description of complex surface structures and their properties, and ultimately allow us to tune the functionality of advanced materials.", "year": 2020, "venue": "Beilstein Journal of Nanotechnology", "authors": [ "J. Järvi", "P. Rinke", "M. Todorović" ], "externalIds": { "MAG": "3092698482", "ArXiv": "2002.05598", "PubMedCentral": "7590619", "DOI": "10.3762/bjnano.11.140", "CorpusId": 211096788, "PubMed": "33134002" }, "url": "https://www.semanticscholar.org/paper/2ec78e964bff71b71516aa507bcb0fe2f7a850dd", "referenceCount": 58, "citationCount": 19, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Materials Science", "Medicine", "Physics" ] }, { "title": "Nudged elastic band calculations accelerated with Gaussian process regression based on inverse inter-atomic distances.", "abstract": "Calculations of minimum energy paths for atomic rearrangements using the nudged elastic band method can be accelerated with Gaussian process regression to reduce the number of energy and atomic force evaluations needed for convergence. Problems can arise, however, when configurations with large forces due to short distance between atoms are included in the data set. Here, a significant improvement to the Gaussian process regression approach is obtained by basing the difference measure between two atomic configurations in the covariance function on the inverted inter-atomic distances and by adding a new early stopping criterion for the path relaxation phase. 
This greatly improves the performance of the method in two applications where the original formulation does not work well: a dissociative adsorption of an H2 molecule on a Cu(110) surface and a diffusion hop of an H2O molecule on an ice Ih(0001) surface. Also, the revised method works better in the previously analyzed benchmark application to rearrangement transitions of a heptamer island on a surface, requiring fewer energy and force evaluations for convergence to the minimum energy path.", "year": 2019, "venue": "Journal of Chemical Theory and Computation", "authors": [ "O. Koistinen", "V. Ásgeirsson", "Aki Vehtari", "H. Jónsson" ], "externalIds": { "MAG": "2981328235", "DOI": "10.26434/chemrxiv.8850440", "CorpusId": 204851073, "PubMed": "31638795" }, "url": "https://www.semanticscholar.org/paper/7ae78b2f979873b145a1930489c68f2ffaf1ac7a", "referenceCount": 41, "citationCount": 46, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Physics" ] }, { "title": "Optimizing the Size of Platinum Nanoparticles for Enhanced Mass Activity in the Electrochemical Oxygen Reduction Reaction.", "abstract": "High oxygen reduction (ORR) activity has been for many years considered as the key to many energy applications. Herein, by combining theory and experiment we prepare Pt nanoparticles with optimal size for the efficient ORR in proton-exchange-membrane fuel cells. Optimal nanoparticle sizes are predicted near 1, 2, and 3 nm by computational screening. To corroborate our computational results, we have addressed the challenge of approximately 1 nm sized Pt nanoparticle synthesis with a metal-organic framework (MOF) template approach. The electrocatalyst was characterized by HR-TEM, XPS, and its ORR activity was measured using a rotating disk electrode setup. The observed mass activities (0.87±0.14 A mgPt -1 ) are close to the computational prediction (0.99 A mgPt -1 ). We report the highest to date mass activity among pure Pt catalysts for the ORR within similar size range. The specific and mass activities are twice as high as the Tanaka commercial Pt/C catalysis.", "year": 2019, "venue": "Angewandte Chemie", "authors": [ "Batyr Garlyyev", "Kathrin Kratzl", "M. Rück", "J. Michalička", "J. Fichtner", "J. Macák", "T. Kratky", "S. Günther", "M. Cokoja", "A. Bandarenka", "A. Gagliardi", "R. Fischer" ], "externalIds": { "MAG": "2952964756", "DOI": "10.1002/anie.201904492", "CorpusId": 195661712, "PubMed": "31050857" }, "url": "https://www.semanticscholar.org/paper/71ba09fc4935605d3362aa8a3e8a7ee3edc00d50", "referenceCount": 26, "citationCount": 83, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Materials Science", "Medicine" ] }, { "title": "Phase Transitions of Hybrid Perovskites Simulated by Machine-Learning Force Fields Trained on the Fly with Bayesian Inference.", "abstract": "Realistic finite temperature simulations of matter are a formidable challenge for first principles methods. Long simulation times and large length scales are required, demanding years of computing time. Here we present an on-the-fly machine learning scheme that generates force fields automatically during molecular dynamics simulations. This opens up the required time and length scales, while retaining the distinctive chemical precision of first principles methods and minimizing the need for human intervention. The method is widely applicable to multielement complex systems. 
We demonstrate its predictive power on the entropy driven phase transitions of hybrid perovskites, which have never been accurately described in simulations. Using machine learned potentials, isothermal-isobaric simulations give direct insight into the underlying microscopic mechanisms. Finally, we relate the phase transition temperatures of different perovskites to the radii of the involved species, and we determine the order of the transitions in Landau theory.", "year": 2019, "venue": "Physical Review Letters", "authors": [ "Ryosuke Jinnouchi", "J. Lahnsteiner", "F. Karsai", "G. Kresse", "M. Bokdam" ], "externalIds": { "ArXiv": "1903.09613", "MAG": "2922733622", "DOI": "10.1103/PhysRevLett.122.225701", "CorpusId": 85459557, "PubMed": "31283285" }, "url": "https://www.semanticscholar.org/paper/77ea37089b392cc79da7aab5989b41ed78e9d931", "referenceCount": 32, "citationCount": 190, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Medicine", "Computer Science" ] }, { "title": "Bayesian optimization for conformer generation", "abstract": null, "year": 2018, "venue": "Journal of Cheminformatics", "authors": [ "Lucian Chan", "G. Hutchison", "G. Morris" ], "externalIds": { "MAG": "2947658954", "DBLP": "journals/jcheminf/ChanHM19", "PubMedCentral": "6528340", "DOI": "10.1186/s13321-019-0354-7", "CorpusId": 162170695, "PubMed": "31115707" }, "url": "https://www.semanticscholar.org/paper/b1c39a62fded5e2833e84f49f19b6bf709ec72e7", "referenceCount": 46, "citationCount": 41, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine", "Mathematics" ] }, { "title": "Active learning across intermetallics to guide discovery of electrocatalysts for CO2 reduction and H2 evolution", "abstract": null, "year": 2018, "venue": "Nature Catalysis", "authors": [ "Kevin Tran", "Zachary W. Ulissi" ], "externalIds": { "MAG": "2890961624", "DOI": "10.1038/s41929-018-0142-1", "CorpusId": 139684229 }, "url": "https://www.semanticscholar.org/paper/529bf01db3e6b327b34db6723ac826b1047e72d7", "referenceCount": 55, "citationCount": 510, "influentialCitationCount": 9, "isOpenAccess": false, "fieldsOfStudy": [ "Materials Science" ] }, { "title": "Machine learning hydrogen adsorption on nanoclusters through structural descriptors", "abstract": null, "year": 2018, "venue": "npj Computational Materials", "authors": [ "M. Jäger", "Eiaki V. Morooka", "F. Federici Canova", "Lauri Himanen", "A. Foster" ], "externalIds": { "MAG": "2883021798", "DOI": "10.1038/s41524-018-0096-5", "CorpusId": 52045585 }, "url": "https://www.semanticscholar.org/paper/54207c36ef36f8a2c172bc404222aafddc88343f", "referenceCount": 56, "citationCount": 167, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Materials Science" ] }, { "title": "Adsorption Free Energy of Single Amino Acids at the Rutile (110)/Water Interface Studied by Well-Tempered Metadynamics", "abstract": "Single amino acids are present in blood plasma and are the building blocks of larger organic residues. Their interaction with surfaces is therefore crucial for biomedical applications in contact with blood. In this work, we use well-tempered metadynamics to study the adsorption of six amino acids, with nonpolar (Ala and Leu), polar (Ser), positively charged (Arg and Lys), and negatively charged (Asp) side groups, on a negatively charged rutile (110) surface. 
The free energy of adsorption and the desorption barriers were determined for all the amino acids under different adsorption conformations. When using the center of mass as the collective variable in well-tempered metadynamics, results for different amino acids were difficult to interpret because of different adsorption conformations on the surface overlapping in collective variable space. After projecting onto separate collective variables for the backbone and the side group, much clearer trends were observable. We show that, on the negatively charge...", "year": 2018, "venue": "", "authors": [ "A. YazdanYar", "U. Aschauer", "P. Bowen" ], "externalIds": { "MAG": "2801926928", "DOI": "10.1021/ACS.JPCC.7B12614", "CorpusId": 103642861 }, "url": "https://www.semanticscholar.org/paper/802bf339ba5630dc7d743acc3edfeedd056b6edc", "referenceCount": 48, "citationCount": 25, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Crystal structure prediction accelerated by Bayesian optimization", "abstract": null, "year": 2018, "venue": "", "authors": [ "T. Yamashita", "N. Sato", "H. Kino", "T. Miyake", "Koji Tsuda", "T. Oguchi" ], "externalIds": { "MAG": "2784208423", "DOI": "10.1103/PHYSREVMATERIALS.2.013803", "CorpusId": 125178371 }, "url": "https://www.semanticscholar.org/paper/a3fb9d2543b2a48902f37b79efffef89b8f32404", "referenceCount": 35, "citationCount": 101, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Materials Science" ] }, { "title": "Bayesian inference of atomistic structure in functional materials", "abstract": null, "year": 2017, "venue": "npj Computational Materials", "authors": [ "M. Todorović", "Michael U Gutmann", "J. Corander", "P. Rinke" ], "externalIds": { "MAG": "2922000874", "ArXiv": "1708.09274", "DOI": "10.1038/s41524-019-0175-2", "CorpusId": 85512097 }, "url": "https://www.semanticscholar.org/paper/3cf919a10025483a17ac2d8de0882ca33ae3fe75", "referenceCount": 51, "citationCount": 106, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Computer Science", "Materials Science" ] }, { "title": "ZnO nested shell magic clusters as tetrapod nuclei", "abstract": "Although atomic clusters are generally different from bulk material in terms of their structure, they can initiate (nano)crystal formation. For ZnO, tetrapod-like nanostructures are well known; however, their nucleation model is still questionable. In this report, ZnO magic clusters were considered as seeds for the nucleation of tetrapods; these clusters were characterized via mass spectroscopy and were attributed to a series of nested shell clusters via quantum chemical calculations. Herein, these clusters were constructed as nested Goldberg polyhedra and subjected to Jahn–Teller distortion.", "year": 2017, "venue": "", "authors": [ "A. Dmytruk", "I. Dmitruk", "Y. Shynkarenko", "R. Belosludov", "A. Kasuya" ], "externalIds": { "MAG": "2606542190", "DOI": "10.1039/C7RA01610G", "CorpusId": 99031889 }, "url": "https://www.semanticscholar.org/paper/c7f9c9c75fb38cf1563d61a644147b628fa8b69f", "referenceCount": 75, "citationCount": 14, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Chemistry" ] }, { "title": "Neural Message Passing for Quantum Chemistry", "abstract": "Supervised learning on molecules has incredible potential to be useful in chemistry, drug discovery, and materials science. 
Luckily, several promising and closely related neural network models invariant to molecular symmetries have already been described in the literature. These models learn a message passing algorithm and aggregation procedure to compute a function of their entire input graph. At this point, the next step is to find a particularly effective variant of this general approach and apply it to chemical prediction benchmarks until we either solve them or reach the limits of the approach. In this paper, we reformulate existing models into a single common framework we call Message Passing Neural Networks (MPNNs) and explore additional novel variations within this framework. Using MPNNs we demonstrate state of the art results on an important molecular property prediction benchmark; these results are strong enough that we believe future work should focus on datasets with larger molecules or more accurate ground truth labels.", "year": 2017, "venue": "International Conference on Machine Learning", "authors": [ "J. Gilmer", "S. Schoenholz", "Patrick F. Riley", "O. Vinyals", "George E. Dahl" ], "externalIds": { "DBLP": "journals/corr/GilmerSRVD17", "MAG": "2952254971", "ArXiv": "1704.01212", "CorpusId": 9665943 }, "url": "https://www.semanticscholar.org/paper/e24cdf73b3e7e590c2fe5ecac9ae8aa983801367", "referenceCount": 37, "citationCount": 6566, "influentialCitationCount": 751, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Acceleration of saddle-point searches with machine learning.", "abstract": "In atomistic simulations, the location of the saddle point on the potential-energy surface (PES) gives important information on transitions between local minima, for example, via transition-state theory. However, the search for saddle points often involves hundreds or thousands of ab initio force calls, which are typically all done at full accuracy. This results in the vast majority of the computational effort being spent calculating the electronic structure of states not important to the researcher, and very little time performing the calculation of the saddle point state itself. In this work, we describe how machine learning (ML) can reduce the number of intermediate ab initio calculations needed to locate saddle points. Since machine-learning models can learn from, and thus mimic, atomistic simulations, the saddle-point search can be conducted rapidly in the machine-learning representation. The saddle-point prediction can then be verified by an ab initio calculation; if it is incorrect, this strategically has identified regions of the PES where the machine-learning representation has insufficient training data. When these training data are used to improve the machine-learning model, the estimates greatly improve. This approach can be systematized, and in two simple example problems we demonstrate a dramatic reduction in the number of ab initio force calls. We expect that this approach and future refinements will greatly accelerate searches for saddle points, as well as other searches on the potential energy surface, as machine-learning methods see greater adoption by the atomistics community.", "year": 2016, "venue": "Journal of Chemical Physics", "authors": [ "A. 
Peterson" ], "externalIds": { "MAG": "2515090858", "DOI": "10.1063/1.4960708", "CorpusId": 23271758, "PubMed": "27544086" }, "url": "https://www.semanticscholar.org/paper/9e9d1a8ac0307b66c19c42eef07910309c04c57f", "referenceCount": 18, "citationCount": 116, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "A Tutorial on Bayesian Optimization of Expensive Cost Functions, with Application to Active User Modeling and Hierarchical Reinforcement Learning", "abstract": "We present a tutorial on Bayesian optimization, a method of finding the maximum of expensive cost functions. Bayesian optimization employs the Bayesian technique of setting a prior over the objective function and combining it with evidence to get a posterior function. This permits a utility-based selection of the next observation to make on the objective function, which must take into account both exploration (sampling from areas of high uncertainty) and exploitation (sampling areas likely to offer improvement over the current best observation). We also present two detailed extensions of Bayesian optimization, with experiments---active user modelling with preferences, and hierarchical reinforcement learning---and a discussion of the pros and cons of Bayesian optimization based on our experiences.", "year": 2010, "venue": "arXiv.org", "authors": [ "E. Brochu", "Vlad M. Cora", "Nando de Freitas" ], "externalIds": { "MAG": "2950338507", "DBLP": "journals/corr/abs-1012-2599", "ArXiv": "1012.2599", "CorpusId": 1640103 }, "url": "https://www.semanticscholar.org/paper/cd5a26b89f0799db1cbc1dff5607cb6815739fe7", "referenceCount": 109, "citationCount": 2305, "influentialCitationCount": 230, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "AutoDock4 and AutoDockTools4: Automated docking with selective receptor flexibility", "abstract": "We describe the testing and release of AutoDock4 and the accompanying graphical user interface AutoDockTools. AutoDock4 incorporates limited flexibility in the receptor. Several tests are reported here, including a redocking experiment with 188 diverse ligand‐protein complexes and a cross‐docking experiment using flexible sidechains in 87 HIV protease complexes. We also report its utility in analysis of covalently bound ligands, using both a grid‐based docking method and a modification of the flexible sidechain technique. © 2009 Wiley Periodicals, Inc. J Comput Chem, 2009", "year": 2009, "venue": "Journal of Computational Chemistry", "authors": [ "G. Morris", "R. Huey", "William Lindstrom", "M. Sanner", "R. Belew", "D. Goodsell", "A. Olson" ], "externalIds": { "DBLP": "journals/jcc/MorrisHLSBGO09", "MAG": "2105668062", "DOI": "10.1002/jcc.21256", "CorpusId": 5959835, "PubMed": "19399780" }, "url": "https://www.semanticscholar.org/paper/d7f0d01711855d236a8683cae11307da1610efc7", "referenceCount": 20, "citationCount": 17820, "influentialCitationCount": 1704, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Chemistry", "Medicine" ] }, { "title": "The performance of minima hopping and evolutionary algorithms for cluster structure prediction.", "abstract": "We compare evolutionary algorithms with minima hopping for global optimization in the field of cluster structure prediction. We introduce a new average offspring recombination operator and compare it with previously used operators. Minima hopping is improved with a softening method and a stronger feedback mechanism. 
Test systems are atomic clusters with Lennard-Jones interaction as well as silicon and gold clusters described by force fields. The improved minima hopping is found to be well-suited to all these homoatomic problems. The evolutionary algorithm is more efficient for systems with compact and symmetric ground states, including LJ(150), but it fails for systems with very complex energy landscapes and asymmetric ground states, such as LJ(75) and silicon clusters with more than 30 atoms. Both successes and failures of the evolutionary algorithm suggest ways for its improvement.", "year": 2008, "venue": "Journal of Chemical Physics", "authors": [ "Sandro Schönborn", "S. Goedecker", "Shantanu Roy", "A. Oganov" ], "externalIds": { "MAG": "2129328464", "ArXiv": "0810.2055", "DOI": "10.1063/1.3097197", "CorpusId": 6961491, "PubMed": "19368430" }, "url": "https://www.semanticscholar.org/paper/b7bf7709f78b45028dc53721a16fc7739b5f3bf9", "referenceCount": 42, "citationCount": 88, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Medicine" ] }, { "title": "Well-tempered metadynamics: a smoothly converging and tunable free-energy method.", "abstract": "We present a method for determining the free-energy dependence on a selected number of collective variables using an adaptive bias. The formalism provides a unified description which has metadynamics and canonical sampling as limiting cases. Convergence and errors can be rigorously and easily controlled. The parameters of the simulation can be tuned so as to focus the computational effort only on the physically relevant regions of the order parameter space. The algorithm is tested on the reconstruction of an alanine dipeptide free-energy landscape.", "year": 2008, "venue": "Physical Review Letters", "authors": [ "A. Barducci", "G. Bussi", "M. Parrinello" ], "externalIds": { "MAG": "2598934403", "ArXiv": "0803.3861", "DOI": "10.1103/PhysRevLett.100.020603", "CorpusId": 13690352, "PubMed": "18232845" }, "url": "https://www.semanticscholar.org/paper/9fb9d52747b5937a31a67a76cd3d7a50ecbe1a82", "referenceCount": 42, "citationCount": 1867, "influentialCitationCount": 71, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Materials Science", "Medicine" ] }, { "title": "Density functional theory calculations for the hydrogen evolution reaction in an electrochemical double layer on the Pt(111) electrode.", "abstract": "We present results of density functional theory calculations on a Pt(111) slab with a bilayer of water, solvated protons in the water layer, and excess electrons in the metal surface. In this way we model the electrochemical double layer at a platinum electrode. By varying the number of protons/electrons in the double layer we investigate the system as a function of the electrode potential. We study the elementary processes involved in the hydrogen evolution reaction, 2(H(+) + e(-)) --> H(2), and determine the activation energy and predominant reaction mechanism as a function of electrode potential. We confirm by explicit calculations the notion that the variation of the activation barrier with potential can be viewed as a manifestation of the Brønsted-Evans-Polanyi-type relationship between activation energy and reaction energy found throughout surface chemistry.", "year": 2007, "venue": "Physical Chemistry, Chemical Physics - PCCP", "authors": [ "Egill Skúlason", "G. Karlberg", "J. Rossmeisl", "T. Bligaard", "J. Greeley", "H. Jónsson", "J. 
Nørskov" ], "externalIds": { "MAG": "2100929361", "DOI": "10.1039/B700099E", "CorpusId": 41684403, "PubMed": "17579732" }, "url": "https://www.semanticscholar.org/paper/a6b7d308d62e3cad1b2a24c54ddb2a75d9962009", "referenceCount": 53, "citationCount": 552, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Chemistry" ] }, { "title": "Descriptions of surface chemical reactions using a neural network representation of the potential-energy surface", "abstract": "A neural network NN approach is proposed for the representation of six-dimensional ab initio potentialenergy surfaces PES for the dissociation of a diatomic molecule at surfaces. We report tests of NN representations that are fitted to six-dimensional analytical PESs for H2 dissociation on the clean and the sulfur covered Pd100 surfaces. For the present study we use high-dimensional analytical PESs as the basis for the NN training, as this enables us to investigate the influence of phase space sampling on adsorption rates in great detail. We note, however, that these analytical PESs were obtained from detailed density functional theory calculations. When information about the PES is collected only from a few high-symmetric adsorption sites, we find that the obtained adsorption probabilities are not reliable. Thus, intermediate configurations need to be considered as well. However, it is not necessary to map out complete elbow plots above nonsymmetric sites. Our study suggests that only a few additional energies need to be considered in the region of activated systems where the molecular bond breaks. With this understanding, the required number of NN training energies for obtaining a high-quality PES that provides a reliable description of the dissociation and adsorption dynamics is orders of magnitude smaller than the number of total-energy calculations needed in traditional ab initio on the fly molecular dynamics. Our analysis also demonstrates the importance of a reliable, high-dimensional PES to describe reaction rates for dissociative adsorption of molecules at surfaces.", "year": 2006, "venue": "", "authors": [ "S. Lorenz", "M. Scheffler", "A. Gross" ], "externalIds": { "MAG": "1971920777", "DOI": "10.1103/PHYSREVB.73.115431", "CorpusId": 15873654 }, "url": "https://www.semanticscholar.org/paper/133f3979c0f4100aa740025fb89e997175ad2701", "referenceCount": 88, "citationCount": 99, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Materials Science" ] }, { "title": "Minima hopping: an efficient search method for the global minimum of the potential energy surface of complex molecular systems.", "abstract": "A method is presented that can find the global minimum of very complex condensed matter systems. It is based on the simple principle of exploring the configurational space as fast as possible and of avoiding revisiting known parts of this space. Even though it is not a genetic algorithm, it is not based on thermodynamics. The efficiency of the method depends strongly on the type of moves that are used to hop into new local minima. Moves that find low-barrier escape-paths out of the present minimum generally lead into low energy minima.", "year": 2004, "venue": "Journal of Chemical Physics", "authors": [ "S. 
Goedecker" ], "externalIds": { "MAG": "2086702546", "DOI": "10.1063/1.1724816", "CorpusId": 30045205, "PubMed": "15268009" }, "url": "https://www.semanticscholar.org/paper/1e2b78e527df3fcbd50c59158461731b3fe9ab73", "referenceCount": 20, "citationCount": 686, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Medicine" ] }, { "title": "Nudged elastic band method for finding minimum energy paths of transitions", "abstract": null, "year": 1998, "venue": "", "authors": [ "H. Jónsson", "G. Mills", "K. Jacobsen" ], "externalIds": { "MAG": "2086513155", "DOI": "10.1142/9789812839664_0016", "CorpusId": 18062534 }, "url": "https://www.semanticscholar.org/paper/ccdc692aed351de71979bd4726cfd0a3b714aab2", "referenceCount": 10, "citationCount": 1889, "influentialCitationCount": 64, "isOpenAccess": false, "fieldsOfStudy": [ "Materials Science" ] }, { "title": "Ab initio calculation of the potential energy surface for the dissociation of H_2 on the sulfur-covered Pd(100) surface", "abstract": "The presence of sulfur atoms on the Pd(100) surface is known to hinder the dissociative adsorption of hydrogen. Using density-functional theory and the full-potential linear augmented plane-wave method, we investigate the potential energy surface (PES) of the dissociative adsorption of H_2 on the sulfur covered Pd(100) surface. The PES is changed significantly compared to the dissociation on the clean Pd(100) surface, in particular for hydrogen close to the S atoms. While the hydrogen dissociation at the clean Pd(100) surface is non-activated, for the (2x2) sulfur adlayer (coverage Theta_S= 0.25) the dissociation of H_2 is inhibited by energy barriers. Their heights strongly depend on the distance between the hydrogen and sulfur atoms leading to a highly corrugated PES. The largest barriers are in the vicinity of the sulfur atoms due to the strong repulsion between sulfur and hydrogen. Still the hydrogen dissociation on the (2x2) sulfur covered Pd(100) surface is exothermic. Thus the poisoning effect of sulfur adatoms for H_2 dissociation at low sulfur coverage (Theta_S <= 0.25) is mainly governed by the formation of energy barriers, not by blocking of the adsorption sites. For the c(2x2) sulfur adlayer (Theta_S= 0.5), the PES for hydrogen dissociation is purely repulsive. This is due to the fact that for all different possible adsorption geometries the hydrogen molecules come too close to the sulfur adatoms before the dissociation is completed.", "year": 1997, "venue": "", "authors": [ "C. Wei", "A. Gross", "M. Scheffler" ], "externalIds": { "MAG": "2057748852", "ArXiv": "cond-mat/9711289", "DOI": "10.1103/PhysRevB.57.15572", "CorpusId": 108286250 }, "url": "https://www.semanticscholar.org/paper/ac22def514f056248d8ff76379f3429aad38698c", "referenceCount": 0, "citationCount": 41, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Chemistry" ] }, { "title": "Inhomogeneous Electron Gas", "abstract": null, "year": 1973, "venue": "", "authors": [ "A. Rajagopal", "J. Callaway" ], "externalIds": { "MAG": "2007833519", "DOI": "10.1103/PHYSREVB.7.1912", "CorpusId": 120172164 }, "url": "https://www.semanticscholar.org/paper/1b91abc1cffc258b6b0d83a63af3260427b9be7a", "referenceCount": 0, "citationCount": 33216, "influentialCitationCount": 1486, "isOpenAccess": false, "fieldsOfStudy": [ "Physics" ] }, { "title": "Study", "abstract": null, "year": 2022, "venue": "Evaluation Methods in Biomedical and Health Informatics", "authors": [ "Charles P. 
Friedman", "Jeremy C. Wyatt", "Joan S. Ash" ], "externalIds": { "DOI": "10.1007/978-3-030-86453-8_5", "CorpusId": 246765060 }, "url": "https://www.semanticscholar.org/paper/1fe71c38973f6fd76cac9731a388201de3f83bc4", "referenceCount": 312, "citationCount": 2772, "influentialCitationCount": 55, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Gaussian Processes for Machine Learning . Vol. 2. 3", "abstract": null, "year": 2006, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Test if the adsorbent falls inside the convex hull of the cluster", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Symmetries and Expressive Requirements for Learning General Policies": { "paper_title": "Symmetries and Expressive Requirements for Learning General Policies", "arxiv_id": "2409.15892v1", "keyword": "graph neural network", "authors": [ "Dominik Drexler", "Simon Ståhlberg", "Blai Bonet", "Hector Geffner" ], "references": [ { "title": "Expressiveness of Graph Neural Networks in Planning Domains", "abstract": "Graph Neural Networks (GNNs) have become the standard method of choice for learning with structured data, demonstrating particular promise in classical planning. Their inherent invariance under symmetries of the input graphs endows them with superior generalization capabilities, compared to their symmetry-oblivious counterparts. However, this comes at the cost of limited expressive power. Particularly, GNNs cannot distinguish between graphs that satisfy identical sentences of C2 logic.\n \nTo leverage GNNs for learning policies in PDDL domains, one needs to encode the contextual representation of the planning states as graphs. The expressiveness of this encoding, coupled with a specific GNN architecture, then hinges on the absence of indistinguishable states necessitating distinct actions. This paper provides a comprehensive theoretical and statistical exploration of such situations in PDDL domains across diverse natural encoding schemes and GNN models.", "year": 2024, "venue": "International Conference on Automated Planning and Scheduling", "authors": [ "Rostislav Horcík", "Gustav Šír" ], "externalIds": { "DBLP": "conf/icaps/HorcikS24", "DOI": "10.1609/icaps.v34i1.31486", "CorpusId": 270169177 }, "url": "https://www.semanticscholar.org/paper/ae683dbd44ec508f63254d864f83d6c1006dd652", "referenceCount": 0, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning General Policies for Classical Planning Domains: Getting Beyond C2", "abstract": "GNN-based approaches for learning general policies across planning domains are limited by the expressive power of $C_2$, namely; first-order logic with two variables and counting. This limitation can be overcomed by transitioning to $k$-GNNs, for $k=3$, wherein object embeddings are substituted with triplet embeddings. Yet, while $3$-GNNs have the expressive power of $C_3$, unlike $1$- and $2$-GNNs that are confined to $C_2$, they require quartic time for message exchange and cubic space for embeddings, rendering them impractical. In this work, we introduce a parameterized version of relational GNNs. 
When $t$ is infinity, R-GNN[$t$] approximates $3$-GNNs using only quadratic space for embeddings. For lower values of $t$, such as $t=1$ and $t=2$, R-GNN[$t$] achieves a weaker approximation by exchanging fewer messages, yet interestingly, often yield the $C_3$ features required in several planning domains. Furthermore, the new R-GNN[$t$] architecture is the original R-GNN architecture with a suitable transformation applied to the input states only. Experimental results illustrate the clear performance gains of R-GNN[$1$] and R-GNN[$2$] over plain R-GNNs, and also over edge transformers that also approximate $3$-GNNs.", "year": 2024, "venue": "arXiv.org", "authors": [ "Simon Ståhlberg", "Blai Bonet", "Hector Geffner" ], "externalIds": { "DBLP": "journals/corr/abs-2403-11734", "ArXiv": "2403.11734", "DOI": "10.48550/arXiv.2403.11734", "CorpusId": 268531114 }, "url": "https://www.semanticscholar.org/paper/f895fab081fc45aa969e48eb3e51a28d777fcacc", "referenceCount": 47, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning General Policies with Policy Gradient Methods", "abstract": "While reinforcement learning methods have delivered remarkable results in a number of settings, generalization, i.e., the ability to produce policies that generalize in a reliable and systematic way, has remained a challenge. The problem of generalization has been addressed formally in classical planning where provable correct policies that generalize over all instances of a given domain have been learned using combinatorial methods. The aim of this work is to bring these two research threads together to illuminate the conditions under which (deep) reinforcement learning approaches, and in particular, policy optimization methods, can be used to learn policies that generalize like combinatorial methods do. We draw on lessons learned from previous combinatorial and deep learning approaches, and extend them in a convenient way. From the former, we model policies as state transition classifiers, as (ground) actions are not general and change from instance to instance. From the latter, we use graph neural networks (GNNs) adapted to deal with relational structures for representing value functions over planning states, and in our case, policies. With these ingredients in place, we find that actor-critic methods can be used to learn policies that generalize almost as well as those obtained using combinatorial approaches while avoiding the scalability bottleneck and the use of feature pools. Moreover, the limitations of the DRL methods on the benchmarks considered have little to do with deep learning or reinforcement learning algorithms, and result from the well-understood expressive limitations of GNNs, and the tradeoff between optimality and generalization (general policies cannot be optimal in some domains). 
Both of these limitations are addressed without changing the basic DRL methods by adding derived predicates and an alternative cost structure to optimize.", "year": 2023, "venue": "International Conference on Principles of Knowledge Representation and Reasoning", "authors": [ "Simon Ståhlberg", "Blai Bonet", "Hector Geffner" ], "externalIds": { "DBLP": "conf/kr/StahlbergBG23", "DOI": "10.24963/kr.2023/63", "CorpusId": 260410754 }, "url": "https://www.semanticscholar.org/paper/9e095b84dbdbbbc69e33f272f46e500de0234f8c", "referenceCount": 65, "citationCount": 10, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Hierarchical Decompositions and Termination Analysis for Generalized Planning", "abstract": "This paper presents new methods for analyzing and evaluating generalized plans that can solve broad classes of related planning problems. Although synthesis and learning of generalized plans has been a longstanding goal in AI, it remains challenging due to fundamental gaps in methods for analyzing the scope and utility of a given generalized plan. This paper addresses these gaps by developing a new conceptual framework along with proof techniques and algorithmic processes for assessing termination and goal-reachability related properties of generalized plans. We build upon classic results from graph theory to decompose generalized plans into smaller components that are then used to derive hierarchical termination arguments. These methods can be used to determine the utility of a given generalized plan, as well as to guide the synthesis and learning processes for generalized plans. We present theoretical as well as empirical results illustrating the scope of this new approach. Our analysis shows that this approach significantly extends the class of generalized plans that can be assessed automatically, thereby reducing barriers in the synthesis and learning of reliable generalized plans.", "year": 2022, "venue": "Journal of Artificial Intelligence Research", "authors": [ "Siddharth Srivastava" ], "externalIds": { "DBLP": "journals/jair/Srivastava23", "ArXiv": "2212.02823", "DOI": "10.1613/jair.1.14185", "CorpusId": 259262651 }, "url": "https://www.semanticscholar.org/paper/c8db2d4caad3d6f21f42f55da9cc224de7ea40d9", "referenceCount": 46, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Generalized Policies Without Supervision Using GNNs", "abstract": "We consider the problem of learning generalized policies for classical planning domains using graph neural networks from small instances represented in lifted STRIPS. The problem has been considered before but the proposed neural architectures are complex and the results are often mixed. In this work, we use a simple and general GNN architecture and aim at obtaining crisp experimental results and a deeper understanding: either the policy greedy in the learned value function achieves close to 100% generalization over instances larger than those used in training, or the failure must be understood, and possibly fixed, logically. For this, we exploit the relation established between the expressive power of GNNs and the C2 fragment of first-order logic (namely, FOL with 2 variables and counting quantifiers). 
We find for example that domains with general policies that require more expressive features can be solved with GNNs once the states are extended with suitable \"derived atoms\" encoding role compositions and transitive closures that do not fit into C2. The work follows an existing approach based on GNNs for learning optimal general policies in a supervised fashion, but the learned policies are no longer required to be optimal (which expands the scope, as many planning domains do not have general optimal policies) and are learned without supervision. Interestingly, value-based reinforcement learning methods that aim to produce optimal policies, do not always yield policies that generalize, as the goals of optimality and generality are in conflict in domains where optimal planning is NP-hard.", "year": 2022, "venue": "International Conference on Principles of Knowledge Representation and Reasoning", "authors": [ "Simon Ståhlberg", "Blai Bonet", "Hector Geffner" ], "externalIds": { "ArXiv": "2205.06002", "DBLP": "journals/corr/abs-2205-06002", "DOI": "10.48550/arXiv.2205.06002", "CorpusId": 248721898 }, "url": "https://www.semanticscholar.org/paper/360813ee47a6d53a3f1f82ba29dab725144bc279", "referenceCount": 52, "citationCount": 21, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PG3: Policy-Guided Planning for Generalized Policy Generation", "abstract": "A longstanding objective in classical planning is to synthesize policies that generalize across multiple problems from the same domain. In this work, we study generalized policy search-based methods with a focus on the score function used to guide the search over policies. We demonstrate limitations of two score functions --- policy evaluation and plan comparison --- and propose a new approach that overcomes these limitations. The main idea behind our approach, Policy-Guided Planning for Generalized Policy Generalization (PG3), is that a candidate policy should be used to guide planning on training problems as a mechanism for evaluating that candidate. Theoretical results in a simplified setting give conditions under which PG3 is optimal or admissible. We then study a specific instantiation of policy search where planning problems are PDDL-based and policies are lifted decision lists. Empirical results in six domains confirm that PG3 learns generalized policies more efficiently and effectively than several baselines.", "year": 2022, "venue": "International Joint Conference on Artificial Intelligence", "authors": [ "Ryan Yang", "Tom Silver", "Aidan Curtis", "Tomas Lozano-Perez", "L. Kaelbling" ], "externalIds": { "DBLP": "journals/corr/abs-2204-10420", "ArXiv": "2204.10420", "DOI": "10.48550/arXiv.2204.10420", "CorpusId": 248366629 }, "url": "https://www.semanticscholar.org/paper/a7ae577fca451ea64e19b918bcc2598d7e50eb73", "referenceCount": 44, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning General Optimal Policies with Graph Neural Networks: Expressive Power, Transparency, and Limits", "abstract": "It has been recently shown that general policies for many classical planning domains can be expressed and learned in terms of a pool of features defined from the domain predicates using a description logic grammar. 
At the same time, most description logics correspond to a fragment of k-variable counting logic (C_k) for k=2, that has been shown to provide a tight characterization of the expressive power of graph neural networks. In this work, we make use of these results to understand the power and limits of using graph neural networks (GNNs) for learning optimal general policies over a number of tractable planning domains where such policies are known to exist. For this, we train a simple GNN in a supervised manner to approximate the optimal value function V*(s) of a number of sample states s. As predicted by the theory, it is observed that general optimal policies are obtained in domains where general optimal value functions can be defined with C_2 features but not in those requiring more expressive C_3 features. In addition, it is observed that the features learned are in close correspondence with the features needed to express V* in closed form. The theory and the analysis of the domains let us understand the features that are actually learned as well as those that cannot be learned in this way, and let us move in a principled manner from a combinatorial optimization approach to learning general policies to a potentially, more robust and scalable approach based on deep learning.", "year": 2021, "venue": "International Conference on Automated Planning and Scheduling", "authors": [ "Simon Ståhlberg", "Blai Bonet", "Hector Geffner" ], "externalIds": { "DBLP": "journals/corr/abs-2109-10129", "ArXiv": "2109.10129", "DOI": "10.1609/icaps.v32i1.19851", "CorpusId": 237581157 }, "url": "https://www.semanticscholar.org/paper/30fb5a3fecb56f775f582652abb2d4df9291bc3e", "referenceCount": 58, "citationCount": 35, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning General Planning Policies from Small Examples Without Supervision", "abstract": "Generalized planning is concerned with the computation of general policies that solve multiple instances of a planning domain all at once. It has been recently shown that these policies can be computed in two steps: first, a suitable abstraction in the form of a qualitative numerical planning problem (QNP) is learned from sample plans, then the general policies are obtained from the learned QNP using a planner. In this work, we introduce an alternative approach for computing more expressive general policies which does not require sample plans or a QNP planner. The new formulation is very simple and can be cast in terms that are more standard in machine learning: a large but finite pool of features is defined from the predicates in the planning examples using a general grammar, and a small subset of features is sought for separating “good” from “bad” state transitions, and goals from non-goals. The problems of finding such a “separating surface” while labeling the transitions as “good” or “bad” are jointly addressed as a single combinatorial optimization problem expressed as a Weighted Max-SAT problem. The advantage of looking for the simplest policy in the given feature space that solves the given examples, possibly non-optimally, is that many domains have no general, compact policies that are optimal. 
The approach yields general policies for a number of benchmark domains.", "year": 2021, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Guillem Francès", "Blai Bonet", "Hector Geffner" ], "externalIds": { "DBLP": "conf/aaai/FrancesBG21", "DOI": "10.1609/aaai.v35i13.17402", "CorpusId": 235349223 }, "url": "https://www.semanticscholar.org/paper/42c5c5a8e52e037ceb3e6dfa96fdf134d085d184", "referenceCount": 0, "citationCount": 28, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Logic of Graph Neural Networks", "abstract": "Graph neural networks (GNNs) are deep learning architectures for machine learning problems on graphs. It has recently been shown that the expressiveness of GNNs can be characterised precisely by the combinatorial Weisfeiler-Leman algorithms and by finite variable counting logics. The correspondence has even led to new, higher-order GNNs corresponding to the WL algorithm in higher dimensions.The purpose of this paper is to explain these descriptive characterisations of GNNs.", "year": 2021, "venue": "Logic in Computer Science", "authors": [ "Martin Grohe" ], "externalIds": { "DBLP": "conf/lics/Grohe21", "ArXiv": "2104.14624", "DOI": "10.1109/LICS52264.2021.9470677", "CorpusId": 233476550 }, "url": "https://www.semanticscholar.org/paper/dc08220b483bcbe72f09d342cf20bfc739ed540a", "referenceCount": 65, "citationCount": 78, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Generalized Planning With Deep Reinforcement Learning", "abstract": "A hallmark of intelligence is the ability to deduce general principles from examples, which are correct beyond the range of those observed. Generalized Planning deals with finding such principles for a class of planning problems, so that principles discovered using small instances of a domain can be used to solve much larger instances of the same domain. In this work we study the use of Deep Reinforcement Learning and Graph Neural Networks to learn such generalized policies and demonstrate that they can generalize to instances that are orders of magnitude larger than those they were trained on.", "year": 2020, "venue": "arXiv.org", "authors": [ "Or Rivlin", "Tamir Hazan", "E. Karpas" ], "externalIds": { "MAG": "3023380900", "DBLP": "journals/corr/abs-2005-02305", "ArXiv": "2005.02305", "CorpusId": 218502552 }, "url": "https://www.semanticscholar.org/paper/e5b942906281d4c13244d9528271335781af829d", "referenceCount": 30, "citationCount": 44, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ASNets: Deep Learning for Generalised Planning", "abstract": "In this paper, we discuss the learning of generalised policies for probabilistic and classical planning problems using Action Schema Networks (ASNets). The ASNet is a neural network architecture that exploits the relational structure of (P)PDDL planning problems to learn a common set of weights that can be applied to any problem in a domain. By mimicking the actions chosen by a traditional, non-learning planner on a handful of small problems in a domain, ASNets are able to learn a generalised reactive policy that can quickly solve much larger instances from the domain. This work extends the ASNet architecture to make it more expressive, while still remaining invariant to a range of symmetries that exist in PPDDL problems. 
We also present a thorough experimental evaluation of ASNets, including a comparison with heuristic search planners on seven probabilistic and deterministic domains, an extended evaluation on over 18,000 Blocksworld instances, and an ablation study. Finally, we show that sparsity-inducing regularisation can produce ASNets that are compact enough for humans to understand, yielding insights into how the structure of ASNets allows them to generalise across a domain.", "year": 2019, "venue": "Journal of Artificial Intelligence Research", "authors": [ "S. Toyer", "Felipe W. Trevizan", "S. Thiébaux", "Lexing Xie" ], "externalIds": { "DBLP": "journals/corr/abs-1908-01362", "ArXiv": "1908.01362", "MAG": "2965246582", "DOI": "10.1613/jair.1.11633", "CorpusId": 199442346 }, "url": "https://www.semanticscholar.org/paper/fa31dd72183e18f37ecdfde510676239b3836b07", "referenceCount": 82, "citationCount": 62, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Generalized Planning via Abstraction: Arbitrary Numbers of Objects", "abstract": "We consider a class of generalized planning problems based on the idea of quantifying over sets of similar objects. We show how we can adapt fully observable nondeterministic planning techniques to produce generalized solutions that are easy to instantiate over particular problem instances. We also describe how we can reformulate a classical planning problem into a quantified one. The reformulation allows us to solve the original planning task without grounding every action with respect to all objects in the problem, and a single solution can be applied to a possibly infinite set of related classical planning tasks. We report experimental results that show our approach is a practical and promising technique for solving an interesting class of problems.", "year": 2019, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "León Illanes", "Sheila A. McIlraith" ], "externalIds": { "DBLP": "conf/aaai/IllanesM19", "MAG": "2904811519", "DOI": "10.1609/AAAI.V33I01.33017610", "CorpusId": 69417956 }, "url": "https://www.semanticscholar.org/paper/f6992d6f485b613a26c9f9d303374266a4c19d20", "referenceCount": 26, "citationCount": 33, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Theoretical Foundations for Structural Symmetries of Lifted PDDL Tasks", "abstract": "We transfer the notion of structural symmetries to lifted planning task representations, based on abstract structures which we define to model planning tasks. We show that symmetries are preserved by common grounding methods and we shed some light on the relation to previous symmetry concepts used in planning. Using a suitable graph representation of lifted tasks, our experimental analysis of common planning benchmarks reveals that symmetries occur in the lifted representation of many domains. 
Our work establishes the theoretical ground for exploiting symmetries beyond their previous scope, such as for faster grounding and mutex generation, as well as for state space transformations and reductions.", "year": 2019, "venue": "International Conference on Automated Planning and Scheduling", "authors": [ "Silvan Sievers", "Gabriele Röger", "Martin Wehrle", "Michael Katz" ], "externalIds": { "DBLP": "conf/aips/SieversRW019", "MAG": "2966405739", "DOI": "10.1609/icaps.v29i1.3509", "CorpusId": 197418546 }, "url": "https://www.semanticscholar.org/paper/7a2837d2c5a50414cfc906a8b9764dc636df4396", "referenceCount": 51, "citationCount": 22, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Features and Abstract Actions for Computing Generalized Plans", "abstract": "Generalized planning is concerned with the computation of plans that solve not one but multiple instances of a planning domain. Recently, it has been shown that generalized plans can be expressed as mappings of feature values into actions, and that they can often be computed with fully observable non-deterministic (FOND) planners. The actions in such plans, however, are not the actions in the instances themselves, which are not necessarily common to other instances, but abstract actions that are defined on a set of common features. The formulation assumes that the features and the abstract actions are given. In this work, we address this limitation by showing how to learn them automatically. The resulting account of generalized planning combines learning and planning in a novel way: a learner, based on a Max SAT formulation, yields the features and abstract actions from sampled state transitions, and a FOND planner uses this information, suitably transformed, to produce the general plans. Correctness guarantees are given and experimental results on several domains are reported.", "year": 2018, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Blai Bonet", "Guillem Francès", "Hector Geffner" ], "externalIds": { "MAG": "2952922965", "DBLP": "conf/aaai/BonetFG19", "ArXiv": "1811.07231", "DOI": "10.1609/aaai.v33i01.33012703", "CorpusId": 53720294 }, "url": "https://www.semanticscholar.org/paper/481e8bc7591290403ef2b0a0bb7a50566c6d6ca0", "referenceCount": 34, "citationCount": 53, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Transfer of Deep Reactive Policies for MDP Planning", "abstract": "Domain-independent probabilistic planners input an MDP description in a factored representation language such as PPDDL or RDDL, and exploit the specifics of the representation for faster planning. Traditional algorithms operate on each problem instance independently, and good methods for transferring experience from policies of other instances of a domain to a new instance do not exist. Recently, researchers have begun exploring the use of deep reactive policies, trained via deep reinforcement learning (RL), for MDP planning domains. One advantage of deep reactive policies is that they are more amenable to transfer learning. \nIn this paper, we present the first domain-independent transfer algorithm for MDP planning domains expressed in an RDDL representation. Our architecture exploits the symbolic state configuration and transition function of the domain (available via RDDL) to learn a shared embedding space for states and state-action pairs for all problem instances of a domain. 
We then learn an RL agent in the embedding space, making a near zero-shot transfer possible, i.e., without much training on the new instance, and without using the domain simulator at all. Experiments on three different benchmark domains underscore the value of our transfer algorithm. Compared against planning from scratch, and a state-of-the-art RL transfer algorithm, our transfer solution has significantly superior learning curves.", "year": 2018, "venue": "Neural Information Processing Systems", "authors": [ "Aniket Bajpai", "Sankalp Garg", "Mausam" ], "externalIds": { "DBLP": "journals/corr/abs-1810-11488", "ArXiv": "1810.11488", "MAG": "2949073278", "CorpusId": 53103107 }, "url": "https://www.semanticscholar.org/paper/f2d326b27258cc883718f745229024f676a30a2b", "referenceCount": 30, "citationCount": 31, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Weisfeiler and Leman Go Neural: Higher-order Graph Neural Networks", "abstract": "In recent years, graph neural networks (GNNs) have emerged as a powerful neural architecture to learn vector representations of nodes and graphs in a supervised, end-to-end fashion. Up to now, GNNs have only been evaluated empirically—showing promising results. The following work investigates GNNs from a theoretical point of view and relates them to the 1-dimensional Weisfeiler-Leman graph isomorphism heuristic (1-WL). We show that GNNs have the same expressiveness as the 1-WL in terms of distinguishing non-isomorphic (sub-)graphs. Hence, both algorithms also have the same shortcomings. Based on this, we propose a generalization of GNNs, so-called k-dimensional GNNs (k-GNNs), which can take higher-order graph structures at multiple scales into account. These higher-order structures play an essential role in the characterization of social networks and molecule graphs. Our experimental evaluation confirms our theoretical findings as well as confirms that higher-order information is useful in the task of graph classification and regression.", "year": 2018, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Christopher Morris", "Martin Ritzert", "Matthias Fey", "William L. Hamilton", "J. E. Lenssen", "Gaurav Rattan", "Martin Grohe" ], "externalIds": { "MAG": "2962810718", "ArXiv": "1810.02244", "DBLP": "journals/corr/abs-1810-02244", "DOI": "10.1609/aaai.v33i01.33014602", "CorpusId": 52919090 }, "url": "https://www.semanticscholar.org/paper/6ea57a2aea08ce0628c93f77bdc24c2f3e9cc6da", "referenceCount": 53, "citationCount": 1413, "influentialCitationCount": 199, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "How Powerful are Graph Neural Networks?", "abstract": "Graph Neural Networks (GNNs) are an effective framework for representation learning of graphs. GNNs follow a neighborhood aggregation scheme, where the representation vector of a node is computed by recursively aggregating and transforming representation vectors of its neighboring nodes. Many GNN variants have been proposed and have achieved state-of-the-art results on both node and graph classification tasks. However, despite GNNs revolutionizing graph representation learning, there is limited understanding of their representational properties and limitations. Here, we present a theoretical framework for analyzing the expressive power of GNNs to capture different graph structures. 
Our results characterize the discriminative power of popular GNN variants, such as Graph Convolutional Networks and GraphSAGE, and show that they cannot learn to distinguish certain simple graph structures. We then develop a simple architecture that is provably the most expressive among the class of GNNs and is as powerful as the Weisfeiler-Lehman graph isomorphism test. We empirically validate our theoretical findings on a number of graph classification benchmarks, and demonstrate that our model achieves state-of-the-art performance.", "year": 2018, "venue": "International Conference on Learning Representations", "authors": [ "Keyulu Xu", "Weihua Hu", "J. Leskovec", "S. Jegelka" ], "externalIds": { "MAG": "2950468517", "ArXiv": "1810.00826", "DBLP": "journals/corr/abs-1810-00826", "CorpusId": 52895589 }, "url": "https://www.semanticscholar.org/paper/62ed9bf1d83c8db1f9cbf92ea2f57ea90ef683d9", "referenceCount": 45, "citationCount": 6421, "influentialCitationCount": 1375, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Features, Projections, and Representation Change for Generalized Planning", "abstract": "Generalized planning is concerned with the characterization and computation of plans that solve many instances at once. In the standard formulation, a generalized plan is a mapping from fea- ture or observation histories into actions, assuming that the instances share a common pool of features and actions. This assumption, however, excludes the standard relational planning domains where actions and objects change across instances. In this work, we extend the standard formulation of generalized planning to such domains. This is achieved by projecting the actions over the features, resulting in a common set of abstract actions which can be tested for soundness and completeness, and which can be used for generating general policies such as “if the gripper is empty, pick the clear block above x and place it on the table” that achieve the goal clear(x) in any Blocksworld instance. In this policy, “pick the clear block above x” is an abstract action that may represent the action Unstack(a, b) in one situation and the action Unstack(b, c) in another. Transformations are also introduced for computing such policies by means of fully observable non-deterministic (FOND) planners. The value of generalized representations for learning general policies is also discussed.", "year": 2018, "venue": "International Joint Conference on Artificial Intelligence", "authors": [ "Blai Bonet", "Hector Geffner" ], "externalIds": { "DBLP": "journals/corr/abs-1801-10055", "MAG": "2785422319", "ArXiv": "1801.10055", "DOI": "10.24963/ijcai.2018/649", "CorpusId": 31431550 }, "url": "https://www.semanticscholar.org/paper/c7fc148f76383818adb53f90601423e6b28da122", "referenceCount": 33, "citationCount": 46, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Neural Message Passing for Quantum Chemistry", "abstract": "Supervised learning on molecules has incredible potential to be useful in chemistry, drug discovery, and materials science. Luckily, several promising and closely related neural network models invariant to molecular symmetries have already been described in the literature. These models learn a message passing algorithm and aggregation procedure to compute a function of their entire input graph. 
At this point, the next step is to find a particularly effective variant of this general approach and apply it to chemical prediction benchmarks until we either solve them or reach the limits of the approach. In this paper, we reformulate existing models into a single common framework we call Message Passing Neural Networks (MPNNs) and explore additional novel variations within this framework. Using MPNNs we demonstrate state of the art results on an important molecular property prediction benchmark; these results are strong enough that we believe future work should focus on datasets with larger molecules or more accurate ground truth labels.", "year": 2017, "venue": "International Conference on Machine Learning", "authors": [ "J. Gilmer", "S. Schoenholz", "Patrick F. Riley", "O. Vinyals", "George E. Dahl" ], "externalIds": { "DBLP": "journals/corr/GilmerSRVD17", "MAG": "2952254971", "ArXiv": "1704.01212", "CorpusId": 9665943 }, "url": "https://www.semanticscholar.org/paper/e24cdf73b3e7e590c2fe5ecac9ae8aa983801367", "referenceCount": 37, "citationCount": 6566, "influentialCitationCount": 751, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Heuristics and Symmetries in Classical Planning", "abstract": "\n \n Heuristic search is a state-of-the-art approach to classical planning. Several heuristic families were developed over the years to automatically estimate goal distance information from problem descriptions. Orthogonally to the development of better heuristics, recent years have seen an increasing interest in symmetry-based state space pruning techniques that aim at reducing the search effort. However, little work has dealt with how the heuristics behave under symmetries. We investigate the symmetry properties of existing heuristics and reveal that many of them are invariant under symmetries.\n \n", "year": 2015, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Alexander Shleyfman", "Michael Katz", "M. Helmert", "Silvan Sievers", "Martin Wehrle" ], "externalIds": { "MAG": "2180705168", "DBLP": "conf/aaai/Shleyfman0HSW15", "DOI": "10.1609/aaai.v29i1.9649", "CorpusId": 1963487 }, "url": "https://www.semanticscholar.org/paper/c63ba6a0c676cebc34bfcdd0153b8258f1a53016", "referenceCount": 37, "citationCount": 56, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Planning with Pattern Databases", "abstract": "Heuristic search planning effectively finds solutions for large planning problems, but since the estimates are either not admissible or too weak, optimal solutions are found in rare cases only. In contrast, heuristic pattern databases are known to significantly improve lower bound estimates for optimally solving challenging single-agent problems like the 24-Puzzle or Rubik’s Cube. This paper studies the effect of pattern databases in the context of deterministic planning. Given a fixed state description based on instantiated predicates, we provide a general abstraction scheme to automatically create admissible domain-independent memory-based heuristics for planning problems, where abstractions are found in factorizing the planning space. We evaluate the impact of pattern database heuristics in A* and hill climbing algorithms for a collection of benchmark domains.", "year": 2014, "venue": "", "authors": [ "S. 
Edelkamp" ], "externalIds": { "MAG": "1575990843", "CorpusId": 1254338 }, "url": "https://www.semanticscholar.org/paper/babe801ed7eaeaa865d76b4caabee7c2c7a6e9a5", "referenceCount": 38, "citationCount": 223, "influentialCitationCount": 16, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Practical graph isomorphism, II", "abstract": null, "year": 2013, "venue": "Journal of symbolic computation", "authors": [ "B. McKay", "A. Piperno" ], "externalIds": { "DBLP": "journals/jsc/McKayP14", "MAG": "2949746408", "ArXiv": "1301.1493", "DOI": "10.1016/J.JSC.2013.09.003", "CorpusId": 17930927 }, "url": "https://www.semanticscholar.org/paper/9f24f15610b487677f3a6d08b59705b5e98d20bf", "referenceCount": 39, "citationCount": 1517, "influentialCitationCount": 108, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Introduction to Bisimulation and Coinduction", "abstract": "Induction is a pervasive tool in computer science and mathematics for defining objects and reasoning on them. Coinduction is the dual of induction and as such it brings in quite different tools. Today, it is widely used in computer science, but also in other fields, including artificial intelligence, cognitive science, mathematics, modal logics, philosophy and physics. The best known instance of coinduction is bisimulation, mainly employed to define and prove equalities among potentially infinite objects: processes, streams, non-well-founded sets, etc. This book presents bisimulation and coinduction: the fundamental concepts and techniques and the duality with induction. Each chapter contains exercises and selected solutions, enabling students to connect theory with practice. A special emphasis is placed on bisimulation as a behavioural equivalence for processes. Thus the book serves as an introduction to models for expressing processes (such as process calculi) and to the associated techniques of operational and algebraic analysis.", "year": 2011, "venue": "", "authors": [ "D. Sangiorgi" ], "externalIds": { "MAG": "369805404", "DOI": "10.1017/cbo9780511777110", "CorpusId": 7025282 }, "url": "https://www.semanticscholar.org/paper/06a03356ca9017bc9e95d57ef5a6760db47d4604", "referenceCount": 3, "citationCount": 307, "influentialCitationCount": 24, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Approximate Policy Iteration with a Policy Language Bias: Solving Relational Markov Decision Processes", "abstract": "We study an approach to policy selection for large relational Markov Decision Processes (MDPs). We consider a variant of approximate policy iteration (API) that replaces the usual value-function learning step with a learning step in policy space. This is advantageous in domains where good policies are easier to represent and learn than the corresponding value functions, which is often the case for the relational MDPs we are interested in. In order to apply API to such problems, we introduce a relational policy language and corresponding learner. In addition, we introduce a new bootstrapping routine for goal-based planning domains, based on random walks. Such bootstrapping is necessary for many large relational MDPs, where reward is extremely sparse, as API is ineffective in such domains when initialized with an uninformed policy. Our experiments show that the resulting system is able to find good policies for a number of classical planning domains and their stochastic variants by solving them as extremely large relational MDPs. 
The experiments also point to some limitations of our approach, suggesting future work.", "year": 2011, "venue": "Journal of Artificial Intelligence Research", "authors": [ "Alan Fern", "R. Givan", "S. Yoon" ], "externalIds": { "DBLP": "journals/corr/abs-1109-2156", "ArXiv": "1109.2156", "MAG": "2150821861", "DOI": "10.1613/jair.1700", "CorpusId": 265100821 }, "url": "https://www.semanticscholar.org/paper/0f0888750bc0b40dd8b8d07417666ffb1e6d387c", "referenceCount": 120, "citationCount": 56, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Exploiting Problem Symmetries in State-Based Planners", "abstract": "\n \n Previous research in Artificial Intelligence has identified the possibility of simplifying planning problems via the identification and exploitation of symmetries. We advance the state of the art in algorithms that exploit symmetry in planning problems by generalizing previous approaches, and applying symmetry reductions to state-based planners. We suggest several algorithms for symmetry exploitation in state-based search, but also provide a comprehensive view through which additional algorithms can be developed and fine-tuned. We evaluate our approach to symmetry exploitation on instances from previous planning competitions, and demonstrate that our algorithms significantly improve the solution time of instances with symmetries.\n \n", "year": 2011, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Nir Pochter", "Aviv Zohar", "J. Rosenschein" ], "externalIds": { "MAG": "1538383780", "DBLP": "conf/aaai/PochterZR11", "DOI": "10.1609/aaai.v25i1.8014", "CorpusId": 470663 }, "url": "https://www.semanticscholar.org/paper/e05a42b5a6b4b89f297a27d314f8fd12ea63f561", "referenceCount": 16, "citationCount": 83, "influentialCitationCount": 12, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A new representation and associated algorithms for generalized planning", "abstract": null, "year": 2011, "venue": "Artificial Intelligence", "authors": [ "Siddharth Srivastava", "N. Immerman", "S. Zilberstein" ], "externalIds": { "DBLP": "journals/ai/SrivastavaIZ11", "MAG": "2127972144", "DOI": "10.1016/j.artint.2010.10.006", "CorpusId": 2320080 }, "url": "https://www.semanticscholar.org/paper/39730afc957e58e14fab73a5cb616c6f97fe9762", "referenceCount": 40, "citationCount": 88, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Practical solution techniques for first-order MDPs", "abstract": null, "year": 2009, "venue": "Artificial Intelligence", "authors": [ "S. Sanner", "Craig Boutilier" ], "externalIds": { "MAG": "2089730874", "DBLP": "journals/ai/SannerB09", "DOI": "10.1016/j.artint.2008.11.003", "CorpusId": 10510302 }, "url": "https://www.semanticscholar.org/paper/8defd9a7a432a3be38ea70f7f8abe2762832e0d3", "referenceCount": 95, "citationCount": 104, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Domain-Independent Construction of Pattern Database Heuristics for Cost-Optimal Planning", "abstract": "Heuristic search is a leading approach to domain-independent planning. For cost-optimal planning, however, existing admissible heuristics are generally too weak to effectively guide the search. 
Pattern database heuristics (PDBs), which are based on abstractions of the search space, are currently one of the most promising approaches to developing better admissible heuristics. The informedness of PDB heuristics depends crucially on the selection of appropriate abstractions (patterns). Although PDBs have been applied to many search problems, including planning, there are not many insights into how to select good patterns, even manually. What constitutes a good pattern depends on the problem domain, making the task even more difficult for domain-independent planning, where the process needs to be completely automatic and generaL We present a novel way of constructing good patterns automatically from the specification of planning problem instances. We demonstrate that this allows a domain-independent planner to solve planning problems optimally in some very challenging domains, including a STRIPS formulation of the Sokoban puzzle.", "year": 2007, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "P. Haslum", "A. Botea", "M. Helmert", "Blai Bonet", "Sven Koenig" ], "externalIds": { "DBLP": "conf/aaai/HaslumBHBK07", "MAG": "1534467528", "CorpusId": 6936785 }, "url": "https://www.semanticscholar.org/paper/98bdbfa67767d70423c7fb268ac77785347b83dd", "referenceCount": 13, "citationCount": 237, "influentialCitationCount": 31, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "First Order Decision Diagrams for Relational MDPs", "abstract": "Markov decision processes capture sequential decision making under uncertainty, where an agent must choose actions so as to optimize long term reward. The paper studies efficient reasoning mechanisms for Relational Markov Decision Processes (RMDP) where world states have an internal relational structure that can be naturally described in terms of objects and relations among them. Two contributions are presented. First, the paper develops First Order Decision Diagrams (FODD), a new compact representation for functions over relational structures, together with a set of operators to combine FODDs, and novel reduction techniques to keep the representation small. Second, the paper shows how FODDs can be used to develop solutions for RMDPs, where reasoning is performed at the abstract level and the resulting optimal policy is independent of domain size (number of objects) or instantiation. In particular, a variant of the value iteration algorithm is developed by using special operations over FODDs, and the algorithm is shown to converge to the optimal policy.", "year": 2007, "venue": "International Joint Conference on Artificial Intelligence", "authors": [ "Chenggang Wang", "Saket Joshi", "R. Khardon" ], "externalIds": { "DBLP": "journals/jair/WangJK08", "ArXiv": "1111.0068", "MAG": "2950465412", "DOI": "10.1613/jair.2489", "CorpusId": 844868 }, "url": "https://www.semanticscholar.org/paper/0a4ecb6d0cec1f6e77da2d69ac56c59c4f6aff5b", "referenceCount": 37, "citationCount": 91, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Symbolic Dynamic Programming for First-Order MDPs", "abstract": "We present a dynamic programming approach for the solution of first-order Markov decisions processes. This technique uses an MDP whose dynamics is represented in a variant of the situation calculus allowing for stochastic actions. 
It produces a logical description of the optimal value function and policy by constructing a set of first-order formulae that minimally partition state space according to distinctions made by the value function and policy. This is achieved through the use of an operation known as decision-theoretic regression. In effect, our algorithm performs value iteration without explicit enumeration of either the state or action spaces of the MDP. This allows problems involving relational fluents and quantification to be solved without requiring explicit state space enumeration or conversion to propositional form.", "year": 2001, "venue": "International Joint Conference on Artificial Intelligence", "authors": [ "Craig Boutilier", "R. Reiter", "Bob Price" ], "externalIds": { "MAG": "1800916125", "DBLP": "conf/ijcai/BoutilierRP01", "CorpusId": 14567948 }, "url": "https://www.semanticscholar.org/paper/9d59f87b881017e6ef0198636a77ad04b7ebea7a", "referenceCount": 14, "citationCount": 286, "influentialCitationCount": 48, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Learning Action Strategies for Planning Domains", "abstract": null, "year": 1999, "venue": "Artificial Intelligence", "authors": [ "R. Khardon" ], "externalIds": { "MAG": "1967346767", "DBLP": "journals/ai/Khardon99", "DOI": "10.1016/S0004-3702(99)00060-0", "CorpusId": 12936409 }, "url": "https://www.semanticscholar.org/paper/697993428021172e5c9b32e536568f491ba39e51", "referenceCount": 54, "citationCount": 150, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An optimal lower bound on the number of variables for graph identification", "abstract": null, "year": 1992, "venue": "Comb.", "authors": [ "Jin-Yi Cai", "Martin Fürer", "N. Immerman" ], "externalIds": { "DBLP": "journals/combinatorica/CaiFI92", "MAG": "2181072414", "DOI": "10.1007/BF01305232", "CorpusId": 13946071 }, "url": "https://www.semanticscholar.org/paper/262d9a9cd7b99a6008e59d440c8fadf1f375b51e", "referenceCount": 46, "citationCount": 588, "influentialCitationCount": 131, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Lifted Successor Generation by Maximum Clique Enumeration", "abstract": null, "year": 2023, "venue": "European Conference on Artificial Intelligence", "authors": [ "Simon Ståhlberg" ], "externalIds": { "DBLP": "conf/ecai/Stahlberg23", "DOI": "10.3233/FAIA230516", "CorpusId": 264290225 }, "url": "https://www.semanticscholar.org/paper/091a730132bb85741cd7433bbb8c563add4c706b", "referenceCount": 0, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "2022. Learning sketches for decomposing planning problems into subproblems of bounded width", "abstract": null, "year": 2022, "venue": "Proc. 
ICAPS", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Graph Representation Learning , volume 14 of Synthesis Lectures on Artificial Intelligence", "abstract": null, "year": 2020, "venue": "and", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "A review of generalized planning", "abstract": "Abstract Generalized planning studies the representation, computation and evaluation of solutions that are valid for multiple planning instances. These are topics studied since the early days of AI. However, in recent years, we are experiencing the appearance of novel formalisms to compactly represent generalized planning tasks, the solutions to these tasks (called generalized plans) and efficient algorithms to compute generalized plans. The paper reviews recent advances in generalized planning and relates them to existing planning formalisms, such as planning with domain control knowledge and approaches for planning under uncertainty, that also aim at generality.", "year": 2019, "venue": "Knowledge engineering review (Print)", "authors": [ "Sergio Jiménez Celorrio", "Javier Segovia Aguas", "Anders Jonsson" ], "externalIds": { "DBLP": "journals/ker/CelorrioAJ19", "MAG": "2921844528", "DOI": "10.1017/S0269888918000231", "CorpusId": 53691145 }, "url": "https://www.semanticscholar.org/paper/41f997048470808e915fa949546c96a6af06296c", "referenceCount": 107, "citationCount": 48, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Logical Expressiveness of Graph Neural Networks", "abstract": "Graph Neural Networks (GNNs) [11, 14] are a family of machine learning architectures that has recently become popular for applications dealing with structured data, such as molecule classification and knowledge graph completion [3, 6, 9, 15]. Recent work on the expressive power of GNNs has established a close connection between their ability to classify nodes in a graph and the WeisfeilerLehman (WL) test for checking graph isomorphism [12, 17]. Specifically, the authors of these two papers independently observe that the classifications of nodes produced by the WL test always refines the classification produced by any GNN, and moreover that there are GNNs that can reproduce the WL test. These results establish that GNNs can be as powerful as the WL test for node classification. However, this does not imply that GNNs can express any classifier that is refined by the WL test.", "year": 2019, "venue": "", "authors": [ "P. Barceló", "Egor V. Kostylev", "Mikaël Monet", "Jorge Pérez", "Juan L. Reutter", "J. Silva" ], "externalIds": { "CorpusId": 209513260 }, "url": "https://www.semanticscholar.org/paper/6a7c6e9feab24ccf51ee9e53cbc0b42b9b421a7d", "referenceCount": 14, "citationCount": 208, "influentialCitationCount": 30, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Structural Symmetries of the Lifted Representation of Classical Planning Tasks", "abstract": "We transfer the notion of structural symmetries to lifted planning task representations, based on a generalizing concept of abstract structures we use to model planning tasks. We show that symmetries are preserved by common grounding methods and shed some light on the relation to previous symmetry concepts. 
An analysis of common planning benchmarks reveals that symmetries occur in the lifted representation of many domains. Our concept prepares the ground for exploiting symmetries beyond their current scope, such as for faster grounding and mutex generation, as well as for state space transformations and state space reductions.", "year": 2017, "venue": "", "authors": [ "Silvan Sievers", "Gabriele Röger", "Martin Wehrle" ], "externalIds": { "CorpusId": 21351384 }, "url": "https://www.semanticscholar.org/paper/649f0aa962d0881889d26e6e8d58fe0d93746f57", "referenceCount": 28, "citationCount": 13, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Improving performance by reformulating PDDL into a bagged representation", "abstract": null, "year": 2016, "venue": "International Conference on Automated Planning and Scheduling", "authors": [ "Patricia J. Riddle", "J. Douglas", "Mike Barley", "Santiago Franco" ], "externalIds": { "MAG": "2916602811", "CorpusId": 86773359 }, "url": "https://www.semanticscholar.org/paper/4a9766b17f58c05f7a10647e9fee5d4f944fc449", "referenceCount": 0, "citationCount": 17, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Merge-and-shrink abstraction: A method for generating lower bounds in factored state spaces", "abstract": null, "year": 2014, "venue": "Journal of the ACM", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "The Graph Neural Network Model", "abstract": "Many underlying relationships among data in several areas of science and engineering, e.g., computer vision, molecular chemistry, molecular biology, pattern recognition, and data mining, can be represented in terms of graphs. In this paper, we propose a new neural network model, called graph neural network (GNN) model, that extends existing neural network methods for processing the data represented in graph domains. This GNN model, which can directly process most of the practically useful types of graphs, e.g., acyclic, cyclic, directed, and undirected, implements a function tau(G,n) isin IRm that maps a graph G and one of its nodes n into an m-dimensional Euclidean space. A supervised learning algorithm is derived to estimate the parameters of the proposed GNN model. The computational cost of the proposed algorithm is also considered. Some experimental results are shown to validate the proposed learning algorithm, and to demonstrate its generalization capabilities.", "year": 2009, "venue": "IEEE Transactions on Neural Networks", "authors": [ "F. Scarselli", "M. Gori", "A. Tsoi", "M. Hagenbuchner", "G. 
Monfardini" ], "externalIds": { "DBLP": "journals/tnn/ScarselliGTHM09", "MAG": "2116341502", "DOI": "10.1109/TNN.2008.2005605", "CorpusId": 206756462, "PubMed": "19068426" }, "url": "https://www.semanticscholar.org/paper/3efd851140aa28e95221b55fcc5659eea97b172d", "referenceCount": 114, "citationCount": 6458, "influentialCitationCount": 388, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Learning Generalized Policies from Planning Examples Using Concept Languages", "abstract": null, "year": 2004, "venue": "Applied intelligence (Boston)", "authors": [ "Mario Martín", "Hector Geffner" ], "externalIds": { "MAG": "2096600060", "DBLP": "journals/apin/MartinG04", "DOI": "10.1023/B:APIN.0000011138.20292.dd", "CorpusId": 11590117 }, "url": "https://www.semanticscholar.org/paper/02cfef766dadbcc9191e58021dd1490f57163979", "referenceCount": 30, "citationCount": 82, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Proceedings of the Twenty-Second International Joint Conference on Artificial Intelligence Computing Perfect Heuristics in Polynomial Time: On Bisimulation and Merge-and-Shrink Abstraction in Optimal Planning", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": { "CorpusId": 265711650 }, "url": "https://www.semanticscholar.org/paper/f3e269651473227455e345ab7e4e14416db05a39", "referenceCount": 0, "citationCount": 29, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Graph Neural Networks and Graph Kernels For Learning Heuristics: Is there a difference?", "abstract": "Graph neural networks (GNNs) have been used in various works for learning heuristics to guide search for planning. However, they are hindered by their slow evaluation speed and their limited expressiveness. It is also a known fact that the expressiveness of common GNNs is bounded by the Weisfeiler-Lehman (WL) algorithm for testing graph isomorphism, and for generating features for graphs. Thus, one may ask how do GNNs compare against machine learning models operating on WL features of planning problems represented as graphs? Our experiments show that linear models with WL features outpeform GNN models for learning heuristics for planning in the learning track of the 2023 International Planning Competition (IPC). Most notably, our model WL-GOOSE is the first model in the learning for planning literature which can reliably learn heuristics from scratch that are competitive with h FF on problem sizes much larger than those seen in the training set.", "year": null, "venue": "", "authors": [ "Dillon Z. Chen", "Felipe W. Trevizan", "Sylvie Thiébaux" ], "externalIds": { "CorpusId": 267313471 }, "url": "https://www.semanticscholar.org/paper/9a42125f6a70927368c2ecd60019d1d44def1f36", "referenceCount": 36, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "for any P in Q , any reachable transition ( s, s ′ ) in P , and any reachable state t in P with t ∼ s , there is a transition ( t, t ′ ) in P such that ( s, s ′ ) ∼ ( t, t ′ ) , and", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. 
Goose: Learning domain-independent heuristics", "abstract": null, "year": null, "venue": "NeurIPS 2023 Workshop on Generalization in Planning", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. Generalized planning in PDDL domains with pretrained large language models", "abstract": null, "year": null, "venue": "Proc. AAAI", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024 Code and data for the paper titled “symmetries and expressive requirements for learning general policies”", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "if s ∼ t for reachable states s and t in P , then s is a goal state iff t is", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "GraphGI:A GNN Explanation Method using Game Interaction": { "paper_title": "GraphGI:A GNN Explanation Method using Game Interaction", "arxiv_id": "2409.15698v1", "keyword": "graph neural network", "authors": [ "Xingping Xian", "Jianlu Liu", "Tao Wu", "Lin Yuan", "Chao Wang", "Baiyun Chen" ], "references": [ { "title": "GNNShap: Scalable and Accurate GNN Explanation using Shapley Values", "abstract": "Graph neural networks (GNNs) are popular machine learning models for graphs with many applications across scientific domains. However, GNNs are considered black box models, and it is challenging to understand how the model makes predictions. Game theoric Shapley value approaches are popular explanation methods in other domains but are not well-studied for graphs. Some studies have proposed Shapley value based GNN explanations, yet they have several limitations: they consider limited samples to approximate Shapley values; some mainly focus on small and large coalition sizes, and they are an order of magnitude slower than other explanation methods, making them inapplicable to even moderate-size graphs. In this work, we propose GNNShap, which provides explanations for edges since they provide more natural explanations for graphs and more fine-grained explanations. We overcome the limitations by sampling from all coalition sizes, parallelizing the sampling on GPUs, and speeding up model predictions by batching. GNNShap gives better fidelity scores and faster explanations than baselines on real-world datasets. The code is available at https://github.com/HipGraph/GNNShap.", "year": 2024, "venue": "The Web Conference", "authors": [ "Selahattin Akkas", "Ariful Azad" ], "externalIds": { "DBLP": "conf/www/AkkasA24", "ArXiv": "2401.04829", "DOI": "10.1145/3589334.3645599", "CorpusId": 266902676 }, "url": "https://www.semanticscholar.org/paper/676cee2be7f18a13e0d09472a9f7aaf3e0a1f25d", "referenceCount": 52, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DAG Matters! 
GFlowNets Enhanced Explainer For Graph Neural Networks", "abstract": "Uncovering rationales behind predictions of graph neural networks (GNNs) has received increasing attention over the years. Existing literature mainly focus on selecting a subgraph, through combinatorial optimization, to provide faithful explanations. However, the exponential size of candidate subgraphs limits the applicability of state-of-the-art methods to large-scale GNNs. We enhance on this through a different approach: by proposing a generative structure -- GFlowNets-based GNN Explainer (GFlowExplainer), we turn the optimization problem into a step-by-step generative problem. Our GFlowExplainer aims to learn a policy that generates a distribution of subgraphs for which the probability of a subgraph is proportional to its' reward. The proposed approach eliminates the influence of node sequence and thus does not need any pre-training strategies. We also propose a new cut vertex matrix to efficiently explore parent states for GFlowNets structure, thus making our approach applicable in a large-scale setting. We conduct extensive experiments on both synthetic and real datasets, and both qualitative and quantitative results show the superiority of our GFlowExplainer.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Wenqian Li", "Yinchuan Li", "Zhigang Li", "Jianye Hao", "Yan Pang" ], "externalIds": { "ArXiv": "2303.02448", "DBLP": "conf/iclr/LiLLHP23", "DOI": "10.48550/arXiv.2303.02448", "CorpusId": 257365860 }, "url": "https://www.semanticscholar.org/paper/2fd0e6aaccee819a880a55d9190700c6b754d32d", "referenceCount": 40, "citationCount": 24, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "EdgeSHAPer: Bond-centric Shapley value-based explanation method for graph neural networks", "abstract": null, "year": 2022, "venue": "iScience", "authors": [ "A. Mastropietro", "G. Pasculli", "Christian Feldmann", "Raquel Rodríguez-Pérez", "J. Bajorath" ], "externalIds": { "PubMedCentral": "9483788", "DOI": "10.1016/j.isci.2022.105043", "CorpusId": 251970917, "PubMed": "36134335" }, "url": "https://www.semanticscholar.org/paper/e3e081e97a6d3b4c12b8129e4c53966ddcf62c0c", "referenceCount": 53, "citationCount": 19, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "FlowX: Towards Explainable Graph Neural Networks via Message Flows", "abstract": "We investigate the explainability of graph neural networks (GNNs) as a step toward elucidating their working mechanisms. While most current methods focus on explaining graph nodes, edges, or features, we argue that, as the inherent functional mechanism of GNNs, message flows are more natural for performing explainability. To this end, we propose a novel method here, known as FlowX, to explain GNNs by identifying important message flows. To quantify the importance of flows, we propose to follow the philosophy of Shapley values from cooperative game theory. To tackle the complexity of computing all coalitions’ marginal contributions, we propose a flow sampling scheme to compute Shapley value approximations as initial assessments of further training. We then propose an information-controlled learning algorithm to train flow scores toward diverse explanation targets: necessary or sufficient explanations. 
Experimental studies on both synthetic and real-world datasets demonstrate that our proposed FlowX and its variants lead to improved explainability of GNNs.", "year": 2022, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Shurui Gui", "Hao Yuan", "Jie Wang", "Qicheng Lao", "Kang Li", "Shuiwang Ji" ], "externalIds": { "DBLP": "journals/corr/abs-2206-12987", "ArXiv": "2206.12987", "DOI": "10.1109/TPAMI.2023.3347470", "CorpusId": 250072908, "PubMed": "38147422" }, "url": "https://www.semanticscholar.org/paper/421b495c2379182b8874ce91af5ab1121d356834", "referenceCount": 71, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "The Shapley Value in Machine Learning", "abstract": "Over the last few years, the Shapley value, a solution concept from cooperative game theory, has found numerous applications in machine learning. In this paper, we first discuss fundamental concepts of cooperative game theory and axiomatic properties of the Shapley value. Then we give an overview of the most important applications of the Shapley value in machine learning: feature selection, explainability, multi-agent reinforcement learning, ensemble pruning, and data valuation. We examine the most crucial limitations of the Shapley value and point out directions for future research.", "year": 2022, "venue": "International Joint Conference on Artificial Intelligence", "authors": [ "Benedek Rozemberczki", "Lauren Watson", "Péter Bayer", "Hao-Tsung Yang", "Oliver Kiss", "Sebastian Nilsson", "Rik Sarkar" ], "externalIds": { "DBLP": "journals/corr/abs-2202-05594", "ArXiv": "2202.05594", "DOI": "10.24963/ijcai.2022/778", "CorpusId": 246822765 }, "url": "https://www.semanticscholar.org/paper/09c72d9d46f6750e487afdb5f7cae7693ffccc10", "referenceCount": 83, "citationCount": 146, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GStarX: Explaining Graph Neural Networks with Structure-Aware Cooperative Games", "abstract": "Explaining machine learning models is an important and increasingly popular area of research interest. The Shapley value from game theory has been proposed as a prime approach to compute feature importance towards model predictions on images, text, tabular data, and recently graph neural networks (GNNs) on graphs. In this work, we revisit the appropriateness of the Shapley value for GNN explanation, where the task is to identify the most important subgraph and constituent nodes for GNN predictions. We claim that the Shapley value is a non-ideal choice for graph data because it is by definition not structure-aware. We propose a Graph Structure-aware eXplanation (GStarX) method to leverage the critical graph structure information to improve the explanation. Specifically, we define a scoring function based on a new structure-aware value from the cooperative game theory proposed by Hamiache and Navarro (HN). When used to score node importance, the HN value utilizes graph structures to attribute cooperation surplus between neighbor nodes, resembling message passing in GNNs, so that node importance scores reflect not only the node feature importance, but also the node structural roles. 
We demonstrate that GStarX produces qualitatively more intuitive explanations, and quantitatively improves explanation fidelity over strong baselines on chemical graph property prediction and text graph sentiment classification.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Shichang Zhang", "Yozen Liu", "Neil Shah", "Yizhou Sun" ], "externalIds": { "ArXiv": "2201.12380", "DBLP": "conf/nips/ZhangLSS22", "CorpusId": 248987580 }, "url": "https://www.semanticscholar.org/paper/bb608cf0cfef9103f14f7d326be36e999dc88af5", "referenceCount": 49, "citationCount": 30, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Interpreting Multivariate Shapley Interactions in DNNs", "abstract": "This paper aims to explain deep neural networks (DNNs) from the perspective of multivariate interactions. In this paper, we define and quantify the significance of interactions among multiple input variables of the DNN. Input variables with strong interactions usually form a coalition and reflect prototype features, which are memorized and used by the DNN for inference. We define the significance of interactions based on the Shapley value, which is designed to assign the attribution value of each input variable to the inference. We have conducted experiments with various DNNs. Experimental results have demonstrated the effectiveness of the proposed method.", "year": 2021, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Hao Zhang", "Yichen Xie", "Longjie Zheng", "Die Zhang", "Quanshi Zhang" ], "externalIds": { "DBLP": "conf/aaai/0063XZZZ21", "DOI": "10.1609/aaai.v35i12.17299", "CorpusId": 235348979 }, "url": "https://www.semanticscholar.org/paper/c996d77b109899ce22429ba02d7d59c74885349f", "referenceCount": 46, "citationCount": 55, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GraphSVX: Shapley Value Explanations for Graph Neural Networks", "abstract": null, "year": 2021, "venue": "ECML/PKDD", "authors": [ "Alexandre Duval", "Fragkiskos D. Malliaros" ], "externalIds": { "DBLP": "conf/pkdd/DuvalM21", "ArXiv": "2104.10482", "DOI": "10.1007/978-3-030-86520-7_19", "CorpusId": 233324152 }, "url": "https://www.semanticscholar.org/paper/6c26f638f0b16244f634e163562594993b06ec9b", "referenceCount": 50, "citationCount": 67, "influentialCitationCount": 15, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Generative Causal Explanations for Graph Neural Networks", "abstract": "This paper presents Gem, a model-agnostic approach for providing interpretable explanations for any GNNs on various graph learning tasks. Specifically, we formulate the problem of providing explanations for the decisions of GNNs as a causal learning task. Then we train a causal explanation model equipped with a loss function based on Granger causality. Different from existing explainers for GNNs, Gem explains GNNs on graph-structured data from a causal perspective. It has better generalization ability as it has no requirements on the internal structure of the GNNs or prior knowledge on the graph learning tasks. In addition, Gem, once trained, can be used to explain the target GNN very quickly. Our theoretical analysis shows that several recent explainers fall into a unified framework of additive feature attribution methods. 
Experimental results on synthetic and real-world datasets show that Gem achieves a relative increase of the explanation accuracy by up to $30\\%$ and speeds up the explanation process by up to $110\\times$ as compared to its state-of-the-art alternatives.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Wanyu Lin", "Hao Lan", "Baochun Li" ], "externalIds": { "ArXiv": "2104.06643", "DBLP": "conf/icml/LinLL21", "CorpusId": 233231366 }, "url": "https://www.semanticscholar.org/paper/2a5a8db41940990dc8fe8e7717ed85ba043204e1", "referenceCount": 35, "citationCount": 140, "influentialCitationCount": 19, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On Explainability of Graph Neural Networks via Subgraph Explorations", "abstract": "We consider the problem of explaining the predictions of graph neural networks (GNNs), which otherwise are considered as black boxes. Existing methods invariably focus on explaining the importance of graph nodes or edges but ignore the substructures of graphs, which are more intuitive and human-intelligible. In this work, we propose a novel method, known as SubgraphX, to explain GNNs by identifying important subgraphs. Given a trained GNN model and an input graph, our SubgraphX explains its predictions by efficiently exploring different subgraphs with Monte Carlo tree search. To make the tree search more effective, we propose to use Shapley values as a measure of subgraph importance, which can also capture the interactions among different subgraphs. To expedite computations, we propose efficient approximation schemes to compute Shapley values for graph data. Our work represents the first attempt to explain GNNs via identifying subgraphs explicitly and directly. Experimental results show that our SubgraphX achieves significantly improved explanations, while keeping computations at a reasonable level.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Hao Yuan", "Haiyang Yu", "Jie Wang", "Kang Li", "Shuiwang Ji" ], "externalIds": { "ArXiv": "2102.05152", "DBLP": "journals/corr/abs-2102-05152", "CorpusId": 231861768 }, "url": "https://www.semanticscholar.org/paper/123139463809b5acf98b95d4c8e958be334a32b5", "referenceCount": 52, "citationCount": 311, "influentialCitationCount": 51, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Explainability in Graph Neural Networks: A Taxonomic Survey", "abstract": "Deep learning methods are achieving ever-increasing performance on many artificial intelligence tasks. A major limitation of deep models is that they are not amenable to interpretability. This limitation can be circumvented by developing post hoc techniques to explain predictions, giving rise to the area of explainability. Recently, explainability of deep models on images and texts has achieved significant progress. In the area of graph data, graph neural networks (GNNs) and their explainability are experiencing rapid developments. However, there is neither a unified treatment of GNN explainability methods, nor a standard benchmark and testbed for evaluations. In this survey, we provide a unified and taxonomic view of current GNN explainability methods. Our unified and taxonomic treatments of this subject shed lights on the commonalities and differences of existing methods and set the stage for further methodological developments. 
To facilitate evaluations, we provide a testbed for GNN explainability, including datasets, common algorithms and evaluation metrics. Furthermore, we conduct comprehensive experiments to compare and analyze the performance of many techniques. Altogether, this work provides a unified methodological treatment of GNN explainability and a standardized testbed for evaluations.", "year": 2020, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Hao Yuan", "Haiyang Yu", "Shurui Gui", "Shuiwang Ji" ], "externalIds": { "ArXiv": "2012.15445", "DBLP": "journals/corr/abs-2012-15445", "DOI": "10.1109/TPAMI.2022.3204236", "CorpusId": 229923402, "PubMed": "36063508" }, "url": "https://www.semanticscholar.org/paper/14f0ee2594c550de7fb5e590b322bcb1bcec8061", "referenceCount": 95, "citationCount": 486, "influentialCitationCount": 68, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Parameterized Explainer for Graph Neural Network", "abstract": "Despite recent progress in Graph Neural Networks (GNNs), explaining predictions made by GNNs remains a challenging open problem. The leading method independently addresses the local explanations (i.e., important subgraph structure and node features) to interpret why a GNN model makes the prediction for a single instance, e.g. a node or a graph. As a result, the explanation generated is painstakingly customized for each instance. The unique explanation interpreting each instance independently is not sufficient to provide a global understanding of the learned GNN model, leading to a lack of generalizability and hindering it from being used in the inductive setting. Besides, as it is designed for explaining a single instance, it is challenging to explain a set of instances naturally (e.g., graphs of a given class). In this study, we address these key challenges and propose PGExplainer, a parameterized explainer for GNNs. PGExplainer adopts a deep neural network to parameterize the generation process of explanations, which enables PGExplainer a natural approach to explaining multiple instances collectively. Compared to the existing work, PGExplainer has better generalization ability and can be utilized in an inductive setting easily. Experiments on both synthetic and real-life datasets show highly competitive performance with up to 24.7\\% relative improvement in AUC on explaining graph classification over the leading baseline.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Dongsheng Luo", "Wei Cheng", "Dongkuan Xu", "Wenchao Yu", "Bo Zong", "Haifeng Chen", "Xiang Zhang" ], "externalIds": { "DBLP": "conf/nips/LuoCXYZC020", "ArXiv": "2011.04573", "MAG": "3104818889", "CorpusId": 226281363 }, "url": "https://www.semanticscholar.org/paper/d9f5ec342df97e060b527a8bc18ae4e97401f246", "referenceCount": 55, "citationCount": 440, "influentialCitationCount": 128, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PGM-Explainer: Probabilistic Graphical Model Explanations for Graph Neural Networks", "abstract": "In Graph Neural Networks (GNNs), the graph structure is incorporated into the learning of node representations. This complex structure makes explaining GNNs' predictions become much more challenging. In this paper, we propose PGM-Explainer, a Probabilistic Graphical Model (PGM) model-agnostic explainer for GNNs. 
Given a prediction to be explained, PGM-Explainer identifies crucial graph components and generates an explanation in form of a PGM approximating that prediction. Different from existing explainers for GNNs where the explanations are drawn from a set of linear functions of explained features, PGM-Explainer is able to demonstrate the dependencies of explained features in form of conditional probabilities. Our theoretical analysis shows that the PGM generated by PGM-Explainer includes the Markov-blanket of the target prediction, i.e. including all its statistical information. We also show that the explanation returned by PGM-Explainer contains the same set of independence statements in the perfect map. Our experiments on both synthetic and real-world datasets show that PGM-Explainer achieves better performance than existing explainers in many benchmark tasks.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Minh N. Vu", "M. Thai" ], "externalIds": { "DBLP": "conf/nips/VuT20", "MAG": "3093206758", "ArXiv": "2010.05788", "CorpusId": 222290499 }, "url": "https://www.semanticscholar.org/paper/a8ae2d8232db04d88cf622e5fabd11da3163aa8f", "referenceCount": 43, "citationCount": 271, "influentialCitationCount": 35, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Interpreting Graph Neural Networks for NLP With Differentiable Edge Masking", "abstract": "Graph neural networks (GNNs) have become a popular approach to integrating structural inductive biases into NLP models. However, there has been little work on interpreting them, and specifically on understanding which parts of the graphs (e.g. syntactic trees or co-reference structures) contribute to a prediction. In this work, we introduce a post-hoc method for interpreting the predictions of GNNs which identifies unnecessary edges. Given a trained GNN model, we learn a simple classifier that, for every edge in every layer, predicts if that edge can be dropped. We demonstrate that such a classifier can be trained in a fully differentiable fashion, employing stochastic gates and encouraging sparsity through the expected $L_0$ norm. We use our technique as an attribution method to analyze GNN models for two tasks -- question answering and semantic role labeling -- providing insights into the information flow in these models. We show that we can drop a large proportion of edges without deteriorating the performance of the model, while we can analyse the remaining edges for interpreting model predictions.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "M. Schlichtkrull", "Nicola De Cao", "Ivan Titov" ], "externalIds": { "ArXiv": "2010.00577", "DBLP": "journals/corr/abs-2010-00577", "MAG": "3091534907", "CorpusId": 222090060 }, "url": "https://www.semanticscholar.org/paper/c30c0092bf4eb8a44faec3fc60cdd5006276bcdc", "referenceCount": 75, "citationCount": 189, "influentialCitationCount": 14, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Higher-Order Explanations of Graph Neural Networks via Relevant Walks", "abstract": "Graph Neural Networks (GNNs) are a popular approach for predicting graph structured data. As GNNs tightly entangle the input graph into the neural network structure, common explainable AI approaches are not applicable. To a large extent, GNNs have remained black-boxes for the user so far. 
In this paper, we show that GNNs can in fact be naturally explained using higher-order expansions, i.e., by identifying groups of edges that jointly contribute to the prediction. Practically, we find that such explanations can be extracted using a nested attribution scheme, where existing techniques such as layer-wise relevance propagation (LRP) can be applied at each step. The output is a collection of walks into the input graph that are relevant for the prediction. Our novel explanation method, which we denote by GNN-LRP, is applicable to a broad range of graph neural networks and lets us extract practically relevant insights on sentiment analysis of text data, structure-property relationships in quantum chemistry, and image classification.", "year": 2020, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Thomas Schnake", "Oliver Eberle", "Jonas Lederer", "Shinichi Nakajima", "Kristof T. Schutt", "Klaus-Robert Muller", "G. Montavon" ], "externalIds": { "ArXiv": "2006.03589", "DBLP": "journals/pami/SchnakeELNSMM22", "MAG": "3108823960", "DOI": "10.1109/TPAMI.2021.3115452", "CorpusId": 227225626, "PubMed": "34559639" }, "url": "https://www.semanticscholar.org/paper/9e707dd89bba25a3dd22c96f43bd72b9b3ab94bb", "referenceCount": 89, "citationCount": 175, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science", "Mathematics" ] }, { "title": "XGNN: Towards Model-Level Explanations of Graph Neural Networks", "abstract": "Graphs neural networks (GNNs) learn node features by aggregating and combining neighbor information, which have achieved promising performance on many graph tasks. However, GNNs are mostly treated as black-boxes and lack human intelligible explanations. Thus, they cannot be fully trusted and used in certain application domains if GNN models cannot be explained. In this work, we propose a novel approach, known as XGNN, to interpret GNNs at the model-level. Our approach can provide high-level insights and generic understanding of how GNNs work. In particular, we propose to explain GNNs by training a graph generator so that the generated graph patterns maximize a certain prediction of the model. We formulate the graph generation as a reinforcement learning task, where for each step, the graph generator predicts how to add an edge into the current graph. The graph generator is trained via a policy gradient method based on information from the trained GNNs. In addition, we incorporate several graph rules to encourage the generated graphs to be valid. Experimental results on both synthetic and real-world datasets show that our proposed methods help understand and verify the trained GNNs. 
Furthermore, our experimental results indicate that the generated graphs can provide guidance on how to improve the trained GNNs.", "year": 2020, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Haonan Yuan", "Jiliang Tang", "Xia Hu", "Shuiwang Ji" ], "externalIds": { "DBLP": "conf/kdd/YuanTHJ20", "MAG": "3033892090", "ArXiv": "2006.02587", "DOI": "10.1145/3394486.3403085", "CorpusId": 219305237 }, "url": "https://www.semanticscholar.org/paper/75c8466a0c1c3b9fe595efc83671984ef95bd679", "referenceCount": 49, "citationCount": 332, "influentialCitationCount": 31, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "RelEx: A Model-Agnostic Relational Model Explainer", "abstract": "In recent years, considerable progress has been made on improving the interpretability of machine learning models. This is essential, as complex deep learning models with millions of parameters produce state of the art performance, but it can be nearly impossible to explain their predictions. While various explainability techniques have achieved impressive results, nearly all of them assume each data instance to be independent and identically distributed (iid). This excludes relational models, such as Statistical Relational Learning (SRL), and the recently popular Graph Neural Networks (GNNs), resulting in few options to explain them. While there does exist work on explaining GNNs, GNN-Explainer, they assume access to the gradients of the model to learn explanations, which is restrictive in terms of its applicability across non-differentiable relational models and practicality. In this work, we develop RelEx, amodel-agnostic relational explainer to explain black-box relational models with only access to the outputs of the black-box. RelEx is able to explain any relational model, including SRL models and GNNs. We compare RelEx to the state-of-the-art relational explainer, GNN-Explainer, and relational extensions of iid explanation models and show that RelEx achieves comparable or better performance, while remaining model-agnostic.", "year": 2020, "venue": "AAAI/ACM Conference on AI, Ethics, and Society", "authors": [ "Yue Zhang", "David DeFazio", "Arti Ramesh" ], "externalIds": { "DBLP": "journals/corr/abs-2006-00305", "MAG": "3032116791", "ArXiv": "2006.00305", "DOI": "10.1145/3461702.3462562", "CorpusId": 219177300 }, "url": "https://www.semanticscholar.org/paper/7fd293b1984cac5900434ea796bbe74e05e6623a", "referenceCount": 28, "citationCount": 84, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "GraphLIME: Local Interpretable Model Explanations for Graph Neural Networks", "abstract": "Recently, graph neural networks (GNN) were shown to be successful in effectively representing graph structured data because of their good performance and generalization ability. However, explaining the effectiveness of GNN models is a challenging task because of the complex nonlinear transformations made over the iterations. In this paper, we propose GraphLIME, a local interpretable model explanation for graphs using the Hilbert-Schmidt Independence Criterion (HSIC) Lasso, which is a nonlinear feature selection method. GraphLIME is a generic GNN-model explanation framework that learns a nonlinear interpretable model locally in the subgraph of the node being explained. 
Through experiments on two real-world datasets, the explanations of GraphLIME are found to be of extraordinary degree and more descriptive in comparison to the existing explanation methods.", "year": 2020, "venue": "IEEE Transactions on Knowledge and Data Engineering", "authors": [ "Q. Huang", "M. Yamada", "Yuan Tian", "Dinesh Singh", "Dawei Yin", "Yi Chang" ], "externalIds": { "MAG": "3000120900", "DBLP": "journals/tkde/HuangYTSC23", "ArXiv": "2001.06216", "DOI": "10.1109/TKDE.2022.3187455", "CorpusId": 210714016 }, "url": "https://www.semanticscholar.org/paper/8819adc0f064d5293bd2b783a63e23f68109738a", "referenceCount": 36, "citationCount": 283, "influentialCitationCount": 20, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Explainability Methods for Graph Convolutional Neural Networks", "abstract": "With the growing use of graph convolutional neural networks (GCNNs) comes the need for explainability. In this paper, we introduce explainability methods for GCNNs. We develop the graph analogues of three prominent explainability methods for convolutional neural networks: contrastive gradient-based (CG) saliency maps, Class Activation Mapping (CAM), and Excitation Back-Propagation (EB) and their variants, gradient-weighted CAM (Grad-CAM) and contrastive EB (c-EB). We show a proof-of-concept of these methods on classification problems in two application domains: visual scene graphs and molecular graphs. To compare the methods, we identify three desirable properties of explanations: (1) their importance to classification, as measured by the impact of occlusions, (2) their contrastivity with respect to different classes, and (3) their sparseness on a graph. We call the corresponding quantitative metrics fidelity, contrastivity, and sparsity and evaluate them for each method. Lastly, we analyze the salient subgraphs obtained from explanations and report frequently occurring patterns.", "year": 2019, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Phillip E. Pope", "Soheil Kolouri", "Mohammad Rostami", "Charles E. Martin", "Heiko Hoffmann" ], "externalIds": { "DBLP": "conf/cvpr/PopeKRMH19", "MAG": "2979481854", "DOI": "10.1109/CVPR.2019.01103", "CorpusId": 198904065 }, "url": "https://www.semanticscholar.org/paper/cb2d9b2f171da67f7b47ac3e0eb935a0de223354", "referenceCount": 41, "citationCount": 396, "influentialCitationCount": 48, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Explainability Techniques for Graph Convolutional Networks", "abstract": "Graph Networks are used to make decisions in potentially complex scenarios but it is usually not obvious how or why they made them. In this work, we study the explainability of Graph Network decisions using two main classes of techniques, gradient-based and decomposition-based, on a toy dataset and a chemistry task. 
Our study sets the ground for future development as well as application to real-world problems.", "year": 2019, "venue": "International Conference on Machine Learning", "authors": [ "Federico Baldassarre", "Hossein Azizpour" ], "externalIds": { "DBLP": "journals/corr/abs-1905-13686", "MAG": "3034371431", "ArXiv": "1905.13686", "CorpusId": 173188615 }, "url": "https://www.semanticscholar.org/paper/8fb202cdcfec3b0e7ba0e3f88949d6d923b48b2d", "referenceCount": 48, "citationCount": 232, "influentialCitationCount": 30, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Data Shapley: Equitable Valuation of Data for Machine Learning", "abstract": "As data becomes the fuel driving technological and economic growth, a fundamental challenge is how to quantify the value of data in algorithmic predictions and decisions. For example, in healthcare and consumer markets, it has been suggested that individuals should be compensated for the data that they generate, but it is not clear what is an equitable valuation for individual data. In this work, we develop a principled framework to address data valuation in the context of supervised machine learning. Given a learning algorithm trained on $n$ data points to produce a predictor, we propose data Shapley as a metric to quantify the value of each training datum to the predictor performance. Data Shapley value uniquely satisfies several natural properties of equitable data valuation. We develop Monte Carlo and gradient-based methods to efficiently estimate data Shapley values in practical settings where complex learning algorithms, including neural networks, are trained on large datasets. In addition to being equitable, extensive experiments across biomedical, image and synthetic data demonstrate that data Shapley has several other benefits: 1) it is more powerful than the popular leave-one-out or leverage score in providing insight on what data is more valuable for a given learning task; 2) low Shapley value data effectively capture outliers and corruptions; 3) high Shapley value data inform what type of new data to acquire to improve the predictor.", "year": 2019, "venue": "International Conference on Machine Learning", "authors": [ "Amirata Ghorbani", "James Y. Zou" ], "externalIds": { "MAG": "2939984132", "ArXiv": "1904.02868", "DBLP": "conf/icml/GhorbaniZ19", "CorpusId": 102350503 }, "url": "https://www.semanticscholar.org/paper/b7a717233ec3ff37385ab1b06816d0ca375f5bb3", "referenceCount": 50, "citationCount": 620, "influentialCitationCount": 123, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "GNNExplainer: Generating Explanations for Graph Neural Networks", "abstract": "Graph Neural Networks (GNNs) are a powerful tool for machine learning on graphs. GNNs combine node feature information with the graph structure by recursively passing neural messages along edges of the input graph. However, incorporating both graph structure and feature information leads to complex models and explaining predictions made by GNNs remains unsolved. Here we propose GnnExplainer, the first general, model-agnostic approach for providing interpretable explanations for predictions of any GNN-based model on any graph-based machine learning task. Given an instance, GnnExplainer identifies a compact subgraph structure and a small subset of node features that have a crucial role in GNN's prediction. 
Further, GnnExplainer can generate consistent and concise explanations for an entire class of instances. We formulate GnnExplainer as an optimization task that maximizes the mutual information between a GNN's prediction and distribution of possible subgraph structures. Experiments on synthetic and real-world graphs show that our approach can identify important graph structures as well as node features, and outperforms alternative baseline approaches by up to 43.0% in explanation accuracy. GnnExplainer provides a variety of benefits, from the ability to visualize semantically relevant structures to interpretability, to giving insights into errors of faulty GNNs.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Rex Ying", "Dylan Bourgeois", "Jiaxuan You", "M. Zitnik", "J. Leskovec" ], "externalIds": { "DBLP": "conf/nips/YingBYZL19", "MAG": "2970119293", "ArXiv": "1903.03894", "CorpusId": 202572927, "PubMed": "32265580" }, "url": "https://www.semanticscholar.org/paper/00358a3f17821476d93461192b9229fe7d92bb3f", "referenceCount": 62, "citationCount": 1070, "influentialCitationCount": 247, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine", "Mathematics" ] }, { "title": "How Powerful are Graph Neural Networks?", "abstract": "Graph Neural Networks (GNNs) are an effective framework for representation learning of graphs. GNNs follow a neighborhood aggregation scheme, where the representation vector of a node is computed by recursively aggregating and transforming representation vectors of its neighboring nodes. Many GNN variants have been proposed and have achieved state-of-the-art results on both node and graph classification tasks. However, despite GNNs revolutionizing graph representation learning, there is limited understanding of their representational properties and limitations. Here, we present a theoretical framework for analyzing the expressive power of GNNs to capture different graph structures. Our results characterize the discriminative power of popular GNN variants, such as Graph Convolutional Networks and GraphSAGE, and show that they cannot learn to distinguish certain simple graph structures. We then develop a simple architecture that is provably the most expressive among the class of GNNs and is as powerful as the Weisfeiler-Lehman graph isomorphism test. We empirically validate our theoretical findings on a number of graph classification benchmarks, and demonstrate that our model achieves state-of-the-art performance.", "year": 2018, "venue": "International Conference on Learning Representations", "authors": [ "Keyulu Xu", "Weihua Hu", "J. Leskovec", "S. Jegelka" ], "externalIds": { "MAG": "2950468517", "ArXiv": "1810.00826", "DBLP": "journals/corr/abs-1810-00826", "CorpusId": 52895589 }, "url": "https://www.semanticscholar.org/paper/62ed9bf1d83c8db1f9cbf92ea2f57ea90ef683d9", "referenceCount": 45, "citationCount": 6421, "influentialCitationCount": 1375, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Semi-Supervised Classification with Graph Convolutional Networks", "abstract": "We present a scalable approach for semi-supervised learning on graph-structured data that is based on an efficient variant of convolutional neural networks which operate directly on graphs. We motivate the choice of our convolutional architecture via a localized first-order approximation of spectral graph convolutions. 
Our model scales linearly in the number of graph edges and learns hidden layer representations that encode both local graph structure and features of nodes. In a number of experiments on citation networks and on a knowledge graph dataset we demonstrate that our approach outperforms related methods by a significant margin.", "year": 2016, "venue": "International Conference on Learning Representations", "authors": [ "Thomas Kipf", "M. Welling" ], "externalIds": { "ArXiv": "1609.02907", "MAG": "2519887557", "DBLP": "journals/corr/KipfW16", "CorpusId": 3144218 }, "url": "https://www.semanticscholar.org/paper/36eff562f65125511b5dfab68ce7f7a943c27478", "referenceCount": 38, "citationCount": 25291, "influentialCitationCount": 6216, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Revisiting Semi-Supervised Learning with Graph Embeddings", "abstract": "We present a semi-supervised learning framework based on graph embeddings. Given a graph between instances, we train an embedding for each instance to jointly predict the class label and the neighborhood context in the graph. We develop both transductive and inductive variants of our method. In the transductive variant of our method, the class labels are determined by both the learned embeddings and input feature vectors, while in the inductive variant, the embeddings are defined as a parametric function of the feature vectors, so predictions can be made on instances not seen during training. On a large and diverse set of benchmark tasks, including text classification, distantly supervised entity extraction, and entity classification, we show improved performance over many of the existing models.", "year": 2016, "venue": "International Conference on Machine Learning", "authors": [ "Zhilin Yang", "William W. Cohen", "R. Salakhutdinov" ], "externalIds": { "MAG": "2315403234", "ArXiv": "1603.08861", "DBLP": "journals/corr/YangCS16", "CorpusId": 7008752 }, "url": "https://www.semanticscholar.org/paper/3d846cb01f6a975554035d2210b578ca61344b22", "referenceCount": 28, "citationCount": 1846, "influentialCitationCount": 274, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "An axiomatic approach to the concept of interaction among players in cooperative games", "abstract": null, "year": 1999, "venue": "International Journal of Game Theory", "authors": [ "M. Grabisch", "M. Roubens" ], "externalIds": { "MAG": "2009662043", "DBLP": "journals/ijgt/GrabischR99", "DOI": "10.1007/s001820050125", "CorpusId": 18033890 }, "url": "https://www.semanticscholar.org/paper/4c75cbc9847e6b0a870487bb03315e48f3734372", "referenceCount": 16, "citationCount": 316, "influentialCitationCount": 49, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Notes on the n-Person Game — II: The Value of an n-Person Game", "abstract": null, "year": 1951, "venue": "", "authors": [ "L. 
Shapley" ], "externalIds": { "MAG": "3092079550", "CorpusId": 232708946 }, "url": "https://www.semanticscholar.org/paper/6039f16196d665ebdf5972660e4fdce88391c697", "referenceCount": 0, "citationCount": 201, "influentialCitationCount": 12, "isOpenAccess": false, "fieldsOfStudy": [ "Economics" ] } ] }, "MotifDisco: Motif Causal Discovery For Time Series Motifs": { "paper_title": "MotifDisco: Motif Causal Discovery For Time Series Motifs", "arxiv_id": "2409.15219v1", "keyword": "graph neural network", "authors": [ "Josephine Lamp", "Mark Derdzinski", "Christopher Hannemann", "Sam Hatfield", "Joost van der Linden" ], "references": [ { "title": "A multi-head attention neural network with non-linear correlation approach for time series causal discovery", "abstract": null, "year": 2024, "venue": "Applied Soft Computing", "authors": [ "Nicolás Irribarra", "Kevin Michell", "Cristhian Bermeo", "Werner D. Kristjanpoller" ], "externalIds": { "DBLP": "journals/asc/IrribarraMBK24", "DOI": "10.1016/j.asoc.2024.112062", "CorpusId": 271760761 }, "url": "https://www.semanticscholar.org/paper/9778e87b9d6f9b8f4419be138394216fcfdbfd08", "referenceCount": 30, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Motif-Consistent Counterfactuals with Adversarial Refinement for Graph-Level Anomaly Detection", "abstract": "Graph-level anomaly detection is significant in diverse domains. To improve detection performance, counterfactual graphs have been exploited to benefit the generalization capacity by learning causal relations. Most existing studies directly introduce perturbations (e.g., flipping edges) to generate counterfactual graphs, which are prone to alter the semantics of generated examples and make them off the data manifold, resulting in sub-optimal performance. To address these issues, we propose a novel approach, Motif-consistent Counterfactuals with Adversarial Refinement (MotifCAR), for graph-level anomaly detection. The model combines the motif of one graph, the core subgraph containing the identification (category) information, and the contextual subgraph (non-motif) of another graph to produce a raw counterfactual graph. However, the produced raw graph might be distorted and cannot satisfy the important counterfactual properties: Realism, Validity, Proximity and Sparsity. Towards that, we present a Generative Adversarial Network (GAN)-based graph optimizer to refine the raw counterfactual graphs. It adopts the discriminator to guide the generator to generate graphs close to realistic data, i.e., meet the property Realism. Further, we design the motif consistency to force the motif of the generated graphs to be consistent with the realistic graphs, meeting the property Validity. Also, we devise the contextual loss and connection loss to control the contextual subgraph and the newly added links to meet the properties Proximity and Sparsity. As a result, the model can generate high-quality counterfactual graphs. 
Experiments demonstrate the superiority of MotifCAR.", "year": 2024, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Chunjing Xiao", "Shikang Pang", "Wenxin Tai", "Yanlong Huang", "Goce Trajcevski", "Fan Zhou" ], "externalIds": { "DBLP": "conf/kdd/XiaoPTHT024", "ArXiv": "2407.13251", "DOI": "10.1145/3637528.3672050", "CorpusId": 271270284 }, "url": "https://www.semanticscholar.org/paper/14094e050d56af1d64eba88a2c8d071ac460cb1d", "referenceCount": 52, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Comprehensive Review and Empirical Evaluation of Causal Discovery Algorithms for Numerical Data", "abstract": "Causal analysis has become an essential component in understanding the underlying causes of phenomena across various fields. Despite its significance, existing literature on causal discovery algorithms is fragmented, with inconsistent methodologies, i.e., there is no universal classification standard for existing methods, and a lack of comprehensive evaluations, i.e., data characteristics are often ignored to be jointly analyzed when benchmarking algorithms. This study addresses these gaps by conducting an exhaustive review and empirical evaluation for causal discovery methods on numerical data, aiming to provide a clearer and more structured understanding of the field. Our research begins with a comprehensive literature review spanning over two decades, analyzing over 200 academic articles and identifying more than 40 representative algorithms. This extensive analysis leads to the development of a structured taxonomy tailored to the complexities of causal discovery, categorizing methods into six main types. To address the lack of comprehensive evaluations, our study conducts an extensive empirical assessment of 29 causal discovery algorithms on multiple synthetic and real-world datasets. We categorize synthetic datasets based on size, linearity, and noise distribution, employing five evaluation metrics, and summarize the top-3 algorithm recommendations, providing guidelines for users in various data scenarios. Our results highlight a significant impact of dataset characteristics on algorithm performance. Moreover, a metadata extraction strategy with an accuracy exceeding 80% is developed to assist users in algorithm selection on unknown datasets. Based on these insights, we offer professional and practical guidelines to help users choose the most suitable causal discovery methods for their specific dataset.", "year": 2024, "venue": "arXiv.org", "authors": [ "Wenjin Niu", "Zijun Gao", "Liyan Song", "Lingbo Li" ], "externalIds": { "DBLP": "journals/corr/abs-2407-13054", "ArXiv": "2407.13054", "DOI": "10.48550/arXiv.2407.13054", "CorpusId": 271270079 }, "url": "https://www.semanticscholar.org/paper/216707b13007e5e93e5a1ae5767f76bef2fef817", "referenceCount": 0, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Chronos: Learning the Language of Time Series", "abstract": "We introduce Chronos, a simple yet effective framework for pretrained probabilistic time series models. Chronos tokenizes time series values using scaling and quantization into a fixed vocabulary and trains existing transformer-based language model architectures on these tokenized time series via the cross-entropy loss. 
We pretrained Chronos models based on the T5 family (ranging from 20M to 710M parameters) on a large collection of publicly available datasets, complemented by a synthetic dataset that we generated via Gaussian processes to improve generalization. In a comprehensive benchmark consisting of 42 datasets, and comprising both classical local models and deep learning methods, we show that Chronos models: (a) significantly outperform other methods on datasets that were part of the training corpus; and (b) have comparable and occasionally superior zero-shot performance on new datasets, relative to methods that were trained specifically on them. Our results demonstrate that Chronos models can leverage time series data from diverse domains to improve zero-shot accuracy on unseen forecasting tasks, positioning pretrained models as a viable tool to greatly simplify forecasting pipelines.", "year": 2024, "venue": "arXiv.org", "authors": [ "Abdul Fatir Ansari", "Lorenzo Stella", "Caner Turkmen", "Xiyuan Zhang", "Pedro Mercado", "Huibin Shen", "Oleksandr Shchur", "Syama Sundar Rangapuram", "Sebastian Pineda Arango", "Shubham Kapoor", "Jasper Zschiegner", "Danielle C. Maddix", "Michael W. Mahoney", "Kari Torkkola", "Andrew Gordon Wilson", "Michael Bohlke-Schneider", "Yuyang Wang" ], "externalIds": { "ArXiv": "2403.07815", "DBLP": "journals/corr/abs-2403-07815", "DOI": "10.48550/arXiv.2403.07815", "CorpusId": 268363551 }, "url": "https://www.semanticscholar.org/paper/4fb78450650894091b0a55b5504a8b0a6f3dec37", "referenceCount": 0, "citationCount": 36, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "EffCause: Discover Dynamic Causal Relationships Efficiently from Time-Series", "abstract": "Since the proposal of Granger causality, many researchers have followed the idea and developed extensions to the original algorithm. The classic Granger causality test aims to detect the existence of the static causal relationship. Notably, a fundamental assumption underlying most previous studies is the stationarity of causality, which requires the causality between variables to keep stable. However, this study argues that it is easy to break in real-world scenarios. Fortunately, our paper presents an essential observation: if we consider a sufficiently short window when discovering the rapidly changing causalities, they will keep approximately static and thus can be detected using the static way correctly. In light of this, we develop EffCause, bringing dynamics into classic Granger causality. Specifically, to efficiently examine the causalities on different sliding window lengths, we design two optimization schemes in EffCause and demonstrate the advantage of EffCause through extensive experiments on both simulated and real-world datasets. The results validate that EffCause achieves state-of-the-art accuracy in continuous causal discovery tasks while achieving faster computation. 
Case studies from cloud system failure analysis and traffic flow monitoring show that EffCause effectively helps us understand real-world time-series data and solve practical problems.", "year": 2024, "venue": "ACM Transactions on Knowledge Discovery from Data", "authors": [ "Yicheng Pan", "Yifan Zhang", "Xinrui Jiang", "Meng Ma", "Ping Wang" ], "externalIds": { "DBLP": "journals/tkdd/PanZJMW24", "DOI": "10.1145/3640818", "CorpusId": 266985329 }, "url": "https://www.semanticscholar.org/paper/56456b989942383191c8fb0878df4da29f7d4b1c", "referenceCount": 19, "citationCount": 1, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Framework for Variable-lag Motif Following Relation Inference In Time Series using Matrix Profile analysis", "abstract": "Knowing who follows whom and what patterns they are following are crucial steps to understand collective behaviors (e.g. a group of human, a school of fish, or a stock market). Time series is one of resources that can be used to get insight regarding following relations. However, the concept of following patterns or motifs and the solution to find them in time series are not obvious. In this work, we formalize a concept of following motifs between two time series and present a framework to infer following patterns between two time series. The framework utilizes one of efficient and scalable methods to retrieve motifs from time series called the Matrix Profile Method. We compare our proposed framework with several baselines. The framework performs better than baselines in the simulation datasets. In the dataset of sound recording, the framework is able to retrieve the following motifs within a pair of time series that two singers sing following each other. In the cryptocurrency dataset, the framework is capable of capturing the following motifs within a pair of time series from two digital currencies, which implies that the values of one currency follow the values of another currency patterns. Our framework can be utilized in any field of time series to get insight regarding following patterns between time series.", "year": 2024, "venue": "arXiv.org", "authors": [ "Naaek Chinpattanakarn", "Chainarong Amornbunchornvej" ], "externalIds": { "DBLP": "journals/corr/abs-2401-02860", "ArXiv": "2401.02860", "DOI": "10.48550/arXiv.2401.02860", "CorpusId": 266818524 }, "url": "https://www.semanticscholar.org/paper/ea48bce3ca3d6027767f2a6c21adefee45c80015", "referenceCount": 24, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Entropy Causal Graphs for Multivariate Time Series Anomaly Detection", "abstract": "Many multivariate time series anomaly detection frameworks have been proposed and widely applied. However, most of these frameworks do not consider intrinsic relationships between variables in multivariate time series data, thus ignoring the causal relationship among variables and degrading anomaly detection performance. This work proposes a novel framework called CGAD, an entropy Causal Graph for multivariate time series Anomaly Detection. CGAD utilizes transfer entropy to construct graph structures that unveil the underlying causal relationships among time series data. Weighted graph convolutional networks combined with causal convolutions are employed to model both the causal graph structures and the temporal patterns within multivariate time series data. 
Furthermore, CGAD applies anomaly scoring, leveraging median absolute deviation-based normalization to improve the robustness of the anomaly identification process. Extensive experiments demonstrate that CGAD outperforms state-of-the-art methods on real-world datasets with a 15% average improvement based on three different multivariate time series anomaly detection metrics.", "year": 2023, "venue": "arXiv.org", "authors": [ "F. Febrinanto", "Kristen Moore", "Chandra Thapa", "Mujie Liu", "Vidya Saikrishna", "Jiangang Ma", "Feng Xia" ], "externalIds": { "DBLP": "journals/corr/abs-2312-09478", "ArXiv": "2312.09478", "DOI": "10.48550/arXiv.2312.09478", "CorpusId": 266335763 }, "url": "https://www.semanticscholar.org/paper/374376e2184f669322f842166a729f3bf54a22c7", "referenceCount": 60, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "TempME: Towards the Explainability of Temporal Graph Neural Networks via Motif Discovery", "abstract": "Temporal graphs are widely used to model dynamic systems with time-varying interactions. In real-world scenarios, the underlying mechanisms of generating future interactions in dynamic systems are typically governed by a set of recurring substructures within the graph, known as temporal motifs. Despite the success and prevalence of current temporal graph neural networks (TGNN), it remains uncertain which temporal motifs are recognized as the significant indications that trigger a certain prediction from the model, which is a critical challenge for advancing the explainability and trustworthiness of current TGNNs. To address this challenge, we propose a novel approach, called Temporal Motifs Explainer (TempME), which uncovers the most pivotal temporal motifs guiding the prediction of TGNNs. Derived from the information bottleneck principle, TempME extracts the most interaction-related motifs while minimizing the amount of contained information to preserve the sparsity and succinctness of the explanation. Events in the explanations generated by TempME are verified to be more spatiotemporally correlated than those of existing approaches, providing more understandable insights. Extensive experiments validate the superiority of TempME, with up to 8.21% increase in terms of explanation accuracy across six real-world datasets and up to 22.96% increase in boosting the prediction Average Precision of current TGNNs.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Jialin Chen", "Rex Ying" ], "externalIds": { "DBLP": "journals/corr/abs-2310-19324", "ArXiv": "2310.19324", "DOI": "10.48550/arXiv.2310.19324", "CorpusId": 264829176 }, "url": "https://www.semanticscholar.org/paper/b40863427d4d2f5440ece176f4b8e6cec75b7d6e", "referenceCount": 77, "citationCount": 7, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Causal Feature Selection via Transfer Entropy", "abstract": "Machine learning algorithms are designed to capture complex relationships between features. In this context, the high dimensionality of data often results in poor model performance, with the risk of overfitting. Feature selection, the process of selecting a subset of relevant and non-redundant features, is an essential step to mitigate these issues. However, classical feature selection approaches do not inspect the causal relationship between features and the target variable, which can lead to misleading results in real-world applications. 
Causal discovery, instead, aims to identify causal relationships between features with observational data. In this paper, we propose a novel methodology at the intersection between feature selection and causal discovery, focusing on time series. We introduce a causal feature selection approach that relies on the forward and backward feature selection procedures and leverages transfer entropy to estimate the causal flow of information. In this context, we provide theoretical guarantees on the regression and classification errors for both the exact and the finite-sample cases. Finally, we present numerical validations on synthetic and real-world regression problems, showing results competitive w.r.t. the considered baselines.", "year": 2023, "venue": "IEEE International Joint Conference on Neural Network", "authors": [ "Paolo Bonetti", "A. Metelli", "Marcello Restelli" ], "externalIds": { "ArXiv": "2310.11059", "DBLP": "conf/ijcnn/BonettiMR24", "DOI": "10.1109/IJCNN60899.2024.10651028", "CorpusId": 264172738 }, "url": "https://www.semanticscholar.org/paper/c3f78f96ed67d1337ad527657f0514e746d66aff", "referenceCount": 46, "citationCount": 4, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Entropy-Aware Time-Varying Graph Neural Networks with Generalized Temporal Hawkes Process: Dynamic Link Prediction in the Presence of Node Addition and Deletion", "abstract": "This paper addresses the problem of learning temporal graph representations, which capture the changing nature of complex evolving networks. Existing approaches mainly focus on adding new nodes and edges to capture dynamic graph structures. However, to achieve more accurate representation of graph evolution, we consider both the addition and deletion of nodes and edges as events. These events occur at irregular time scales and are modeled using temporal point processes. Our goal is to learn the conditional intensity function of the temporal point process to investigate the influence of deletion events on node representation learning for link-level prediction. We incorporate network entropy, a measure of node and edge significance, to capture the effect of node deletion and edge removal in our framework. Additionally, we leveraged the characteristics of a generalized temporal Hawkes process, which considers the inhibitory effects of events where past occurrences can reduce future intensity. This framework enables dynamic representation learning by effectively modeling both addition and deletion events in the temporal graph. To evaluate our approach, we utilize autonomous system graphs, a family of inhomogeneous sparse graphs with instances of node and edge additions and deletions, in a link prediction task. By integrating these enhancements into our framework, we improve the accuracy of dynamic link prediction and enable better understanding of the dynamic evolution of complex networks.", "year": 2023, "venue": "Machine Learning and Knowledge Extraction", "authors": [ "Bahareh Najafi", "S. Parsaeefard", "Alberto Leon-Garcia" ], "externalIds": { "DBLP": "journals/make/NajafiPL23", "DOI": "10.3390/make5040069", "CorpusId": 263694832 }, "url": "https://www.semanticscholar.org/paper/da43521ac2d6549e60ff224dfd4b7996dee5fc14", "referenceCount": 0, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Survey on Causal Discovery Methods for I.I.D. 
and Time Series Data", "abstract": "The ability to understand causality from data is one of the major milestones of human-level intelligence. Causal Discovery (CD) algorithms can identify the cause-effect relationships among the variables of a system from related observational data with certain assumptions. Over the years, several methods have been developed primarily based on the statistical properties of data to uncover the underlying causal mechanism. In this study, we present an extensive discussion on the methods designed to perform causal discovery from both independent and identically distributed (I.I.D.) data and time series data. For this purpose, we first introduce the common terminologies used in causal discovery literature and then provide a comprehensive discussion of the algorithms designed to identify causal relations in different settings. We further discuss some of the benchmark datasets available for evaluating the algorithmic performance, off-the-shelf tools or software packages to perform causal discovery readily, and the common metrics used to evaluate these methods. We also evaluate some widely used causal discovery algorithms on multiple benchmark datasets and compare their performances. Finally, we conclude by discussing the research challenges and the applications of causal discovery algorithms in multiple areas of interest.", "year": 2023, "venue": "Trans. Mach. Learn. Res.", "authors": [ "Uzma Hasan", "Emam Hossain", "Md. Osman Gani" ], "externalIds": { "ArXiv": "2303.15027", "DBLP": "journals/tmlr/HasanHG23", "CorpusId": 257766636 }, "url": "https://www.semanticscholar.org/paper/164358180c300fb4d45a3b156ef2746ce91128f0", "referenceCount": 182, "citationCount": 17, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Exploring The Impact of Motif-Driven Causal Temporal Analysis Using Graph Neural Network in Improving Large Language Model Performance for Pharmacovigilance", "abstract": "Pharmacovigilance analytics can lower the probability of negative outcomes when choosing a route of therapy by identifying pharmacological adverse effects in the past. Despite the abundance of resources such as medical reports, review websites, online platforms, etc. available for adverse signal detection, it has proven challenging to manually extract the pertinent data and relate the medicine to its side effects both when there is the prevalence of mass adverse effect from the same drug in various documents and areas. We can quickly identify the gap and propose a causal temporal motif-driven large language model to identify and extract the causal association between the medication and the events based on but not limited to the commencement of the event and ignoring the prior reaction which may be due to some other reason revealing is challenging and the current state of the art still relies on Named Entity Recognition (NER) approaches. These NER-based approaches also experience cold start issues, in which new side effects remain isolated and are not connected to the drug from which they originated. 
We propose an approach that uses a causal temporal graph neural network including motif embedding using local linear embedding under Centrality constrain followed by a motif-based similarity search for finding similar subgraph and causal relationships between the drug and the adverse events.", "year": 2023, "venue": "2023 9th International Conference on Advanced Computing and Communication Systems (ICACCS)", "authors": [ "Aparna Kalla", "Sudipta Mukhopadhyay", "Zonunfeli Ralte", "Indrajit Kar" ], "externalIds": { "DOI": "10.1109/ICACCS57279.2023.10112876", "CorpusId": 258510522 }, "url": "https://www.semanticscholar.org/paper/a0d99d016c362fb51050c5b11e9053ac88850414", "referenceCount": 29, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Causal Discovery from Temporal Data: An Overview and New Perspectives", "abstract": "Temporal data, representing chronological observations of complex systems, has always been a typical data structure that can be widely generated by many domains, such as industry, medicine and finance. Analyzing this type of data is extremely valuable for various applications. Thus, different temporal data analysis tasks, eg, classification, clustering and prediction, have been proposed in the past decades. Among them, causal discovery, learning the causal relations from temporal data, is considered an interesting yet critical task and has attracted much research attention. Existing causal discovery works can be divided into two highly correlated categories according to whether the temporal data is calibrated, ie, multivariate time series causal discovery, and event sequence causal discovery. However, most previous surveys are only focused on the time series causal discovery and ignore the second category. In this paper, we specify the correlation between the two categories and provide a systematical overview of existing solutions. Furthermore, we provide public datasets, evaluation metrics and new perspectives for temporal data causal discovery.", "year": 2023, "venue": "arXiv.org", "authors": [ "Chang Gong", "Di Yao", "Chuzhe Zhang", "Wenbin Li", "Jingping Bi" ], "externalIds": { "ArXiv": "2303.10112", "DBLP": "journals/corr/abs-2303-10112", "DOI": "10.48550/arXiv.2303.10112", "CorpusId": 257622788 }, "url": "https://www.semanticscholar.org/paper/78a763ba54911697670150a565c626930b3e3557", "referenceCount": 317, "citationCount": 9, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "GlucoSynth: Generating Differentially-Private Synthetic Glucose Traces", "abstract": "We focus on the problem of generating high-quality, private synthetic glucose traces, a task generalizable to many other time series sources. Existing methods for time series data synthesis, such as those using Generative Adversarial Networks (GANs), are not able to capture the innate characteristics of glucose data and cannot provide any formal privacy guarantees without severely degrading the utility of the synthetic data. In this paper we present GlucoSynth, a novel privacy-preserving GAN framework to generate synthetic glucose traces. The core intuition behind our approach is to conserve relationships amongst motifs (glucose events) within the traces, in addition to temporal dynamics. Our framework incorporates differential privacy mechanisms to provide strong formal privacy guarantees. 
We provide a comprehensive evaluation on the real-world utility of the data using 1.2 million glucose traces; GlucoSynth outperforms all previous methods in its ability to generate high-quality synthetic glucose traces with strong privacy guarantees.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Josephine Lamp", "M. Derdzinski", "Christopher Hannemann", "Joost van der Linden", "Lu Feng", "Tianhao Wang", "David Evans" ], "externalIds": { "ArXiv": "2303.01621", "DBLP": "conf/nips/LampDHL00E23", "DOI": "10.48550/arXiv.2303.01621", "CorpusId": 257353812 }, "url": "https://www.semanticscholar.org/paper/3ed9c6c7f5ee8bc8386a59f034321ed6974020b1", "referenceCount": 40, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Motiflets - Simple and Accurate Detection of Motifs in Time Series", "abstract": "\n A time series motif intuitively is a short time series that repeats itself approximately the same within a larger time series. Such motifs often represent concealed structures, such as heart beats in an ECG recording, the riff in a pop song, or sleep spindles in EEG sleep data. Motif discovery (MD) is the task of finding such motifs in a given input series. As there are varying definitions of what exactly a motif is, a number of different algorithms exist. As central parameters they all take the length\n l\n of the motif and the maximal distance\n r\n between the motif's occurrences. In practice, however, especially suitable values for\n r\n are very hard to determine upfront, and found motifs show a high variability even for very similar\n r\n values. Accordingly, finding an interesting motif with these methods requires extensive trial-and-error.\n \n \n In this paper, we present a different approach to the MD problem. We define\n k\n -Motiflets as the set of exactly\n k\n occurrences of a motif of length\n l\n , whose maximum pairwise distance is minimal. This turns the MD problem upside-down: The central parameter of our approach is not the distance threshold\n r\n , but the desired number of occurrence\n k\n of the motif, which we show is considerably more intuitive and easier to set. Based on this definition, we present exact and approximate algorithms for finding\n k\n -Motiflets and analyze their complexity. To further ease the use of our method, we describe statistical tools to automatically determine meaningful values for its input parameters. Thus, for the first time, extracting meaningful motif sets without any a-priori knowledge becomes feasible. By evaluation on several real-world data sets and comparison to four state-of-the-art MD algorithms, we show that our proposed algorithm is both quantitatively superior to its competitors, finding larger motif sets at higher similarity, and qualitatively better, leading to clearer and easier to interpret motifs without any need for manual tuning.\n", "year": 2022, "venue": "Proceedings of the VLDB Endowment", "authors": [ "Patrick Schäfer", "U. 
Leser" ], "externalIds": { "DBLP": "journals/pvldb/0001L22", "ArXiv": "2206.03735", "DOI": "10.14778/3574245.3574257", "CorpusId": 255596771 }, "url": "https://www.semanticscholar.org/paper/8c76b3f0eac900f4d04e25a24680b36db37d44ea", "referenceCount": 36, "citationCount": 11, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Causal Inference in Time Series in Terms of Rényi Transfer Entropy", "abstract": "Uncovering causal interdependencies from observational data is one of the great challenges of a nonlinear time series analysis. In this paper, we discuss this topic with the help of an information-theoretic concept known as Rényi’s information measure. In particular, we tackle the directional information flow between bivariate time series in terms of Rényi’s transfer entropy. We show that by choosing Rényi’s parameter α, we can appropriately control information that is transferred only between selected parts of the underlying distributions. This, in turn, is a particularly potent tool for quantifying causal interdependencies in time series, where the knowledge of “black swan” events, such as spikes or sudden jumps, are of key importance. In this connection, we first prove that for Gaussian variables, Granger causality and Rényi transfer entropy are entirely equivalent. Moreover, we also partially extend these results to heavy-tailed α-Gaussian variables. These results allow establishing a connection between autoregressive and Rényi entropy-based information-theoretic approaches to data-driven causal inference. To aid our intuition, we employed the Leonenko et al. entropy estimator and analyzed Rényi’s information flow between bivariate time series generated from two unidirectionally coupled Rössler systems. Notably, we find that Rényi’s transfer entropy not only allows us to detect a threshold of synchronization but it also provides non-trivial insight into the structure of a transient regime that exists between the region of chaotic correlations and synchronization threshold. In addition, from Rényi’s transfer entropy, we could reliably infer the direction of coupling and, hence, causality, only for coupling strengths smaller than the onset value of the transient regime, i.e., when two Rössler systems are coupled but have not yet entered synchronization.", "year": 2022, "venue": "Entropy", "authors": [ "P. Jizba", "H. Lavička", "Zlata Tabachová" ], "externalIds": { "PubMedCentral": "9321760", "DBLP": "journals/entropy/JizbaLT22", "ArXiv": "2203.11407", "DOI": "10.3390/e24070855", "CorpusId": 247596993, "PubMed": "35885081" }, "url": "https://www.semanticscholar.org/paper/0d7ef5bd1f4ffd0f74b8ec04c9272d5cd4b924ef", "referenceCount": 88, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Physics", "Mathematics", "Medicine" ] }, { "title": "Survey and Evaluation of Causal Discovery Methods for Time Series", "abstract": "We introduce in this survey the major concepts, models, and algorithms proposed so far to infer causal relations from observational time series, a task usually referred to as causal discovery in time series. To do so, after a description of the underlying concepts and modelling assumptions, we present different methods according to the family of approaches they belong to: Granger causality, constraint-based approaches, noise-based approaches, score-based approaches, logic-based approaches, topology-based approaches, and difference-based approaches. 
We then evaluate several representative methods to illustrate the behaviour of different families of approaches. This illustration is conducted on both artificial and real datasets, with different characteristics. The main conclusions one can draw from this survey are that causal discovery in time series is an active research field in which new methods (in every family of approaches) are regularly proposed, and that no family or method stands out in all situations. Indeed, they all rely on assumptions that may or may not be appropriate for a particular dataset.", "year": 2022, "venue": "Journal of Artificial Intelligence Research", "authors": [ "Charles K. Assaad", "Emilie Devijver", "Éric Gaussier" ], "externalIds": { "DBLP": "conf/ijcai/AssaadDG23", "DOI": "10.1613/jair.1.13428", "CorpusId": 247213389 }, "url": "https://www.semanticscholar.org/paper/9847da8e939e49cc32ab944ae10f99a2030415b0", "referenceCount": 150, "citationCount": 87, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MotifExplainer: a Motif-based Graph Neural Network Explainer", "abstract": "We consider the explanation problem of Graph Neural Networks (GNNs). Most existing GNN explanation methods identify the most important edges or nodes but fail to consider substructures, which are more important for graph data. The only method that considers subgraphs tries to search all possible subgraphs and identify the most significant subgraphs. However, the subgraphs identified may not be recurrent or statistically important. In this work, we propose a novel method, known as MotifExplainer, to explain GNNs by identifying important motifs, recurrent and statistically significant patterns in graphs. Our proposed motif-based methods can provide better human-understandable explanations than methods based on nodes, edges, and regular subgraphs. Given an input graph and a pre-trained GNN model, our method first extracts motifs in the graph using well-designed motif extraction rules. Then we generate motif embedding by feeding motifs into the pre-trained GNN. Finally, we employ an attention-based method to identify the most influential motifs as explanations for the final prediction results. The empirical studies on both synthetic and real-world datasets demonstrate the effectiveness of our method.", "year": 2022, "venue": "arXiv.org", "authors": [ "Zhaoning Yu", "Hongyang Gao" ], "externalIds": { "DBLP": "journals/corr/abs-2202-00519", "ArXiv": "2202.00519", "CorpusId": 246442194 }, "url": "https://www.semanticscholar.org/paper/2d23a740ade86345d53bceda3326952be8659f28", "referenceCount": 47, "citationCount": 13, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multi-Scale Adaptive Graph Neural Network for Multivariate Time Series Forecasting", "abstract": "Multivariate time series (MTS) forecasting plays an important role in the automation and optimization of intelligent applications. It is a challenging task, as we need to consider both complex intra-variable dependencies and inter-variable dependencies. Existing works only learn temporal patterns with the help of single inter-variable dependencies. However, there are multi-scale temporal patterns in many real-world MTS. Single inter-variable dependencies make the model prefer to learn one type of prominent and shared temporal patterns. In this article, we propose a multi-scale adaptive graph neural network (MAGNN) to address the above issue. 
MAGNN exploits a multi-scale pyramid network to preserve the underlying temporal dependencies at different time scales. Since the inter-variable dependencies may be different under distinct time scales, an adaptive graph learning module is designed to infer the scale-specific inter-variable dependencies without pre-defined priors. Given the multi-scale feature representations and scale-specific inter-variable dependencies, a multi-scale temporal graph neural network is introduced to jointly model intra-variable dependencies and inter-variable dependencies. After that, we develop a scale-wise fusion module to effectively promote the collaboration across different time scales, and automatically capture the importance of contributed temporal patterns. Experiments on six real-world datasets demonstrate that MAGNN outperforms the state-of-the-art methods across various settings.", "year": 2022, "venue": "IEEE Transactions on Knowledge and Data Engineering", "authors": [ "Ling Chen", "Donghui Chen", "Zongjiang Shang", "Binqing Wu", "Cen Zheng", "Bo Wen", "Wei Zhang" ], "externalIds": { "DBLP": "journals/tkde/ChenCSWZWZ23", "ArXiv": "2201.04828", "DOI": "10.1109/TKDE.2023.3268199", "CorpusId": 245906234 }, "url": "https://www.semanticscholar.org/paper/7e05147e9d207a6bf0d38274d1f9497a17e43f46", "referenceCount": 48, "citationCount": 27, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Motif Graph Neural Network", "abstract": "Graphs can model complicated interactions between entities, which naturally emerge in many important applications. These applications can often be cast into standard graph learning tasks, in which a crucial step is to learn low-dimensional graph representations. Graph neural networks (GNNs) are currently the most popular model in graph embedding approaches. However, standard GNNs in the neighborhood aggregation paradigm suffer from limited discriminative power in distinguishing high-order graph structures as opposed to low-order structures. To capture high-order structures, researchers have resorted to motifs and developed motif-based GNNs. However, the existing motif-based GNNs still often suffer from less discriminative power on high-order structures. To overcome the above limitations, we propose motif GNN (MGNN), a novel framework to better capture high-order structures, hinging on our proposed motif redundancy minimization operator and injective motif combination. First, MGNN produces a set of node representations with respect to each motif. The next phase is our proposed redundancy minimization among motifs which compares the motifs with each other and distills the features unique to each motif. Finally, MGNN performs the updating of node representations by combining multiple representations from different motifs. In particular, to enhance the discriminative power, MGNN uses an injective function to combine the representations with respect to different motifs. We further show that our proposed architecture increases the expressive power of GNNs with a theoretical analysis. We demonstrate that MGNN outperforms state-of-the-art methods on seven public benchmarks on both the node classification and graph classification tasks.", "year": 2021, "venue": "IEEE Transactions on Neural Networks and Learning Systems", "authors": [ "Xuexin Chen", "Ruichu Cai", "Yuan Fang", "Min Wu", "Zijian Li", "Z. 
Hao" ], "externalIds": { "ArXiv": "2112.14900", "DBLP": "journals/corr/abs-2112-14900", "DOI": "10.1109/TNNLS.2023.3281716", "CorpusId": 245634314, "PubMed": "37335782" }, "url": "https://www.semanticscholar.org/paper/4d7edd08c2c78dbe3d1af40ccaf5ef26e4c4b6c4", "referenceCount": 89, "citationCount": 11, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Event2Graph: Event-driven Bipartite Graph for Multivariate Time-series Anomaly Detection", "abstract": "Modeling inter-dependencies between time-series is the key to achieve high performance in anomaly detection for multivariate time-series data. The de-facto solution to model the dependencies is to feed the data into a recurrent neural network (RNN). However, the fully connected network structure underneath the RNN (either GRU or LSTM) assumes a static and complete dependency graph between time-series, which may not hold in many real-world applications. To alleviate this assumption, we propose a dynamic bipartite graph structure to encode the inter-dependencies between time-series. More concretely, we model time series as one type of nodes, and the time series segments (regarded as event) as another type of nodes, where the edge between two types of nodes describe a temporal pattern occurred on a specific time series at a certain time. Based on this design, relations between time series can be explicitly modelled via dynamic connections to event nodes, and the multivariate time-series anomaly detection problem can be formulated as a self-supervised, edge stream prediction problem in dynamic graphs. We conducted extensive experiments to demonstrate the effectiveness of the design.", "year": 2021, "venue": "arXiv.org", "authors": [ "Yuhang Wu", "Mengting Gu", "Lan Wang", "Yusan Lin", "Fei Wang", "Hao Yang" ], "externalIds": { "DBLP": "journals/corr/abs-2108-06783", "ArXiv": "2108.06783", "CorpusId": 237091642 }, "url": "https://www.semanticscholar.org/paper/f5b78f7fd6505a4774756792c940e4ca1bd2a39b", "referenceCount": 31, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Granger Causality: A Review and Recent Advances", "abstract": "Introduced more than a half-century ago, Granger causality has become a popular tool for analyzing time series data in many application domains, from economics and finance to genomics and neuroscience. Despite this popularity, the validity of this framework for inferring causal relationships among time series has remained the topic of continuous debate. Moreover, while the original definition was general, limitations in computational tools have constrained the applications of Granger causality to primarily simple bivariate vector autoregressive processes. Starting with a review of early developments and debates, this article discusses recent advances that address various shortcomings of the earlier approaches, from models for high-dimensional time series to more recent developments that account for nonlinear and non-Gaussian observations and allow for subsampled and mixed-frequency time series.", "year": 2021, "venue": "Annual Review of Statistics and Its Application", "authors": [ "A. Shojaie", "E. 
Fox" ], "externalIds": { "ArXiv": "2105.02675", "DBLP": "journals/corr/abs-2105-02675", "DOI": "10.1146/annurev-statistics-040120-010930", "CorpusId": 233864585, "PubMed": "37840549" }, "url": "https://www.semanticscholar.org/paper/45c0ddc2d4d789a6634212646681be389b298bb7", "referenceCount": 135, "citationCount": 155, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics", "Medicine" ] }, { "title": "Disentangled Motif-aware Graph Learning for Phrase Grounding", "abstract": "In this paper, we propose a novel graph learning framework for phrase grounding in the image. Developing from the sequential to the dense graph model, existing works capture coarse-grained context but fail to distinguish the diversity of context among phrases and image regions. In contrast, we pay special attention to different motifs implied in the context of the scene graph and devise the disentangled graph network to integrate the motif-aware contextual information into representations. Besides, we adopt interventional strategies at the feature and the structure levels to consolidate and generalize representations. Finally, the cross-modal attention network is utilized to fuse intra-modal features, where each phrase can be computed similarity with regions to select the best-grounded one. We validate the efficiency of disentangled and interventional graph network (DIGN) through a series of ablation studies, and our model achieves state-of-the-art performance on Flickr30K Entities and ReferIt Game benchmarks.", "year": 2021, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Zongshen Mu", "Siliang Tang", "Jie Tan", "Qiang Yu", "Yueting Zhuang" ], "externalIds": { "DBLP": "journals/corr/abs-2104-06008", "ArXiv": "2104.06008", "DOI": "10.1609/aaai.v35i15.17602", "CorpusId": 233219515 }, "url": "https://www.semanticscholar.org/paper/a83c406324897951013a4128b11b5714bef1f7c5", "referenceCount": 42, "citationCount": 32, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Real-World Evidence and Glycemic Improvement Using Dexcom G6 Features", "abstract": "Background: Optional features of continuous glucose monitoring (CGM) systems empower patients and caregivers to understand and manage diabetes in new ways. We examined associations between use of optional features, demographics, and glycemic outcomes. Methods: Retrospective cohort studies were performed with data from US-based users of the G6 CGM System (Dexcom, Inc.). For all cohorts, data included sensor glucose values (SGVs). In separate cohorts, use of alert features (for hyperglycemia, existing hypoglycemia, and impending hypoglycemia), remote data sharing feature (Share), software for retrospective pattern analysis (CLARITY), “virtual assistant” feature that announces the current SGV and trend in response to a spoken request were assessed. Descriptive statistics were used to summarize feature set utilization patterns and relate them to glycemic outcomes. Results: Most individual features were consistently adopted by high proportions of G6 users. Threshold SGVs chosen for activating hyperglycemia and hypoglycemia alerts varied with age and were higher among the youngest and oldest patients. Use of the Share feature was more common among young patients and those with type 1 diabetes. Individuals who used more of the alert and notification features had more favorable glycemic outcomes, including time in range (TIR), than those who used fewer. 
More extensive engagement with CLARITY notifications was associated with higher TIR. Frequent use of the virtual assistant feature was associated with higher TIR and lower mean SGV. Conclusions: Optional features of the G6 CGM system are acceptable to and appear to benefit patients who use them. Different levels of engagement suggest that demographics and personal circumstances play a role in how patients and caregivers use CGM features to help manage diabetes.", "year": 2021, "venue": "Diabetes Technology & Therapeutics", "authors": [ "H. Akturk", "Robert Dowd", "K. Shankar", "M. Derdzinski" ], "externalIds": { "PubMedCentral": "7957382", "DOI": "10.1089/dia.2020.0654", "CorpusId": 231594523, "PubMed": "33434439" }, "url": "https://www.semanticscholar.org/paper/f474e54ded3a1032f664e37bec01357103d4eb1e", "referenceCount": 28, "citationCount": 38, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Amortized Causal Discovery: Learning to Infer Causal Graphs from Time-Series Data", "abstract": "Standard causal discovery methods must fit a new model whenever they encounter samples from a new underlying causal graph. However, these samples often share relevant information - for instance, the dynamics describing the effects of causal relations - which is lost when following this approach. We propose Amortized Causal Discovery, a novel framework that leverages such shared dynamics to learn to infer causal relations from time-series data. This enables us to train a single, amortized model that infers causal relations across samples with different underlying causal graphs, and thus makes use of the information that is shared. We demonstrate experimentally that this approach, implemented as a variational model, leads to significant improvements in causal discovery performance, and show how it can be extended to perform well under hidden confounding.", "year": 2020, "venue": "CLEaR", "authors": [ "Sindy Löwe", "David Madras", "R. Zemel", "M. Welling" ], "externalIds": { "MAG": "3036859210", "DBLP": "journals/corr/abs-2006-10833", "ArXiv": "2006.10833", "CorpusId": 219955853 }, "url": "https://www.semanticscholar.org/paper/24c3e71af21109b23ed9c8d1af13a5921c4f8020", "referenceCount": 67, "citationCount": 111, "influentialCitationCount": 21, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Temporal Network Motifs: Models, Limitations, Evaluation", "abstract": "Investigating the frequency and distribution of small subgraphs with a few nodes/edges, i.e., motifs, is an effective analysis method for static networks. Motif-driven analysis is also useful for temporal networks where the spectrum of motifs is significantly larger due to the additional temporal information on edges. This variety makes it challenging to design a temporal motif model that can consider all aspects of temporality. In the literature, previous works have introduced various models that handle different characteristics. In this work, we compare the existing temporal motif models and evaluate the facets of temporal networks that are overlooked in the literature. We first survey four temporal motif models and highlight their differences. Then, we evaluate the advantages and limitations of these models with respect to the temporal inducedness and timing constraints. In addition, we suggest a new lens, event pairs, to investigate temporal correlations. 
We believe that our comparative survey and extensive evaluation will catalyze the research on temporal network motif models.", "year": 2020, "venue": "IEEE Transactions on Knowledge and Data Engineering", "authors": [ "Penghang Liu", "V. Guarrasi", "Ahmet Erdem Sarıyüce" ], "externalIds": { "MAG": "3028529594", "DBLP": "journals/tkde/LiuGS23", "ArXiv": "2005.11817", "DOI": "10.1109/TKDE.2021.3077495", "CorpusId": 218870024 }, "url": "https://www.semanticscholar.org/paper/2ea76896d4642304d7aa538e8e9556edb94d8e1d", "referenceCount": 52, "citationCount": 24, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Physics" ] }, { "title": "Multivariate Time Series Forecasting with Transfer Entropy Graph", "abstract": "Multivariate time series (MTS) forecasting is an important problem in many fields. Accurate forecasting results can effectively help decision-making. To date, many MTS forecasting methods have been proposed and widely applied. However, these methods assume that the predicted value of a single variable is affected by all other variables, which ignores the causal relationship among variables. To address the above issue, a novel end-to-end deep learning model, termed graph neural network with transfer entropy (TEGNN) is proposed in this paper. To characterize the causal information among variables, the transfer entropy (TE) graph is introduced in our model, where each variable is regarded as a graph node and each edge represents the causal relationship between variables. In addition, convolutional neural network (CNN) filters with different perception scales are used for time series feature extraction, which is used to generate the feature of each node. Finally, graph neural network (GNN) is adopted to tackle the forecasting problem of graph structure generated by MTS. Three benchmark datasets from the real world are used to evaluate the proposed TEGNN and the comprehensive experiments show that the proposed method achieves state-of-the-art results in MTS forecasting task.", "year": 2020, "venue": "Tsinghua Science and Technology", "authors": [ "Haoyan Xu", "Yida Huang", "Ziheng Duan", "Xiaoqian Wang", "Jie Feng", "Pengyu Song" ], "externalIds": { "MAG": "3033415292", "ArXiv": "2005.01185", "DOI": "10.26599/tst.2021.9010081", "CorpusId": 219531251 }, "url": "https://www.semanticscholar.org/paper/4fb598f7669a30ae783344de6bde02f14ba91117", "referenceCount": 41, "citationCount": 27, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Variable-lag Granger Causality and Transfer Entropy for Time Series Analysis", "abstract": "Granger causality is a fundamental technique for causal inference in time series data, commonly used in the social and biological sciences. Typical operationalizations of Granger causality make a strong assumption that every time point of the effect time series is influenced by a combination of other time series with a fixed time delay. The assumption of fixed time delay also exists in Transfer Entropy, which is considered to be a non-linear version of Granger causality. However, the assumption of the fixed time delay does not hold in many applications, such as collective behavior, financial markets, and many natural phenomena. 
To address this issue, we develop Variable-lag Granger causality and Variable-lag Transfer Entropy, generalizations of both Granger causality and Transfer Entropy that relax the assumption of the fixed time delay and allow causes to influence effects with arbitrary time delays. In addition, we propose methods for inferring both Variable-lag Granger causality and Transfer Entropy relations. In our approaches, we utilize an optimal warping path of Dynamic Time Warping to infer variable-lag causal relations. We demonstrate our approaches on an application for studying coordinated collective behavior and other real-world causal-inference datasets and show that our proposed approaches perform better than several existing methods in both simulated and real-world datasets. Our approaches can be applied in any domain of time series analysis. The software of this work is available in the R-CRAN package: VLTimeCausality.", "year": 2020, "venue": "ACM Transactions on Knowledge Discovery from Data", "authors": [ "Chainarong Amornbunchornvej", "E. Zheleva", "T. Berger-Wolf" ], "externalIds": { "MAG": "3003660316", "DBLP": "journals/tkdd/Amornbunchornvej21a", "ArXiv": "2002.00208", "DOI": "10.1145/3441452", "CorpusId": 211010786 }, "url": "https://www.semanticscholar.org/paper/0f334159d0b9a9061f60f91ec7b11ebab4096edd", "referenceCount": 50, "citationCount": 36, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Economics", "Physics", "Mathematics" ] }, { "title": "RTransferEntropy - Quantifying information flow between different time series using effective transfer entropy", "abstract": null, "year": 2019, "venue": "SoftwareX", "authors": [ "Simon Behrendt", "T. Dimpfl", "F. Peter", "David J. Zimmermann" ], "externalIds": { "DBLP": "journals/softx/BehrendtDPZ19", "MAG": "2955282559", "DOI": "10.1016/J.SOFTX.2019.100265", "CorpusId": 198343881 }, "url": "https://www.semanticscholar.org/paper/848821757e69b8e68b8847ada97336e53049a816", "referenceCount": 29, "citationCount": 128, "influentialCitationCount": 11, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Inductive Representation Learning on Large Graphs", "abstract": "Low-dimensional embeddings of nodes in large graphs have proved extremely useful in a variety of prediction tasks, from content recommendation to identifying protein functions. However, most existing approaches require that all nodes in the graph are present during training of the embeddings; these previous approaches are inherently transductive and do not naturally generalize to unseen nodes. Here we present GraphSAGE, a general, inductive framework that leverages node feature information (e.g., text attributes) to efficiently generate node embeddings for previously unseen data. Instead of training individual embeddings for each node, we learn a function that generates embeddings by sampling and aggregating features from a node's local neighborhood. Our algorithm outperforms strong baselines on three inductive node-classification benchmarks: we classify the category of unseen nodes in evolving information graphs based on citation and Reddit post data, and we show that our algorithm generalizes to completely unseen graphs using a multi-graph dataset of protein-protein interactions.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "William L. Hamilton", "Z. Ying", "J. 
Leskovec" ], "externalIds": { "DBLP": "conf/nips/HamiltonYL17", "MAG": "2952779545", "ArXiv": "1706.02216", "CorpusId": 4755450 }, "url": "https://www.semanticscholar.org/paper/6b7d6e6416343b2a122f8416e69059ce919026ef", "referenceCount": 42, "citationCount": 12811, "influentialCitationCount": 2278, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Causal Network Inference by Optimal Causation Entropy", "abstract": "The broad abundance of time series data, which is in sharp contrast to limited knowledge of the underlying network dynamic processes that produce such observations, calls for a rigorous and efficient method of causal network inference. Here we develop mathematical theory of causation entropy, an information-theoretic statistic designed for model-free causality inference. For stationary Markov processes, we prove that for a given node in the network, its causal parents form the minimal set of nodes that maximizes causation entropy, a result we refer to as the optimal causation entropy principle. Furthermore, this principle guides us in developing computational and data efficient algorithms for causal network inference based on a two-step discovery and removal algorithm for time series data for a network-coupled dynamical system. Validation in terms of analytical and numerical results for Gaussian processes on large random networks highlights that inference by our algorithm outperforms previous leading meth...", "year": 2014, "venue": "SIAM Journal on Applied Dynamical Systems", "authors": [ "Jie Sun", "D. Taylor", "E. Bollt" ], "externalIds": { "DBLP": "journals/siamads/0007TB15", "MAG": "3105301591", "ArXiv": "1401.7574", "DOI": "10.1137/140956166", "CorpusId": 194752 }, "url": "https://www.semanticscholar.org/paper/c71578f0b21109a12d4d0aba5c351218b21fcac4", "referenceCount": 91, "citationCount": 184, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Temporal motifs in time-dependent networks", "abstract": "Temporal networks are commonly used to represent systems where connections between elements are active only for restricted periods of time, such as telecommunication, neural signal processing, biochemical reaction and human social interaction networks. We introduce the framework of temporal motifs to study the mesoscale topological–temporal structure of temporal networks in which the events of nodes do not overlap in time. Temporal motifs are classes of similar event sequences, where the similarity refers not only to topology but also to the temporal order of the events. We provide a mapping from event sequences to coloured directed graphs that enables an efficient algorithm for identifying temporal motifs. We discuss some aspects of temporal motifs, including causality and null models, and present basic statistics of temporal motifs in a large mobile call network.", "year": 2011, "venue": "arXiv.org", "authors": [ "Lauri Kovanen", "M. Karsai", "K. Kaski", "J. Kertész", "J. 
Saramäki" ], "externalIds": { "ArXiv": "1107.5646", "DBLP": "journals/corr/abs-1107-5646", "MAG": "3101999497", "DOI": "10.1088/1742-5468/2011/11/P11005", "CorpusId": 15458440 }, "url": "https://www.semanticscholar.org/paper/761331db984188e20b796c6f9079e433ea3d99fb", "referenceCount": 44, "citationCount": 267, "influentialCitationCount": 19, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Computer Science", "Mathematics" ] }, { "title": "Granger causality and transfer entropy are equivalent for Gaussian variables.", "abstract": "Granger causality is a statistical notion of causal influence based on prediction via vector autoregression. Developed originally in the field of econometrics, it has since found application in a broader arena, particularly in neuroscience. More recently transfer entropy, an information-theoretic measure of time-directed information transfer between jointly dependent processes, has gained traction in a similarly wide field. While it has been recognized that the two concepts must be related, the exact relationship has until now not been formally described. Here we show that for Gaussian variables, Granger causality and transfer entropy are entirely equivalent, thus bridging autoregressive and information-theoretic approaches to data-driven causal inference.", "year": 2009, "venue": "Physical Review Letters", "authors": [ "L. Barnett", "A. Barrett", "A. Seth" ], "externalIds": { "MAG": "2079656335", "ArXiv": "0910.4514", "DOI": "10.1103/PhysRevLett.103.238701", "CorpusId": 1266025, "PubMed": "20366183" }, "url": "https://www.semanticscholar.org/paper/d4eb18d616b212639926aa8c82e512aa91bd5606", "referenceCount": 42, "citationCount": 909, "influentialCitationCount": 57, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Mathematics", "Medicine" ] }, { "title": "Time series shapelets: a new primitive for data mining", "abstract": "Classification of time series has been attracting great interest over the past decade. Recent empirical evidence has strongly suggested that the simple nearest neighbor algorithm is very difficult to beat for most time series problems. While this may be considered good news, given the simplicity of implementing the nearest neighbor algorithm, there are some negative consequences of this. First, the nearest neighbor algorithm requires storing and searching the entire dataset, resulting in a time and space complexity that limits its applicability, especially on resource-limited sensors. Second, beyond mere classification accuracy, we often wish to gain some insight into the data.\n In this work we introduce a new time series primitive, time series shapelets, which addresses these limitations. Informally, shapelets are time series subsequences which are in some sense maximally representative of a class. As we shall show with extensive empirical evaluations in diverse domains, algorithms based on the time series shapelet primitives can be interpretable, more accurate and significantly faster than state-of-the-art classifiers.", "year": 2009, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Lexiang Ye", "Eamonn J. 
Keogh" ], "externalIds": { "DBLP": "conf/kdd/YeK09", "MAG": "2029438113", "DOI": "10.1145/1557019.1557122", "CorpusId": 5144823 }, "url": "https://www.semanticscholar.org/paper/9c40543348fef37369c5b19ea26994ca1db8d9e8", "referenceCount": 16, "citationCount": 1016, "influentialCitationCount": 139, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Permutation entropy: a natural complexity measure for time series.", "abstract": "We introduce complexity parameters for time series based on comparison of neighboring values. The definition directly applies to arbitrary real-world data. For some well-known chaotic dynamical systems it is shown that our complexity behaves similar to Lyapunov exponents, and is particularly useful in the presence of dynamical or observational noise. The advantages of our method are its simplicity, extremely fast calculation, robustness, and invariance with respect to nonlinear monotonous transformations.", "year": 2002, "venue": "Physical Review Letters", "authors": [ "C. Bandt", "B. Pompe" ], "externalIds": { "MAG": "2014683958", "DOI": "10.1103/PHYSREVLETT.88.174102", "CorpusId": 5785692, "PubMed": "12005759" }, "url": "https://www.semanticscholar.org/paper/04de9ce062c6ac999fa009b9c264da20a8d8a282", "referenceCount": 9, "citationCount": 3518, "influentialCitationCount": 369, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Medicine" ] }, { "title": "Wavelet entropy: a new tool for analysis of short duration brain electrical signals", "abstract": null, "year": 2001, "venue": "Journal of Neuroscience Methods", "authors": [ "O. Rosso", "S. Blanco", "J. Yordanova", "V. Kolev", "A. Figliola", "M. Schürmann", "E. Basar" ], "externalIds": { "MAG": "2106706488", "DOI": "10.1016/S0165-0270(00)00356-3", "CorpusId": 17609298, "PubMed": "11166367" }, "url": "https://www.semanticscholar.org/paper/db172c97f43722d52e8b1a9b160c7fb27e4044a6", "referenceCount": 30, "citationCount": 798, "influentialCitationCount": 31, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology", "Medicine" ] }, { "title": "Measuring information transfer", "abstract": "An information theoretic measure is derived that quantifies the statistical coherence between systems evolving in time. The standard time delayed mutual information fails to distinguish information that is actually exchanged from shared information due to common history and input signals. In our new approach, these influences are excluded by appropriate conditioning of transition probabilities. The resulting transfer entropy is able to distinguish effectively driving and responding elements and to detect asymmetry in the interaction of subsystems.", "year": 2000, "venue": "Physical Review Letters", "authors": [ "T. Schreiber" ], "externalIds": { "ArXiv": "nlin/0001042", "MAG": "2041782669", "DOI": "10.1103/PhysRevLett.85.461", "CorpusId": 7411376, "PubMed": "10991308" }, "url": "https://www.semanticscholar.org/paper/b86514fbdeb401509a7f1d02a06c3c592fca3f3f", "referenceCount": 11, "citationCount": 3648, "influentialCitationCount": 344, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Physics", "Medicine" ] }, { "title": "Silhouettes: a graphical aid to the interpretation and validation of cluster analysis", "abstract": null, "year": 1987, "venue": "", "authors": [ "P. 
Rousseeuw" ], "externalIds": { "MAG": "1987971958", "DOI": "10.1016/0377-0427(87)90125-7", "CorpusId": 189900 }, "url": "https://www.semanticscholar.org/paper/f16841e022038e94a59f7e0a82002102b78d79a4", "referenceCount": 21, "citationCount": 15870, "influentialCitationCount": 847, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Dynamic programming algorithm optimization for spoken word recognition", "abstract": "This paper reports on an optimum dynamic progxamming (DP) based time-normalization algorithm for spoken word recognition. First, a general principle of time-normalization is given using time-warping function. Then, two time-normalized distance definitions, called symmetric and asymmetric forms, are derived from the principle. These two forms are compared with each other through theoretical discussions and experimental studies. The symmetric form algorithm superiority is established. A new technique, called slope constraint, is successfully introduced, in which the warping function slope is restricted so as to improve discrimination between words in different categories. The effective slope constraint characteristic is qualitatively analyzed, and the optimum slope constraint condition is determined through experiments. The optimized algorithm is then extensively subjected to experimental comparison with various DP-algorithms, previously applied to spoken word recognition by different research groups. The experiment shows that the present algorithm gives no more than about two-thirds errors, even compared to the best conventional algorithm.", "year": 1978, "venue": "", "authors": [ "Hiroaki Sakoe" ], "externalIds": { "MAG": "2128160875", "DOI": "10.1109/TASSP.1978.1163055", "CorpusId": 17900407 }, "url": "https://www.semanticscholar.org/paper/18f355d7ef4aa9f82bf5c00f84e46714efa5fd77", "referenceCount": 9, "citationCount": 6125, "influentialCitationCount": 627, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A general statistical framework for assessing categorical clustering in free recall.", "abstract": null, "year": 1976, "venue": "", "authors": [ "L. Hubert", "J. Levin" ], "externalIds": { "MAG": "2008788779", "DOI": "10.1037/0033-2909.83.6.1072", "CorpusId": 197650521 }, "url": "https://www.semanticscholar.org/paper/f30c63da803e4edc5ec4434280ad6ed3a11cbc29", "referenceCount": 19, "citationCount": 396, "influentialCitationCount": 22, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology" ] }, { "title": "Investigating causal relations by econometric models and cross-spectral methods", "abstract": "There occurs on some occasions a difficulty in deciding the direction of causality between two related variables and also whether or not feedback is occurring. Testable definitions of causality and feedback are proposed and illustrated by use of simple two-variable models. The important problem of apparent instantaneous causality is discussed and it is suggested that the problem often arises due to slowness in recordhag information or because a sufficiently wide class of possible causal variables has not been used. It can be shown that the cross spectrum between two variables can be decomposed into two parts, each relating to a single causal arm of a feedback situation. Measures of causal lag and causal strength can then be constructed. 
A generalization of this result with the partial cross spectrum is suggested.The object of this paper is to throw light on the relationships between certain classes of econometric models involving feedback and the functions arising in spectral analysis, particularly the cross spectrum and the partial cross spectrum. Causality and feedback are here defined in an explicit and testable fashion. It is shown that in the two-variable case the feedback mechanism can be broken down into two causal relations and that the cross spectrum can be considered as the sum of two cross spectra, each closely connected with one of the causations. The next three sections of the paper briefly introduce those aspects of spectral methods, model building, and causality which are required later. Section IV presents the results for the two-variable case and Section V generalizes these results for three variables.", "year": 1969, "venue": "", "authors": [ "C. Granger" ], "externalIds": { "MAG": "2178225550", "DOI": "10.1017/CBO9780511753978.002", "CorpusId": 41012146 }, "url": "https://www.semanticscholar.org/paper/6a7c63a73724c0ca68b1675e256bb8b9a35c94f4", "referenceCount": 7, "citationCount": 23175, "influentialCitationCount": 1989, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Neural Temporal Walks: Motif-Aware Representation Learning on Continuous-Time Dynamic Graphs", "abstract": "Continuous-time dynamic graphs naturally abstract many real-world systems, such as social and transactional networks. While the research on continuous-time dynamic graph representation learning has made significant advances recently, neither graph topological properties nor temporal dependencies have been well-considered and explicitly modeled in capturing dynamic patterns. In this paper, we introduce a new approach, Neural Temporal Walks ( NeurTWs ), for representation learning on continuous-time dynamic graphs. By considering not only time constraints but also structural and tree traversal properties, our method conducts spatiotemporal-biased random walks to retrieve a set of representative motifs, enabling temporal nodes to be characterized effectively. With a component based on neural ordinary differential equations, the extracted motifs allow for irregularly-sampled temporal nodes to be embedded explicitly over multiple different interaction time intervals, enabling the effective capture of the underlying spatiotemporal dynamics. To enrich supervision signals, we further design a harder contrastive pretext task for model optimization. Our method demonstrates overwhelming superiority under both transductive and inductive settings on six real-world datasets 1 .", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Ming Jin", "Yuan-Fang Li", "Shirui Pan" ], "externalIds": { "DBLP": "conf/nips/JinLP22", "CorpusId": 258509474 }, "url": "https://www.semanticscholar.org/paper/92c1c49b490b47a16fd8d54ed2284fbd7a01201a", "referenceCount": 46, "citationCount": 61, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Mixed Noise and Constraint-Based Approach to Causal Inference in Time Series", "abstract": null, "year": 2021, "venue": "ECML/PKDD", "authors": [ "Charles K. 
Assaad", "Emilie Devijver", "Éric Gaussier", "Ali Aït-Bachir" ], "externalIds": { "DBLP": "conf/pkdd/AssaadDGA21", "DOI": "10.1007/978-3-030-86486-6_28", "CorpusId": 236325244 }, "url": "https://www.semanticscholar.org/paper/087992616443444efe6e73e65b45475ecfbfb617", "referenceCount": 21, "citationCount": 9, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "R´enyi’s information transfer between financial time series", "abstract": null, "year": 2012, "venue": "Physica A: Statistical Mechanics and its Applications", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "A Mathematical Theory of Communication", "abstract": "This paper opened the new area the information theory. Before this paper, most people believed that the only way to make the error probability of transmission as small as desired is to reduce the data rate (such as a long repetition scheme). However, surprisingly this paper revealed that it does not need to reduce the data rate for achieving that much of small errors. It proved that we can get some positive data rate that has the same small error probability and also there is an upper bound of the data rate, which means we cannot achieve the data rate with any encoding scheme that has small enough error probability over the upper bound.", "year": 2006, "venue": "", "authors": [ "Jin Woo Shin", "Sang Joon Kim" ], "externalIds": { "CorpusId": 5747983 }, "url": "https://www.semanticscholar.org/paper/6d12a1d23b21a9b170118a56386552bc5d4727de", "referenceCount": 0, "citationCount": 62860, "influentialCitationCount": 5070, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "A dendrite method for cluster analysis", "abstract": "A method for identifying clusters of points in a multidimensional Euclidean space is described and its application to taxonomy considered. It reconciles, in a sense, two different approaches to the investigation of the spatial relationships between the points, viz., the agglomerative and the divisive methods. A graph, the shortest dendrite of Florek etal. (1951a), is constructed on a nearest neighbour basis and then divided into clusters by applying the criterion of minimum within cluster sum of squares. This procedure ensures an effective reduction of the number of possible splits. The method may be applied to a dichotomous division, but is perfectly suitable also for a global division into any number of clusters. An informal indicator of the \"best number\" of clusters is suggested. It is a\"variance ratio criterion\" giving some insight into the structure of the points. The method is illustrated by three examples, one of which is original. The results obtained by the dendrite method are compared with those...", "year": 1974, "venue": "", "authors": [ "T. Caliński", "J. Harabasz" ], "externalIds": { "MAG": "2085487226", "DOI": "10.1080/03610927408827101", "CorpusId": 122217223 }, "url": "https://www.semanticscholar.org/paper/5a9b93d6b3fef4dd85bf5821c5889aed8cfcc1e4", "referenceCount": 11, "citationCount": 6612, "influentialCitationCount": 289, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Some methods for classification and analysis of multivariate observations", "abstract": "The main purpose of this paper is to describe a process for partitioning an N-dimensional population into k sets on the basis of a sample. 
The process, which is called 'k-means,' appears to give partitions which are reasonably efficient in the sense of within-class variance. That is, if $p$ is the probability mass function for the population, $S = \{S_1, S_2, \ldots, S_k\}$ is a partition of $E_N$, and $u_i$, $i = 1, 2, \ldots, k$, is the conditional mean of $p$ over the set $S_i$, then $W^2(S) = \sum_{i=1}^{k} \int_{S_i} \lVert z - u_i \rVert^2 \, dp(z)$ tends to be low for the partitions $S$ generated by the method. We say 'tends to be low,' primarily because of intuitive considerations, corroborated to some extent by mathematical analysis and practical computational experience. Also, the k-means procedure is easily programmed and is computationally economical, so that it is feasible to process very large samples on a digital computer. Possible applications include methods for similarity grouping, nonlinear prediction, approximating multivariate distributions, and nonparametric tests for independence among several variables. In addition to suggesting practical classification methods, the study of k-means has proved to be theoretically interesting. The k-means concept represents a generalization of the ordinary sample mean, and one is naturally led to study the pertinent asymptotic behavior, the object being to establish some sort of law of large numbers for the k-means. This problem is sufficiently interesting, in fact, for us to devote a good portion of this paper to it. The k-means are defined in section 2.1, and the main results which have been obtained on the asymptotic behavior are given there. The rest of section 2 is devoted to the proofs of these results. Section 3 describes several specific possible applications, and reports some preliminary results from computer experiments conducted to explore the possibilities inherent in the k-means idea. The extension to general metric spaces is indicated briefly in section 4. The original point of departure for the work described here was a series of problems in optimal classification (MacQueen [9]) which represented special", "year": 1967, "venue": "", "authors": [ "J. MacQueen" ], "externalIds": { "MAG": "2127218421", "CorpusId": 6278891 }, "url": "https://www.semanticscholar.org/paper/ac8ab51a86f1a9ae74dd0e4576d1a019f5e654ed", "referenceCount": 19, "citationCount": 27462, "influentialCitationCount": 1482, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] } ] }, "FastGL: A GPU-Efficient Framework for Accelerating Sampling-Based GNN Training at Large Scale": { "paper_title": "FastGL: A GPU-Efficient Framework for Accelerating Sampling-Based GNN Training at Large Scale", "arxiv_id": "2409.14939v1", "keyword": "graph neural network", "authors": [ "Zeyu Zhu", "Peisong Wang", "Qinghao Hu", "Gang Li", "Xiaoyao Liang", "Jian Cheng" ], "references": [ { "title": "Scalable and Efficient Full-Graph GNN Training for Large Graphs", "abstract": "Graph Neural Networks (GNNs) have emerged as powerful tools to capture structural information from graph-structured data, achieving state-of-the-art performance on applications such as recommendation, knowledge graph, and search. Graphs in these domains typically contain hundreds of millions of nodes and billions of edges. However, previous GNN systems demonstrate poor scalability because large and interleaved computation dependencies in GNN training cause significant overhead in current parallelization methods. We present G3, a distributed system that can efficiently train GNNs over billion-edge graphs at scale. 
G3 introduces GNN hybrid parallelism which synthesizes three dimensions of parallelism to scale out GNN training by sharing intermediate results peer-to-peer in fine granularity, eliminating layer-wise barriers for global collective communication or neighbor replications as seen in prior works. G3 leverages locality-aware iterative partitioning and multi-level pipeline scheduling to exploit acceleration opportunities by distributing balanced workload among workers and overlapping computation with communication in both inter-layer and intra-layer training processes. We show via a prototype implementation and comprehensive experiments that G3 can achieve as much as 2.24x speedup in a 16-node cluster, and better final accuracy over prior works.", "year": 2023, "venue": "Proc. ACM Manag. Data", "authors": [ "Xinchen Wan", "Kaiqiang Xu", "Xudong Liao", "Yilun Jin", "Kai Chen", "Xin Jin" ], "externalIds": { "DBLP": "journals/pacmmod/WanXLJ0023", "DOI": "10.1145/3589288", "CorpusId": 259213239 }, "url": "https://www.semanticscholar.org/paper/3229291b9b7e9a7200b543089e05a4442cbe6a9b", "referenceCount": 54, "citationCount": 24, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "IGB: Addressing The Gaps In Labeling, Features, Heterogeneity, and Size of Public Graph Datasets for Deep Learning Research", "abstract": "Graph neural networks (GNNs) have shown high potential for a variety of real-world, challenging applications, but one of the major obstacles in GNN research is the lack of large-scale flexible datasets. Most existing public datasets for GNNs are relatively small, which limits the ability of GNNs to generalize to unseen data. The few existing large-scale graph datasets provide very limited labeled data. This makes it difficult to determine if the GNN model's low accuracy for unseen data is inherently due to insufficient training data or if the model failed to generalize. Additionally, datasets used to train GNNs need to offer flexibility to enable a thorough study of the impact of various factors while training GNN models. In this work, we introduce the Illinois Graph Benchmark (IGB), a research dataset tool that the developers can use to train, scrutinize and systematically evaluate GNN models with high fidelity. IGB includes both homogeneous and heterogeneous academic graphs of enormous sizes, with more than 40% of their nodes labeled. Compared to the largest graph datasets publicly available, the IGB provides over 162× more labeled data for deep learning practitioners and developers to create and evaluate models with higher accuracy. The IGB dataset is a collection of academic graphs designed to be flexible, enabling the study of various GNN architectures, embedding generation techniques, and analyzing system performance issues for node classification tasks. IGB is open-sourced, supports DGL and PyG frameworks, and comes with releases of the raw text that we believe foster emerging language models and GNN research projects. An early public version of IGB is available at https://github.com/IllinoisGraphBenchmark/IGB-Datasets.", "year": 2023, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Arpandeep Khatua", "Vikram Sharma Mailthody", "Bhagyashree Taleka", "Tengfei Ma", "Xiang Song", "Wen-mei W. 
Hwu" ], "externalIds": { "DBLP": "conf/kdd/KhatuaMT0SH23", "ArXiv": "2302.13522", "DOI": "10.1145/3580305.3599843", "CorpusId": 257219811 }, "url": "https://www.semanticscholar.org/paper/6155e94e5174e4c615f890c185acbe4b635dba16", "referenceCount": 84, "citationCount": 20, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DSP: Efficient GNN Training with Multiple GPUs", "abstract": "Jointly utilizing multiple GPUs to train graph neural networks (GNNs) is crucial for handling large graphs and achieving high efficiency. However, we find that existing systems suffer from high communication costs and low GPU utilization due to improper data layout and training procedures. Thus, we propose a system dubbed Distributed Sampling and Pipelining (DSP) for multi-GPU GNN training. DSP adopts a tailored data layout to utilize the fast NVLink connections among the GPUs, which stores the graph topology and popular node features in GPU memory. For efficient graph sampling with multiple GPUs, we introduce a collective sampling primitive (CSP), which pushes the sampling tasks to data to reduce communication. We also design a producer-consumer-based pipeline, which allows tasks from different mini-batches to run congruently to improve GPU utilization. We compare DSP with state-of-the-art GNN training frameworks, and the results show that DSP consistently outperforms the baselines under different datasets, GNN models and GPU counts. The speedup of DSP can be up to 26x and is over 2x in most cases.", "year": 2023, "venue": "ACM SIGPLAN Symposium on Principles & Practice of Parallel Programming", "authors": [ "Zhenkun Cai", "Qihui Zhou", "Xiao Yan", "Da Zheng", "Xiang Song", "Chenguang Zheng", "James Cheng", "G. Karypis" ], "externalIds": { "DBLP": "conf/ppopp/CaiZ0ZSZCK23", "DOI": "10.1145/3572848.3577528", "CorpusId": 257051770 }, "url": "https://www.semanticscholar.org/paper/5b671f29e7830283d983a7f18f745b12abd490f8", "referenceCount": 44, "citationCount": 22, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Betty: Enabling Large-Scale GNN Training with Batch-Level Graph Partitioning", "abstract": "The Graph Neural Network (GNN) is showing outstanding results in improving the performance of graph-based applications. Recent studies demonstrate that GNN performance can be boosted via using more advanced aggregators, deeper aggregation depth, larger sampling rate, etc. While leading to promising results, the improvements come at a cost of significantly increased memory footprint, easily exceeding GPU memory capacity. In this paper, we introduce a method, Betty, to make GNN training more scalable and accessible via batch-level partitioning. Different from DNN training, a mini-batch in GNN has complex dependencies between input features and output labels, making batch-level partitioning difficult. Betty introduces two noveltechniques, redundancy-embedded graph (REG) partitioning and memory-aware partitioning, to effectively mitigate the redundancy and load imbalances issues across the partitions. 
Our evaluation of large-scale real-world datasets shows that Betty can significantly mitigate the memory bottleneck, enabling scalable GNN training with much deeper aggregation depths, larger sampling rate, larger training batch sizes, together with more advanced aggregators, with a few as a single GPU.", "year": 2023, "venue": "International Conference on Architectural Support for Programming Languages and Operating Systems", "authors": [ "Shuangyan Yang", "Minjia Zhang", "Wenqian Dong", "Dong Li" ], "externalIds": { "DBLP": "conf/asplos/YangZD023", "DOI": "10.1145/3575693.3575725", "CorpusId": 256391203 }, "url": "https://www.semanticscholar.org/paper/b506b880d2a818ea34bd65734cf79a03862104b8", "referenceCount": 49, "citationCount": 19, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "T-GCN: A Sampling Based Streaming Graph Neural Network System with Hybrid Architecture", "abstract": "As many real-world applications are streaming and attached with time instances, a few works have been proposed to learn streaming graph neural networks (GNNs). Unfortunately, current streaming GNNs are observed to have a large training overhead and suffer from bad parallel scalability on multiple GPUs. These drawbacks pose severe challenges to online learning of streaming GNNs and their application to real-time scenarios. To improve training efficiency, one promising solution is to use sampling, a technique widely used in static GNNs. However, to the best of our knowledge, sampling has not been investigated in learning streaming GNNs. Based on these observations, in this paper, we propose T-GCN, the first sampling-based streaming GNN system, which targets temporal-aware streaming graphs and takes advantage of a hybrid CPU-GPU co-processing architecture to achieve high throughput and low latency. T-GCN proposes an efficient sampling method, namely Segment Its Search, to offer high sampling speed with respect to three typical types of general graph sampling methods (i.e., node-wise, layer-wise, and subgraph sampling). We propose a locality-aware data partitioning method to reduce CPU-GPU communication latency and data transfer overhead, and an NVLink-specific task schedule to fully exploit NVLink's fast speed and improve GPU-GPU communication efficiency. Besides, we further pipeline the computation and the communication by introducing an efficient memory management mechanism, to improve scalability while hiding data communication. Overall, with respect to end-to-end performance, for single-GPU training, T-GCN achieves up to 7.9× speedup than state-of-the-art works. In terms of scalability, T-GCN runs 5.2× faster on average with 8 GPUs than one GPU. Additionally, in terms of sampling, T-GCN also yields a maximum of 38.8× speedup with our Segment Its Search sampling method.", "year": 2022, "venue": "International Conference on Parallel Architectures and Compilation Techniques", "authors": [ "Chengying Huan", "S. 
Song", "Yongchao Liu", "Heng Zhang", "Hang Liu", "Charles He", "Kang Chen", "Jinlei Jiang", "Yongwei Wu" ], "externalIds": { "DBLP": "conf/IEEEpact/HuanSLZ0HCJW22", "DOI": "10.1145/3559009.3569648", "CorpusId": 256304986 }, "url": "https://www.semanticscholar.org/paper/a1162fa7e6e09872341001175c39a8662d41162b", "referenceCount": 52, "citationCount": 7, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GNNLab: a factored system for sample-based GNN training over GPUs", "abstract": "We propose GNNLab, a sample-based GNN training system in a single machine multi-GPU setup. GNNLab adopts a factored design for multiple GPUs, where each GPU is dedicated to the task of graph sampling or model training. It accelerates both tasks by eliminating GPU memory contention. To balance GPU workloads, GNNLab applies a global queue to bridge GPUs asynchronously and adopts a simple yet effective method to adaptively allocate GPUs for different tasks. GNNLab further leverages temporarily switching to avoid idle waiting on GPUs. Furthermore, GNNLab proposes a new pre-sampling based caching policy that takes both sampling algorithms and GNN datasets into account, and shows an efficient and robust caching performance. Evaluations on three representative GNN models and four real-life graphs show that GNNLab outperforms the state-of-the-art GNN systems DGL and PyG by up to 9.1× (from 2.4×) and 74.3× (from 10.2×), respectively. In addition, our pre-sampling based caching policy achieves 90% -- 99% of the optimal cache hit rate in all experiments.", "year": 2022, "venue": "European Conference on Computer Systems", "authors": [ "Jianbang Yang", "Dahai Tang", "Xiaoniu Song", "Lei Wang", "Qiang Yin", "Rong Chen", "Wenyuan Yu", "Jingren Zhou" ], "externalIds": { "DBLP": "conf/eurosys/YangTSWYCYZ22", "DOI": "10.1145/3492321.3519557", "CorpusId": 247765694 }, "url": "https://www.semanticscholar.org/paper/740b9d4414a955a74c16f5f3617d2b5dde2e9adf", "referenceCount": 67, "citationCount": 65, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Rethinking graph data placement for graph neural network training on multiple GPUs", "abstract": "Graph partitioning is commonly used for dividing graph data for parallel processing. While they achieve good performance for the traditional graph processing algorithms, the existing graph partitioning methods are unsatisfactory for data-parallel GNN training on GPUs. In this work, we rethink the graph data placement problem for large-scale GNN training on multiple GPUs. We find that loading input features is a performance bottleneck for GNN training on large graphs that cannot be stored on GPU. To reduce the data loading overhead, we first propose a performance model of data movement among CPU and GPUs in GNN training. Then, based on the performance model, we provide an efficient algorithm to divide and distribute the graph data onto multiple GPUs so that the data loading time is minimized. For cases where data placement alone cannot achieve good performance, we propose a locality-aware neighbor sampling technique to further reduce the data movement overhead without losing accuracy. 
Our experiments with graphs of different sizes on different numbers of GPUs show that our techniques not only achieve smaller data loading time but also incur much less preprocessing overhead than the existing graph partitioning methods.", "year": 2022, "venue": "ACM SIGPLAN Symposium on Principles & Practice of Parallel Programming", "authors": [ "Shihui Song", "Peng Jiang" ], "externalIds": { "DBLP": "conf/ics/SongJ22", "DOI": "10.1145/3524059.3532384", "CorpusId": 247765547 }, "url": "https://www.semanticscholar.org/paper/20dc2612c0296952a363f2aa5d78d2178503553f", "referenceCount": 41, "citationCount": 16, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "BNS-GCN: Efficient Full-Graph Training of Graph Convolutional Networks with Partition-Parallelism and Random Boundary Node Sampling", "abstract": "Graph Convolutional Networks (GCNs) have emerged as the state-of-the-art method for graph-based learning tasks. However, training GCNs at scale is still challenging, hindering both the exploration of more sophisticated GCN architectures and their applications to real-world large graphs. While it might be natural to consider graph partition and distributed training for tackling this challenge, this direction has only been slightly scratched the surface in the previous works due to the limitations of existing designs. In this work, we first analyze why distributed GCN training is ineffective and identify the underlying cause to be the excessive number of boundary nodes of each partitioned subgraph, which easily explodes the memory and communication costs for GCN training. Furthermore, we propose a simple yet effective method dubbed BNS-GCN that adopts random Boundary-Node-Sampling to enable efficient and scalable distributed GCN training. Experiments and ablation studies consistently validate the effectiveness of BNS-GCN, e.g., boosting the throughput by up to 16.2x and reducing the memory usage by up to 58%, while maintaining a full-graph accuracy. Furthermore, both theoretical and empirical analysis show that BNS-GCN enjoys a better convergence than existing sampling-based methods. We believe that our BNS-GCN has opened up a new paradigm for enabling GCN training at scale. The code is available at https://github.com/RICE-EIC/BNS-GCN.", "year": 2022, "venue": "Conference on Machine Learning and Systems", "authors": [ "Cheng Wan", "Youjie Li", "Ang Li", "Namjae Kim", "Yingyan Lin" ], "externalIds": { "DBLP": "conf/mlsys/WanLLKL22", "ArXiv": "2203.10983", "DOI": "10.48550/arXiv.2203.10983", "CorpusId": 247763186 }, "url": "https://www.semanticscholar.org/paper/d43acd18e8ddabba5c35e88788723ab8d18345da", "referenceCount": 49, "citationCount": 57, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Cooperative Behavior Planning for Automated Driving Using Graph Neural Networks", "abstract": "Urban intersections are prone to delays and inefficiencies due to static precedence rules and occlusions limiting the view on prioritized traffic. Existing approaches to improve traffic flow, widely known as automatic intersection management systems, are mostly based on non-learning reservation schemes or optimization algorithms. Machine learning-based techniques show promising results in planning for a single ego vehicle. This work proposes to leverage machine learning algorithms to optimize traffic flow at urban intersections by jointly planning for multiple vehicles. 
Learning-based behavior planning poses several challenges, demanding for a suited input and output representation as well as large amounts of ground-truth data. We address the former issue by using a flexible graph-based input representation accompanied by a graph neural network. This allows to efficiently encode the scene and inherently provide individual outputs for all involved vehicles. To learn a sensible policy, without relying on the imitation of expert demonstrations, the cooperative planning task is considered as a reinforcement learning problem. We train and evaluate the proposed method in an open-source simulation environment for decision making in automated driving. Compared to a first-in-first-out scheme and traffic governed by static priority rules, the learned planner shows a significant gain in flow rate, while reducing the number of induced stops. In addition to synthetic simulations, the approach is also evaluated based on real-world traffic data taken from the publicly available inD dataset.", "year": 2022, "venue": "2022 IEEE Intelligent Vehicles Symposium (IV)", "authors": [ "Marvin Klimke", "Benjamin Völz", "M. Buchholz" ], "externalIds": { "ArXiv": "2202.11376", "DBLP": "conf/ivs/KlimkeVB22", "DOI": "10.1109/IV51971.2022.9827230", "CorpusId": 250720578 }, "url": "https://www.semanticscholar.org/paper/8053b1962425ae96478ee0e9168512f7141fc6a2", "referenceCount": 25, "citationCount": 18, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ByteGNN: Efficient Graph Neural Network Training at Large Scale", "abstract": "Graph neural networks (GNNs) have shown excellent performance in a wide range of applications such as recommendation, risk control, and drug discovery. With the increase in the volume of graph data, distributed GNN systems become essential to support efficient GNN training. However, existing distributed GNN training systems suffer from various performance issues including high network communication cost, low CPU utilization, and poor end-to-end performance. In this paper, we propose ByteGNN, which addresses the limitations in existing distributed GNN systems with three key designs: (1) an abstraction of mini-batch graph sampling to support high parallelism, (2) a two-level scheduling strategy to improve resource utilization and to reduce the end-to-end GNN training time, and (3) a graph partitioning algorithm tailored for GNN workloads. 
Our experiments show that ByteGNN outperforms the state-of-the-art distributed GNN systems with up to 3.5--23.8 times faster end-to-end execution, 2--6 times higher CPU utilization, and around half of the network communication cost.", "year": 2022, "venue": "Proceedings of the VLDB Endowment", "authors": [ "Che Zheng", "Hongzhi Chen", "Yuxuan Cheng", "Zhezheng Song", "Yifan Wu", "Changji", "Li", "James Cheng", "Han Yang", "Shuai Zhang" ], "externalIds": { "DBLP": "journals/pvldb/ZhengCCSWLCYZ22", "DOI": "10.14778/3514061.3514069", "CorpusId": 249146827 }, "url": "https://www.semanticscholar.org/paper/1df8b8cc125f667c6495b76e347da621109b0f73", "referenceCount": 79, "citationCount": 64, "influentialCitationCount": 8, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "BGL: GPU-Efficient GNN Training by Optimizing Graph Data I/O and Preprocessing", "abstract": "Graph neural networks (GNNs) have extended the success of deep neural networks (DNNs) to non-Euclidean graph data, achieving ground-breaking performance on various tasks such as node classification and graph property prediction. Nonetheless, existing systems are inefficient to train large graphs with billions of nodes and edges with GPUs. The main bottlenecks are the process of preparing data for GPUs - subgraph sampling and feature retrieving. This paper proposes BGL, a distributed GNN training system designed to address the bottlenecks with a few key ideas. First, we propose a dynamic cache engine to minimize feature retrieving traffic. By a co-design of caching policy and the order of sampling, we find a sweet spot of low overhead and high cache hit ratio. Second, we improve the graph partition algorithm to reduce cross-partition communication during subgraph sampling. Finally, careful resource isolation reduces contention between different data preprocessing stages. Extensive experiments on various GNN models and large graph datasets show that BGL significantly outperforms existing GNN training systems by 20.68x on average.", "year": 2021, "venue": "Symposium on Networked Systems Design and Implementation", "authors": [ "Tianfeng Liu", "Yangrui Chen", "Dan Li", "Chuan Wu", "Yibo Zhu", "Jun He", "Yanghua Peng", "Hongzheng Chen", "Hongzhi Chen", "Chuanxiong Guo" ], "externalIds": { "DBLP": "journals/corr/abs-2112-08541", "ArXiv": "2112.08541", "CorpusId": 245218921 }, "url": "https://www.semanticscholar.org/paper/94f0823f8db5360972a7a68b453e28ddf9c4e992", "referenceCount": 56, "citationCount": 55, "influentialCitationCount": 12, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Accelerating Training and Inference of Graph Neural Networks with Fast Sampling and Pipelining", "abstract": "Improving the training and inference performance of graph neural networks (GNNs) is faced with a challenge uncommon in general neural networks: creating mini-batches requires a lot of computation and data movement due to the exponential growth of multi-hop graph neighborhoods along network layers. Such a unique challenge gives rise to a diverse set of system design choices. We argue in favor of performing mini-batch training with neighborhood sampling in a distributed multi-GPU environment, under which we identify major performance bottlenecks hitherto under-explored by developers: mini-batch preparation and transfer. 
We present a sequence of improvements to mitigate these bottlenecks, including a performance-engineered neighborhood sampler, a shared-memory parallelization strategy, and the pipelining of batch transfer with GPU computation. We also conduct an empirical analysis that supports the use of sampling for inference, showing that test accuracies are not materially compromised. Such an observation unifies training and inference, simplifying model implementation. We report comprehensive experimental results with several benchmark data sets and GNN architectures, including a demonstration that, for the ogbn-papers100M data set, our system SALIENT achieves a speedup of 3x over a standard PyTorch-Geometric implementation with a single GPU and a further 8x parallel speedup with 16 GPUs. Therein, training a 3-layer GraphSAGE model with sampling fanout (15, 10, 5) takes 2.0 seconds per epoch and inference with fanout (20, 20, 20) takes 2.4 seconds, attaining test accuracy 64.58%.", "year": 2021, "venue": "Conference on Machine Learning and Systems", "authors": [ "Tim Kaler", "Nickolas Stathas", "Anne Ouyang", "A. Iliopoulos", "T. Schardl", "C. Leiserson", "Jie Chen" ], "externalIds": { "DBLP": "journals/corr/abs-2110-08450", "ArXiv": "2110.08450", "CorpusId": 239016145 }, "url": "https://www.semanticscholar.org/paper/8d93f1738790954941105d5631ad777c85fd08c1", "referenceCount": 46, "citationCount": 42, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Dorylus: Affordable, Scalable, and Accurate GNN Training with Distributed CPU Servers and Serverless Threads", "abstract": "A graph neural network (GNN) enables deep learning on structured graph data. There are two major GNN training obstacles: 1) it relies on high-end servers with many GPUs which are expensive to purchase and maintain, and 2) limited memory on GPUs cannot scale to today's billion-edge graphs. This paper presents Dorylus: a distributed system for training GNNs. Uniquely, Dorylus can take advantage of serverless computing to increase scalability at a low cost. The key insight guiding our design is computation separation. Computation separation makes it possible to construct a deep, bounded-asynchronous pipeline where graph and tensor parallel tasks can fully overlap, effectively hiding the network latency incurred by Lambdas. With the help of thousands of Lambda threads, Dorylus scales GNN training to billion-edge graphs. Currently, for large graphs, CPU servers offer the best performance-per-dollar over GPU servers. Just using Lambdas on top of CPU servers offers up to 2.75x more performance-per-dollar than training only with CPU servers. Concretely, Dorylus is 1.22x faster and 4.83x cheaper than GPU servers for massive sparse graphs. Dorylus is up to 3.8x faster and 10.7x cheaper compared to existing sampling-based systems.", "year": 2021, "venue": "USENIX Symposium on Operating Systems Design and Implementation", "authors": [ "John Thorpe", "Yifan Qiao", "Jon Eyolfson", "Shen Teng", "Guanzhou Hu", "Zhihao Jia", "Jinliang Wei", "Keval Vora", "R. Netravali", "Miryung Kim", "G. 
Xu" ], "externalIds": { "DBLP": "conf/osdi/ThorpeQETHJWVNK21", "ArXiv": "2105.11118", "CorpusId": 235167057 }, "url": "https://www.semanticscholar.org/paper/fb1a90bd9179e6d3f754b565847523a3dc775671", "referenceCount": 110, "citationCount": 117, "influentialCitationCount": 15, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FlexGraph: a flexible and efficient distributed framework for GNN training", "abstract": "Graph neural networks (GNNs) aim to learn a low-dimensional feature for each vertex in the graph from its input high-dimensional feature, by aggregating the features of the vertex's neighbors iteratively. This paper presents Flex-Graph, a distributed framework for training GNN models. FlexGraph is able to efficiently train GNN models with flexible definitions of neighborhood and hierarchical aggregation schemes, which are the two main characteristics associated with GNNs. In contrast, existing GNN frameworks are usually designed for GNNs having fixed definitions and aggregation schemes. They cannot support different kinds of GNN models well simultaneously. Underlying FlexGraph are a simple GNN programming abstraction called NAU and a compact data structure for modeling various aggregation operations. To achieve better performance, FlexGraph is equipped with a hybrid execution strategy to select proper and efficient operations according to different contexts during aggregating neighborhood features, an application-driven workload balancing strategy to balance GNN training workload and reduce synchronization overhead, and a pipeline processing strategy to overlap computations and communications. Using real-life datasets and GNN models GCN, PinSage and MAGNN, we verify that NAU makes FlexGraph more expressive than prior frameworks (e.g., DGL and Euler) which adopt GAS-like programming abstractions, e.g., it can handle MAGNN that is beyond the reach of DGL and Euler. The evaluation further shows that FlexGraph outperforms the state-of-the-art GNN frameworks such as DGL and Euler in training time by on average 8.5× on GCN and PinSage.", "year": 2021, "venue": "European Conference on Computer Systems", "authors": [ "Lei Wang", "Qiang Yin", "Chao Tian", "Jianbang Yang", "Rong Chen", "Wenyuan Yu", "Zihang Yao", "Jingren Zhou" ], "externalIds": { "DBLP": "conf/eurosys/WangY0YCYYZ21", "DOI": "10.1145/3447786.3456229", "CorpusId": 233328090 }, "url": "https://www.semanticscholar.org/paper/016e5d45c8f932e59d8d048bae357469cccf1064", "referenceCount": 46, "citationCount": 49, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Biased Graph Neural Network Sampler with Near-Optimal Regret", "abstract": "Graph neural networks (GNN) have recently emerged as a vehicle for applying deep network architectures to graph and relational data. However, given the increasing size of industrial datasets, in many practical situations the message passing computations required for sharing information across GNN layers are no longer scalable. Although various sampling methods have been introduced to approximate full-graph training within a tractable budget, there remain unresolved complications such as high variances and limited theoretical guarantees. To address these issues, we build upon existing work and treat GNN neighbor sampling as a multi-armed bandit problem but with a newly-designed reward function that introduces some degree of bias designed to reduce variance and avoid unstable, possibly-unbounded pay outs. 
And unlike prior bandit-GNN use cases, the resulting policy leads to near-optimal regret while accounting for the GNN training dynamics introduced by SGD. From a practical standpoint, this translates into lower variance estimates and competitive or superior test accuracy across several benchmarks.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Qingru Zhang", "D. Wipf", "Quan Gan", "Le Song" ], "externalIds": { "DBLP": "conf/nips/ZhangWGS21", "ArXiv": "2103.01089", "CorpusId": 232092233 }, "url": "https://www.semanticscholar.org/paper/f051766065c7865b7ac360c6fa5c25476e2c7fae", "referenceCount": 40, "citationCount": 21, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GCNAX: A Flexible and Energy-efficient Accelerator for Graph Convolutional Neural Networks", "abstract": "Graph convolutional neural networks (GCNs) have emerged as an effective approach to extend deep learning for graph data analytics. Given that graphs are usually irregular, as nodes in a graph may have a varying number of neighbors, processing GCNs efficiently pose a significant challenge on the underlying hardware. Although specialized GCN accelerators have been proposed to deliver better performance over generic processors, prior accelerators not only under-utilize the compute engine, but also impose redundant data accesses that reduce throughput and energy efficiency. Therefore, optimizing the overall flow of data between compute engines and memory, i.e., the GCN dataflow, which maximizes utilization and minimizes data movement is crucial for achieving efficient GCN processing.In this paper, we propose a flexible and optimized dataflow for GCNs that simultaneously improves resource utilization and reduces data movement. This is realized by fully exploring the design space of GCN dataflows and evaluating the number of execution cycles and DRAM accesses through an analysis framework. Unlike prior GCN dataflows, which employ rigid loop orders and loop fusion strategies, the proposed dataflow can reconFigure the loop order and loop fusion strategy to adapt to different GCN configurations, which results in much improved efficiency. We then introduce a novel accelerator architecture called GCNAX, which tailors the compute engine, buffer structure and size based on the proposed dataflow. Evaluated on five real-world graph datasets, our simulation results show that GCNAX reduces DRAM accesses by a factor of $8.1 \\times$ and $2.4 \\times$, while achieving $8.9 \\times, 1.6 \\times$ speedup and $9.5 \\times$, $2.3 \\times$ energy savings on average over HyGCN and AWB-GCN, respectively.", "year": 2021, "venue": "International Symposium on High-Performance Computer Architecture", "authors": [ "Jiajun Li", "A. Louri", "Avinash Karanth", "Razvan C. Bunescu" ], "externalIds": { "MAG": "3157609068", "DBLP": "conf/hpca/LiLKB21a", "DOI": "10.1109/HPCA51647.2021.00070", "CorpusId": 233375962 }, "url": "https://www.semanticscholar.org/paper/61a0bc1c9ba2783b99cd64f74b58663a82ad2818", "referenceCount": 66, "citationCount": 84, "influentialCitationCount": 19, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PaGraph: Scaling GNN training on large graphs via computation-aware caching", "abstract": "Emerging graph neural networks (GNNs) have extended the successes of deep learning techniques against datasets like images and texts to more complex graph-structured data. 
By leveraging GPU accelerators, existing frameworks combine both mini-batch and sampling for effective and efficient model training on large graphs. However, this setup faces a scalability issue since loading rich vertices features from CPU to GPU through a limited bandwidth link usually dominates the training cycle. In this paper, we propose PaGraph, a system that supports general and efficient sampling-based GNN training on single-server with multi-GPU. PaGraph significantly reduces the data loading time by exploiting available GPU resources to keep frequently accessed graph data with a cache. It also embodies a lightweight yet effective caching policy that takes into account graph structural information and data access patterns of sampling-based GNN training simultaneously. Furthermore, to scale out on multiple GPUs, PaGraph develops a fast GNN-computation-aware partition algorithm to avoid cross-partition access during data parallel training and achieves better cache efficiency. Evaluations on two representative GNN models, GCN and GraphSAGE, show that PaGraph achieves up to 96.8% data loading time reductions and up to 4.8X performance speedup over the state-of-the-art baselines. Together with preprocessing optimization, PaGraph further delivers up to 16.0X end-to-end speedup.", "year": 2020, "venue": "ACM Symposium on Cloud Computing", "authors": [ "Zhiqi Lin", "Cheng Li", "Youshan Miao", "Yunxin Liu", "Yinlong Xu" ], "externalIds": { "DBLP": "conf/cloud/LinLMLX20", "MAG": "3096566397", "DOI": "10.1145/3419111.3421281", "CorpusId": 222296394 }, "url": "https://www.semanticscholar.org/paper/c72f3f0ab953ed4e42b66b071471ce26dc0e4675", "referenceCount": 55, "citationCount": 130, "influentialCitationCount": 29, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DistDGL: Distributed Graph Neural Network Training for Billion-Scale Graphs", "abstract": "Graph neural networks (GNN) have shown great success in learning from graph-structured data. They are widely used in various applications, such as recommendation, fraud detection, and search. In these domains, the graphs are typically large, containing hundreds of millions of nodes and several billions of edges. To tackle this challenge, we develop DistDGL, a system for training GNNs in a mini-batch fashion on a cluster of machines. DistDGL is based on the Deep Graph Library (DGL), a popular GNN development framework. DistDGL distributes the graph and its associated data (initial features and embeddings) across the machines and uses this distribution to derive a computational decomposition by following an owner-compute rule. DistDGL follows a synchronous training approach and allows ego-networks forming the mini-batches to include non-local nodes. To minimize the overheads associated with distributed computations, DistDGL uses a high-quality and light-weight min-cut graph partitioning algorithm along with multiple balancing constraints. This allows it to reduce communication overheads and statically balance the computations. It further reduces the communication by replicating halo nodes and by using sparse embedding updates. The combination of these design choices allows DistDGL to train high-quality models while achieving high parallel efficiency and memory scalability. We demonstrate our optimizations on both inductive and transductive GNN models. 
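Relating to the GPU feature-caching idea described in the PaGraph entry above, here is a toy static-cache sketch in Python. The degree-based "hotness" heuristic, the class name, and the hit/miss bookkeeping are assumptions of this sketch, not PaGraph's actual policy or implementation.

```python
# Toy static feature cache: keep features of the most frequently accessed
# (here: highest-degree) vertices resident; fall back to host memory otherwise.
import numpy as np

class StaticFeatureCache:
    def __init__(self, adj, feats, capacity):
        degree = adj.sum(axis=0)
        hot = np.argsort(-degree)[:capacity].tolist()
        self.cache = {int(v): feats[v] for v in hot}   # stands in for GPU-resident features
        self.feats = feats                             # stands in for CPU/host memory
        self.hits = 0
        self.misses = 0

    def fetch(self, nodes):
        rows = []
        for v in nodes:
            if int(v) in self.cache:
                self.hits += 1
                rows.append(self.cache[int(v)])
            else:
                self.misses += 1
                rows.append(self.feats[v])             # simulated host-to-GPU transfer
        return np.stack(rows)

rng = np.random.default_rng(0)
adj = (rng.random((500, 500)) < 0.02).astype(float)
feats = rng.normal(size=(500, 32))
cache = StaticFeatureCache(adj, feats, capacity=100)
_ = cache.fetch(rng.integers(500, size=64))
print(cache.hits, cache.misses)
```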
Our results show that DistDGL achieves linear speedup without compromising model accuracy and requires only 13 seconds to complete a training epoch for a graph with 100 million nodes and 3 billion edges on a cluster with 16 machines.", "year": 2020, "venue": "Workshop on Irregular Applications: Architectures and Algorithms", "authors": [ "Da Zheng", "Chao Ma", "Minjie Wang", "Jinjing Zhou", "Qidong Su", "Xiang Song", "Quan Gan", "Zheng Zhang", "G. Karypis" ], "externalIds": { "DBLP": "conf/sc/Zheng0WZSSGZK20", "ArXiv": "2010.05337", "MAG": "3093166425", "DOI": "10.1109/IA351965.2020.00011", "CorpusId": 222291574 }, "url": "https://www.semanticscholar.org/paper/037df1500b9b8d4a57455b7ad205f86cc94a0b13", "referenceCount": 37, "citationCount": 208, "influentialCitationCount": 48, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "C-SAW: A Framework for Graph Sampling and Random Walk on GPUs", "abstract": "Many applications require to learn, mine, analyze and visualize large-scale graphs. These graphs are often too large to be addressed efficiently using conventional graph processing technologies. Fortunately, recent research efforts find out graph sampling and random walk, which significantly reduce the size of original graphs, can benefit the tasks of learning, mining, analyzing and visualizing large graphs by capturing the desirable graph properties. This paper introduces C-SAW, the first framework that accelerates Sampling and Random Walk framework on GPUs. Particularly, C-SAW makes three contributions: First, our framework provides a generic API which allows users to implement a wide range of sampling and random walk algorithms with ease. Second, offloading this framework on GPU, we introduce warp-centric parallel selection, and two novel optimizations for collision migration. Third, towards supporting graphs that exceed the GPU memory capacity, we introduce efficient data transfer optimizations for out-of-memory and multi-GPU sampling, such as workload-aware scheduling and batched multi-instance sampling. Taken together, our framework constantly outperforms the state of the art projects in addition to the capability of supporting a wide range of sampling and random walk algorithms.", "year": 2020, "venue": "International Conference for High Performance Computing, Networking, Storage and Analysis", "authors": [ "Santosh Pandey", "Lingda Li", "A. Hoisie", "X. Li", "Hang Liu" ], "externalIds": { "MAG": "3087202534", "ArXiv": "2009.09103", "DBLP": "journals/corr/abs-2009-09103", "DOI": "10.1109/SC41405.2020.00060", "CorpusId": 221818972 }, "url": "https://www.semanticscholar.org/paper/5496654b0e517cc0a3c37786011d651c86425e45", "referenceCount": 84, "citationCount": 47, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Accelerating graph sampling for graph machine learning using GPUs", "abstract": "Representation learning algorithms automatically learn the features of data. Several representation learning algorithms for graph data, such as DeepWalk, node2vec, and Graph-SAGE, sample the graph to produce mini-batches that are suitable for training a DNN. However, sampling time can be a significant fraction of training time, and existing systems do not efficiently parallelize sampling. Sampling is an \"embarrassingly parallel\" problem and may appear to lend itself to GPU acceleration, but the irregularity of graphs makes it hard to use GPU resources effectively. 
This paper presents NextDoor, a system designed to effectively perform graph sampling on GPUs. NextDoor employs a new approach to graph sampling that we call transit-parallelism, which allows load balancing and caching of edges. NextDoor provides end-users with a high-level abstraction for writing a variety of graph sampling algorithms. We implement several graph sampling applications, and show that NextDoor runs them orders of magnitude faster than existing systems.", "year": 2020, "venue": "European Conference on Computer Systems", "authors": [ "Abhinav Jangda", "Sandeep Polisetty", "Arjun Guha", "M. Serafini" ], "externalIds": { "DBLP": "conf/eurosys/JangdaPGS21", "ArXiv": "2009.06693", "DOI": "10.1145/3447786.3456244", "CorpusId": 233328073 }, "url": "https://www.semanticscholar.org/paper/528ff02f6d6505bd121107edd69ab9c777a53d7e", "referenceCount": 47, "citationCount": 64, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "HAGERec: Hierarchical Attention Graph Convolutional Network Incorporating Knowledge Graph for Explainable Recommendation", "abstract": null, "year": 2020, "venue": "Knowledge-Based Systems", "authors": [ "Zuoxi Yang", "Shoubin Dong" ], "externalIds": { "MAG": "3040301022", "DBLP": "journals/kbs/YangD20", "DOI": "10.1016/j.knosys.2020.106194", "CorpusId": 224898605 }, "url": "https://www.semanticscholar.org/paper/c4cca20a45472e012e0babd5908ddbb74dc3d76d", "referenceCount": 33, "citationCount": 80, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multi-behavior Recommendation with Graph Convolutional Networks", "abstract": "Traditional recommendation models that usually utilize only one type of user-item interaction are faced with serious data sparsity or cold start issues. Multi-behavior recommendation taking use of multiple types of user-item interactions, such as clicks and favorites, can serve as an effective solution. Early efforts towards multi-behavior recommendation fail to capture behaviors' different influence strength on target behavior. They also ignore behaviors' semantics which is implied in multi-behavior data. Both of these two limitations make the data not fully exploited for improving the recommendation performance on the target behavior. In this work, we approach this problem by innovatively constructing a unified graph to represent multi-behavior data and proposing a new model named MBGCN (short for Multi-Behavior Graph Convolutional Network ). Learning behavior strength by user-item propagation layer and capturing behavior semantics by item-item propagation layer, MBGCN can well address the limitations of existing works. Empirical results on two real-world datasets verify the effectiveness of our model in exploiting multi-behavior data. Our model outperforms the best baseline by 25.02% and 6.51% averagely on two datasets. 
Further studies on cold-start users confirm the practicability of our proposed model.", "year": 2020, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "Bowen Jin", "Chen Gao", "Xiangnan He", "Depeng Jin", "Yong Li" ], "externalIds": { "MAG": "3035287707", "DBLP": "conf/sigir/JinG0JL20", "DOI": "10.1145/3397271.3401072", "CorpusId": 220730113 }, "url": "https://www.semanticscholar.org/paper/b440396ba1a92513bb32f67c7ede2fa5556b3246", "referenceCount": 45, "citationCount": 262, "influentialCitationCount": 35, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Scaling Graph Neural Networks with Approximate PageRank", "abstract": "Graph neural networks (GNNs) have emerged as a powerful approach for solving many network mining tasks. However, learning on large graphs remains a challenge -- many recently proposed scalable GNN approaches rely on an expensive message-passing procedure to propagate information through the graph. We present the PPRGo model which utilizes an efficient approximation of information diffusion in GNNs resulting in significant speed gains while maintaining state-of-the-art prediction performance. In addition to being faster, PPRGo is inherently scalable, and can be trivially parallelized for large datasets like those found in industry settings. We demonstrate that PPRGo outperforms baselines in both distributed and single-machine training environments on a number of commonly used academic graphs. To better analyze the scalability of large-scale graph learning methods, we introduce a novel benchmark graph with 12.4 million nodes, 173 million edges, and 2.8 million node features. We show that training PPRGo from scratch and predicting labels for all nodes in this graph takes under 2 minutes on a single machine, far outpacing other baselines on the same graph. We discuss the practical application of PPRGo to solve large-scale node classification problems at Google.", "year": 2020, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Aleksandar Bojchevski", "Johannes Klicpera", "Bryan Perozzi", "Amol Kapoor", "Martin J. Blais", "Benedek R'ozemberczki", "Michal Lukasik", "Stephan Gunnemann" ], "externalIds": { "DBLP": "journals/corr/abs-2007-01570", "ArXiv": "2007.01570", "MAG": "3039500550", "DOI": "10.1145/3394486.3403296", "CorpusId": 220347100 }, "url": "https://www.semanticscholar.org/paper/3da4626411d83c19c9919bb41dba94fff88da90e", "referenceCount": 58, "citationCount": 324, "influentialCitationCount": 33, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "GNNAdvisor: An Adaptive and Efficient Runtime System for GNN Acceleration on GPUs", "abstract": "As the emerging trend of graph-based deep learning, Graph Neural Networks (GNNs) excel for their capability to generate high-quality node feature vectors (embeddings). However, the existing one-size-fits-all GNN implementations are insufficient to catch up with the evolving GNN architectures, the ever-increasing graph sizes, and the diverse node embedding dimensionalities. To this end, we propose \\textbf{GNNAdvisor}, an adaptive and efficient runtime system to accelerate various GNN workloads on GPU platforms. First, GNNAdvisor explores and identifies several performance-relevant features from both the GNN model and the input graph, and uses them as a new driving force for GNN acceleration. 
Second, GNNAdvisor implements a novel and highly-efficient 2D workload management, tailored for GNN computation to improve GPU utilization and performance under different application settings. Third, GNNAdvisor capitalizes on the GPU memory hierarchy for acceleration by gracefully coordinating the execution of GNNs according to the characteristics of the GPU memory structure and GNN workloads. Furthermore, to enable automatic runtime optimization, GNNAdvisor incorporates a lightweight analytical model for an effective design parameter search. Extensive experiments show that GNNAdvisor outperforms the state-of-the-art GNN computing frameworks, such as Deep Graph Library ($3.02\\times$ faster on average) and NeuGraph (up to $4.10\\times$ faster), on mainstream GNN architectures across various datasets.", "year": 2020, "venue": "USENIX Symposium on Operating Systems Design and Implementation", "authors": [ "Yuke Wang", "Boyuan Feng", "Gushu Li", "Shuangchen Li", "Lei Deng", "Yuan Xie", "Yufei Ding" ], "externalIds": { "DBLP": "conf/osdi/WangFLL00D21", "ArXiv": "2006.06608", "CorpusId": 235490184 }, "url": "https://www.semanticscholar.org/paper/a72bbf818135b30ab24835e663bc8dcb7b8274ff", "referenceCount": 63, "citationCount": 100, "influentialCitationCount": 15, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Open Graph Benchmark: Datasets for Machine Learning on Graphs", "abstract": "We present the Open Graph Benchmark (OGB), a diverse set of challenging and realistic benchmark datasets to facilitate scalable, robust, and reproducible graph machine learning (ML) research. OGB datasets are large-scale (up to 100+ million nodes and 1+ billion edges), encompass multiple important graph ML tasks, and cover a diverse range of domains, ranging from social and information networks to biological networks, molecular graphs, source code ASTs, and knowledge graphs. For each dataset, we provide a unified evaluation protocol using meaningful application-specific data splits and evaluation metrics. In addition to building the datasets, we also perform extensive benchmark experiments for each dataset. Our experiments suggest that OGB datasets present significant challenges of scalability to large-scale graphs and out-of-distribution generalization under realistic data splits, indicating fruitful opportunities for future research. Finally, OGB provides an automated end-to-end graph ML pipeline that simplifies and standardizes the process of graph data loading, experimental setup, and model evaluation. OGB will be regularly updated and welcomes inputs from the community. OGB datasets as well as data loaders, evaluation scripts, baseline code, and leaderboards are publicly available at this https URL .", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Weihua Hu", "Matthias Fey", "M. Zitnik", "Yuxiao Dong", "Hongyu Ren", "Bowen Liu", "Michele Catasta", "J. 
Leskovec" ], "externalIds": { "MAG": "3021975806", "DBLP": "journals/corr/abs-2005-00687", "ArXiv": "2005.00687", "CorpusId": 218487328 }, "url": "https://www.semanticscholar.org/paper/597bd2e45427563cdf025e53a3239006aa364cfc", "referenceCount": 110, "citationCount": 2219, "influentialCitationCount": 467, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Joint 3D Tracking and Forecasting with Graph Neural Network and Diversity Sampling", "abstract": "3D multi-object tracking (MOT) and trajectory forecasting are two critical components in modern 3D perception systems that require accurate modeling of multi-agent interaction. We hypothesize that it is beneficial to unify both tasks under one framework in order to learn a shared feature representation of agent interaction. To evaluate this hypothesis, we propose a unified solution for 3D MOT and trajectory forecasting which also incorporates two additional novel computational units. First, we propose a feature interaction technique by introducing Graph Neural Networks (GNNs) to capture the way in which multiple agents interact with one another. The GNN is able to model complex hierarchical interactions, improve the discriminative feature learning for MOT association, and provide socially-aware context for trajectory forecasting. Second, we use a diversity sampling function to improve the quality and diversity of our forecasted trajectories. The learned sampling function is trained to efficiently extract a variety of outcomes from a generative trajectory distribution and helps avoid the problem of generating many duplicate trajectory samples. We evaluate on the KITTI and nuScenes datasets, showing that our unified method with feature interaction and diversity sampling achieves new state-of-the-art performance on both 3D MOT and trajectory forecasting. Our code will be made available at this https URL.", "year": 2020, "venue": "arXiv.org", "authors": [ "Xinshuo Weng", "Ye Yuan", "Kris M. Kitani" ], "externalIds": { "ArXiv": "2003.07847", "DBLP": "journals/corr/abs-2003-07847", "MAG": "3011353537", "CorpusId": 212737170 }, "url": "https://www.semanticscholar.org/paper/11b62510b08eb17a496f795b40ffb0e748a70b7f", "referenceCount": 82, "citationCount": 31, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Improving the Accuracy, Scalability, and Performance of Graph Neural Networks with Roc", "abstract": null, "year": 2020, "venue": "Conference on Machine Learning and Systems", "authors": [ "Zhihao Jia", "Sina Lin", "Mingyu Gao", "M. Zaharia", "A. Aiken" ], "externalIds": { "MAG": "3037699692", "DBLP": "conf/mlsys/JiaLGZA20", "CorpusId": 219850480 }, "url": "https://www.semanticscholar.org/paper/9d5dbe7230161db2d3573c3d5e7868dda42a6d3e", "referenceCount": 0, "citationCount": 209, "influentialCitationCount": 37, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PyTorch: An Imperative Style, High-Performance Deep Learning Library", "abstract": "Deep learning frameworks have often focused on either usability or speed, but not both. PyTorch is a machine learning library that shows that these two goals are in fact compatible: it was designed from first principles to support an imperative and Pythonic programming style that supports code as a model, makes debugging easy and is consistent with other popular scientific computing libraries, while remaining efficient and supporting hardware accelerators such as GPUs. 
In this paper, we detail the principles that drove the implementation of PyTorch and how they are reflected in its architecture. We emphasize that every aspect of PyTorch is a regular Python program under the full control of its user. We also explain how the careful and pragmatic implementation of the key components of its runtime enables them to work together to achieve compelling performance. We demonstrate the efficiency of individual subsystems, as well as the overall speed of PyTorch on several commonly used benchmarks.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Adam Paszke", "Sam Gross", "Francisco Massa", "Adam Lerer", "James Bradbury", "Gregory Chanan", "Trevor Killeen", "Zeming Lin", "N. Gimelshein", "L. Antiga", "Alban Desmaison", "Andreas Köpf", "E. Yang", "Zach DeVito", "Martin Raison", "Alykhan Tejani", "Sasank Chilamkurthy", "Benoit Steiner", "Lu Fang", "Junjie Bai", "Soumith Chintala" ], "externalIds": { "MAG": "2970971581", "DBLP": "journals/corr/abs-1912-01703", "ArXiv": "1912.01703", "CorpusId": 202786778 }, "url": "https://www.semanticscholar.org/paper/3c8a456509e6c0805354bd40a35e3f2dbf8069b1", "referenceCount": 39, "citationCount": 36158, "influentialCitationCount": 3694, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Layer-Dependent Importance Sampling for Training Deep and Large Graph Convolutional Networks", "abstract": "Graph convolutional networks (GCNs) have recently received wide attentions, due to their successful applications in different graph tasks and different domains. Training GCNs for a large graph, however, is still a challenge. Original full-batch GCN training requires calculating the representation of all the nodes in the graph per GCN layer, which brings in high computation and memory costs. To alleviate this issue, several sampling-based methods are proposed to train GCNs on a subset of nodes. Among them, the node-wise neighbor-sampling method recursively samples a fixed number of neighbor nodes, and thus its computation cost suffers from exponential growing neighbor size across layers; while the layer-wise importance-sampling method discards the neighbor-dependent constraints, and thus the nodes sampled across layer suffer from sparse connection problem. To deal with the above two problems, we propose a new effective sampling algorithm called LAyer-Dependent ImportancE Sampling (LADIES). Based on the sampled nodes in the upper layer, LADIES selects nodes that are in the neighborhood of these nodes and uses the constructed bipartite graph to compute the importance probability. Then, it samples a fixed number of nodes according to the probability for the whole layer, and recursively conducts such procedure per layer to construct the whole computation graph. We prove theoretically and experimentally, that our proposed sampling algorithm outperforms the previous sampling methods regarding both time and memory. 
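To make the LADIES sampling step summarized above concrete, the following is a simplified Python sketch of one layer-wise sampling round. It uses squared entries of a row-normalized adjacency matrix as importance scores, which only approximates the paper's importance probabilities; the dense adjacency, function name, and parameters are assumptions of this sketch.

```python
# Simplified, illustrative layer-dependent importance sampling in the spirit
# of the LADIES description above (not the authors' implementation).
import numpy as np

def ladies_layer_sample(adj, upper_nodes, num_samples, rng):
    """Sample nodes for the next-lower layer with probability proportional to
    the squared entries of the row-normalized adjacency, restricted to the
    rows of the already-sampled upper-layer nodes."""
    deg = adj.sum(axis=1, keepdims=True)
    p_norm = adj / np.maximum(deg, 1)                 # row-normalized adjacency
    scores = (p_norm[upper_nodes] ** 2).sum(axis=0)   # importance per candidate node
    candidates = np.flatnonzero(scores)
    probs = scores[candidates] / scores[candidates].sum()
    size = min(num_samples, candidates.size)
    return rng.choice(candidates, size=size, replace=False, p=probs)

rng = np.random.default_rng(0)
adj = (rng.random((20, 20)) < 0.2).astype(float)
np.fill_diagonal(adj, 0)
top_layer = np.array([0, 1, 2, 3])                    # nodes already sampled for the top layer
print(ladies_layer_sample(adj, top_layer, num_samples=8, rng=rng))
```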
Furthermore, LADIES is shown to have better generalization accuracy than original full-batch GCN, due to its stochastic nature.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Difan Zou", "Ziniu Hu", "Yewen Wang", "Song Jiang", "Yizhou Sun", "Quanquan Gu" ], "externalIds": { "DBLP": "journals/corr/abs-1911-07323", "ArXiv": "1911.07323", "MAG": "2970090796", "CorpusId": 202779897 }, "url": "https://www.semanticscholar.org/paper/9dccd22276aaad5b6ec24e68bb7d29d954226a54", "referenceCount": 19, "citationCount": 244, "influentialCitationCount": 26, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Deep Graph Library: Towards Efficient and Scalable Deep Learning on Graphs", "abstract": "Accelerating research in the emerging field of deep graph learning requires new tools. Such systems should support graph as the core abstraction and take care to maintain both forward (i.e. supporting new research ideas) and backward (i.e. integration with existing components) compatibility. In this paper, we present Deep Graph Library (DGL). DGL enables arbitrary message handling and mutation operators, flexible propagation rules, and is framework agnostic so as to leverage high-performance tensor, autograd operations, and other feature extraction modules already available in existing frameworks. DGL carefully handles the sparse and irregular graph structure, deals with graphs big and small which may change dynamically, fuses operations, and performs auto-batching, all to take advantages of modern hardware. DGL has been tested on a variety of models, including but not limited to the popular Graph Neural Networks (GNN) and its variants, with promising speed, memory footprint and scalability.", "year": 2019, "venue": "arXiv.org", "authors": [ "Minjie Wang", "Lingfan Yu", "Da Zheng", "Quan Gan", "Yujie Gai", "Zihao Ye", "Mufei Li", "Jinjing Zhou", "Qi Huang", "Chao Ma", "Ziyue Huang", "Qipeng Guo", "Haotong Zhang", "Haibin Lin", "J. Zhao", "Jinyang Li", "Alex Smola", "Zheng Zhang" ], "externalIds": { "DBLP": "journals/corr/abs-1909-01315", "ArXiv": "1909.01315", "MAG": "2971933740", "CorpusId": 202539732 }, "url": "https://www.semanticscholar.org/paper/fd075bcdf2d7e13d23f7c249a8eded343d5bbe3b", "referenceCount": 34, "citationCount": 665, "influentialCitationCount": 99, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "GraphSAINT: Graph Sampling Based Inductive Learning Method", "abstract": "Graph Convolutional Networks (GCNs) are powerful models for learning representations of attributed graphs. To scale GCNs to large graphs, state-of-the-art methods use various layer sampling techniques to alleviate the "neighbor explosion" problem during minibatch training. Here we propose GraphSAINT, a graph sampling based inductive learning method that improves training efficiency in a fundamentally different way. By a change of perspective, GraphSAINT constructs minibatches by sampling the training graph, rather than the nodes or edges across GCN layers. Each iteration, a complete GCN is built from the properly sampled subgraph. Thus, we ensure fixed number of well-connected nodes in all layers. We further propose normalization technique to eliminate bias, and sampling algorithms for variance reduction. Importantly, we can decouple the sampling process from the forward and backward propagation of training, and extend GraphSAINT with other graph samplers and GCN variants.
Comparing with strong baselines using layer sampling, GraphSAINT demonstrates superior performance in both accuracy and training time on four large graphs.", "year": 2019, "venue": "International Conference on Learning Representations", "authors": [ "Hanqing Zeng", "Hongkuan Zhou", "Ajitesh Srivastava", "R. Kannan", "V. Prasanna" ], "externalIds": { "MAG": "2961295589", "DBLP": "conf/iclr/ZengZSKP20", "ArXiv": "1907.04931", "CorpusId": 195886159 }, "url": "https://www.semanticscholar.org/paper/fc3e99ebc07b3014f6736a6a7b077edf2f1634c0", "referenceCount": 35, "citationCount": 842, "influentialCitationCount": 190, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "NeuGraph: Parallel Deep Neural Network Computation on Large Graphs", "abstract": "Recent deep learning models have moved beyond low dimensional regular grids such as image, video, and speech, to high-dimensional graph-structured data, such as social networks, e-commerce user-item graphs, and knowledge graphs. This evolution has led to large graph-based neural network models that go beyond what existing deep learning frameworks or graph computing systems are designed for. We present NeuGraph, a new framework that bridges the graph and dataflow models to support efficient and scalable parallel neural network computation on graphs. NeuGraph introduces graph computation optimizations into the management of data partitioning, scheduling, and parallelism in dataflow-based deep learning frameworks. Our evaluation shows that, on small graphs that can fit in a single GPU, NeuGraph outperforms state-of-the-art implementations by a significant margin, while scaling to large real-world graphs that none of the existing frameworks can handle directly with GPUs. (Please stay tuned for further updates.)", "year": 2019, "venue": "USENIX Annual Technical Conference", "authors": [ "Lingxiao Ma", "Zhi Yang", "Youshan Miao", "Jilong Xue", "Ming Wu", "Lidong Zhou", "Yafei Dai" ], "externalIds": { "MAG": "2951136539", "DBLP": "conf/usenix/MaYMXWZD19", "CorpusId": 196171782 }, "url": "https://www.semanticscholar.org/paper/87e0fc77548a6fde608e0835fa99880f8a7fa8d4", "referenceCount": 49, "citationCount": 219, "influentialCitationCount": 41, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Cluster-GCN: An Efficient Algorithm for Training Deep and Large Graph Convolutional Networks", "abstract": "Graph convolutional network (GCN) has been successfully applied to many graph-based applications; however, training a large-scale GCN remains challenging. Current SGD-based algorithms suffer from either a high computational cost that exponentially grows with number of GCN layers, or a large space requirement for keeping the entire graph and the embedding of each node in memory. In this paper, we propose Cluster-GCN, a novel GCN algorithm that is suitable for SGD-based training by exploiting the graph clustering structure. Cluster-GCN works as the following: at each step, it samples a block of nodes that associate with a dense subgraph identified by a graph clustering algorithm, and restricts the neighborhood search within this subgraph. This simple but effective strategy leads to significantly improved memory and computational efficiency while being able to achieve comparable test accuracy with previous algorithms. 
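A minimal Python sketch of the Cluster-GCN-style mini-batch step just described, with a random node partition standing in for a real graph-clustering algorithm such as METIS; the function names and the single mean-aggregation layer are illustrative choices of this sketch, not the authors' code.

```python
# Illustrative sketch of the Cluster-GCN idea described above: partition nodes
# into clusters, pick one cluster per step, and restrict aggregation to the
# induced subgraph of that cluster.
import numpy as np

def random_partition(num_nodes, num_clusters, rng):
    labels = rng.integers(num_clusters, size=num_nodes)
    return [np.flatnonzero(labels == c) for c in range(num_clusters)]

def cluster_minibatch_step(adj, feats, w, cluster):
    sub_adj = adj[np.ix_(cluster, cluster)]      # neighborhood search restricted to the cluster
    deg = sub_adj.sum(axis=1, keepdims=True)
    h = (sub_adj @ feats[cluster]) / np.maximum(deg, 1)
    return np.maximum(h @ w, 0)                  # one mean-aggregation GCN layer + ReLU

rng = np.random.default_rng(0)
adj = (rng.random((200, 200)) < 0.03).astype(float)
feats = rng.normal(size=(200, 16))
w = rng.normal(size=(16, 4))
clusters = random_partition(200, num_clusters=8, rng=rng)
print(cluster_minibatch_step(adj, feats, w, clusters[0]).shape)
```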
To test the scalability of our algorithm, we create a new Amazon2M dataset with 2 million nodes and 61 million edges, which is more than 5 times larger than the previous largest publicly available dataset (Reddit). For training a 3-layer GCN on this data, Cluster-GCN is faster than the previous state-of-the-art VR-GCN (1523 seconds vs 1961 seconds) while using much less memory (2.2GB vs 11.2GB). Furthermore, for training a 4-layer GCN on this data, our algorithm can finish in around 36 minutes while all the existing GCN training algorithms fail to train due to the out-of-memory issue. Furthermore, Cluster-GCN allows us to train much deeper GCNs without much time and memory overhead, which leads to improved prediction accuracy: using a 5-layer Cluster-GCN, we achieve a state-of-the-art test F1 score of 99.36 on the PPI dataset, while the previous best result was 98.71 by GaAN (Zhang et al., 2018).", "year": 2019, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Wei-Lin Chiang", "Xuanqing Liu", "Si Si", "Yang Li", "Samy Bengio", "Cho-Jui Hsieh" ], "externalIds": { "DBLP": "conf/kdd/ChiangLSLBH19", "MAG": "2963468055", "ArXiv": "1905.07953", "DOI": "10.1145/3292500.3330925", "CorpusId": 159042192 }, "url": "https://www.semanticscholar.org/paper/05c4eb154ad9512a69569c18d68bc4428ee8bb83", "referenceCount": 17, "citationCount": 1093, "influentialCitationCount": 128, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "PyTorch-BigGraph: A Large-scale Graph Embedding System", "abstract": "Graph embedding methods produce unsupervised node features from graphs that can then be used for a variety of machine learning tasks. Modern graphs, particularly in industrial applications, contain billions of nodes and trillions of edges, which exceeds the capability of existing embedding systems. We present PyTorch-BigGraph (PBG), an embedding system that incorporates several modifications to traditional multi-relation embedding systems that allow it to scale to graphs with billions of nodes and trillions of edges. PBG uses graph partitioning to train arbitrarily large embeddings on either a single machine or in a distributed environment. We demonstrate comparable performance with existing embedding systems on common benchmarks, while allowing for scaling to arbitrarily large graphs and parallelization on multiple machines. We train and evaluate embeddings on several large social network graphs as well as the full Freebase dataset, which contains over 100 million nodes and 2 billion edges.", "year": 2019, "venue": "USENIX workshop on Tackling computer systems problems with machine learning techniques", "authors": [ "Adam Lerer", "Ledell Yu Wu", "Jiajun Shen", "Timothée Lacroix", "Luca Wehrstedt", "Abhijit Bose", "A. Peysakhovich" ], "externalIds": { "MAG": "2926442184", "DBLP": "conf/mlsys/LererWSLWBP19", "ArXiv": "1903.12287", "CorpusId": 88523916 }, "url": "https://www.semanticscholar.org/paper/7ac58400e5063bed9b7c35f87e44ddb917ccf357", "referenceCount": 45, "citationCount": 354, "influentialCitationCount": 56, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Fast Graph Representation Learning with PyTorch Geometric", "abstract": "We introduce PyTorch Geometric, a library for deep learning on irregularly structured input data such as graphs, point clouds and manifolds, built upon PyTorch. 
In addition to general graph data structures and processing methods, it contains a variety of recently published methods from the domains of relational learning and 3D data processing. PyTorch Geometric achieves high data throughput by leveraging sparse GPU acceleration, by providing dedicated CUDA kernels and by introducing efficient mini-batch handling for input examples of different sizes. In this work, we present the library in detail and perform a comprehensive comparative study of the implemented methods in homogeneous evaluation scenarios.", "year": 2019, "venue": "arXiv.org", "authors": [ "Matthias Fey", "J. E. Lenssen" ], "externalIds": { "MAG": "2918342466", "DBLP": "journals/corr/abs-1903-02428", "ArXiv": "1903.02428", "CorpusId": 70349949 }, "url": "https://www.semanticscholar.org/paper/63a513832f56addb67be81a2fa399b233f3030fc", "referenceCount": 51, "citationCount": 3663, "influentialCitationCount": 408, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "AliGraph: A Comprehensive Graph Neural Network Platform", "abstract": "An increasing number of machine learning tasks require dealing with large graph datasets, which capture rich and complex relationships among potentially billions of elements. Graph Neural Networks (GNNs) have become an effective way to address the graph learning problem by converting the graph data into a low dimensional space while keeping both the structural and property information to the maximum extent and constructing a neural network for training and referencing. However, it is challenging to provide efficient graph storage and computation capabilities to facilitate GNN training and enable the development of new GNN algorithms. In this paper, we present a comprehensive graph neural network system, namely AliGraph, which consists of distributed graph storage, optimized sampling operators and a runtime to efficiently support not only existing popular GNNs but also a series of in-house developed ones for different scenarios. The system is currently deployed at Alibaba to support a variety of business scenarios, including product recommendation and personalized search at Alibaba's E-Commerce platform. By conducting extensive experiments on a real-world dataset with 492.90 million vertices, 6.82 billion edges and rich attributes, AliGraph performs an order of magnitude faster in terms of graph building (5 minutes vs hours reported from the state-of-the-art PowerGraph platform). At training, AliGraph runs 40%-50% faster with the novel caching strategy and demonstrates around 12 times speedup with the improved runtime. 
In addition, our in-house developed GNN models all showcase their statistically significant superiorities in terms of both effectiveness and efficiency (e.g., a 4.12%-17.19% lift in F1 scores).", "year": 2019, "venue": "Proceedings of the VLDB Endowment", "authors": [ "Rong Zhu", "Kun Zhao", "Hongxia Yang", "Wei Lin", "Chang Zhou", "Baole Ai", "Yong Li", "Jingren Zhou" ], "externalIds": { "MAG": "2964571482", "DBLP": "journals/pvldb/ZhuZYLZALZ19", "ArXiv": "1902.08730", "DOI": "10.14778/3352063.3352127", "CorpusId": 67855424 }, "url": "https://www.semanticscholar.org/paper/70f7cd54b5918aed60c3d8c3a8e4aa7e8d634c41", "referenceCount": 69, "citationCount": 345, "influentialCitationCount": 28, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Graph Neural Networks for Social Recommendation", "abstract": "In recent years, Graph Neural Networks (GNNs), which can naturally integrate node information and topological structure, have been demonstrated to be powerful in learning on graph data. These advantages of GNNs provide great potential to advance social recommendation since data in social recommender systems can be represented as a user-user social graph and a user-item graph, and learning latent factors of users and items is the key. However, building social recommender systems based on GNNs faces challenges. For example, the user-item graph encodes both interactions and their associated opinions; social relations have heterogeneous strengths; users are involved in two graphs (e.g., the user-user social graph and the user-item graph). To address the three aforementioned challenges simultaneously, in this paper, we present a novel graph neural network framework (GraphRec) for social recommendations. In particular, we provide a principled approach to jointly capture interactions and opinions in the user-item graph and propose the framework GraphRec, which coherently models two graphs and heterogeneous strengths. Extensive experiments on two real-world datasets demonstrate the effectiveness of the proposed framework GraphRec.", "year": 2019, "venue": "The Web Conference", "authors": [ "Wenqi Fan", "Yao Ma", "Qing Li", "Yuan He", "Y. Zhao", "Jiliang Tang", "Dawei Yin" ], "externalIds": { "MAG": "2914721378", "DBLP": "journals/corr/abs-1902-07243", "ArXiv": "1902.07243", "DOI": "10.1145/3308558.3313488", "CorpusId": 67769538 }, "url": "https://www.semanticscholar.org/paper/398d6f4432e6aa7acf21c0bbaaebac48998faad3", "referenceCount": 46, "citationCount": 1571, "influentialCitationCount": 83, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "How Powerful are Graph Neural Networks?", "abstract": "Graph Neural Networks (GNNs) are an effective framework for representation learning of graphs. GNNs follow a neighborhood aggregation scheme, where the representation vector of a node is computed by recursively aggregating and transforming representation vectors of its neighboring nodes. Many GNN variants have been proposed and have achieved state-of-the-art results on both node and graph classification tasks. However, despite GNNs revolutionizing graph representation learning, there is limited understanding of their representational properties and limitations. Here, we present a theoretical framework for analyzing the expressive power of GNNs to capture different graph structures. 
Our results characterize the discriminative power of popular GNN variants, such as Graph Convolutional Networks and GraphSAGE, and show that they cannot learn to distinguish certain simple graph structures. We then develop a simple architecture that is provably the most expressive among the class of GNNs and is as powerful as the Weisfeiler-Lehman graph isomorphism test. We empirically validate our theoretical findings on a number of graph classification benchmarks, and demonstrate that our model achieves state-of-the-art performance.", "year": 2018, "venue": "International Conference on Learning Representations", "authors": [ "Keyulu Xu", "Weihua Hu", "J. Leskovec", "S. Jegelka" ], "externalIds": { "MAG": "2950468517", "ArXiv": "1810.00826", "DBLP": "journals/corr/abs-1810-00826", "CorpusId": 52895589 }, "url": "https://www.semanticscholar.org/paper/62ed9bf1d83c8db1f9cbf92ea2f57ea90ef683d9", "referenceCount": 45, "citationCount": 6421, "influentialCitationCount": 1375, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Adaptive Sampling Towards Fast Graph Representation Learning", "abstract": "Graph Convolutional Networks (GCNs) have become a crucial tool on learning representations of graph vertices. The main challenge of adapting GCNs on large-scale graphs is the scalability issue that it incurs heavy cost both in computation and memory due to the uncontrollable neighborhood expansion across layers. In this paper, we accelerate the training of GCNs through developing an adaptive layer-wise sampling method. By constructing the network layer by layer in a top-down passway, we sample the lower layer conditioned on the top one, where the sampled neighborhoods are shared by different parent nodes and the over expansion is avoided owing to the fixed-size sampling. More importantly, the proposed sampler is adaptive and applicable for explicit variance reduction, which in turn enhances the training of our method. Furthermore, we propose a novel and economical approach to promote the message passing over distant nodes by applying skip connections. Intensive experiments on several benchmarks verify the effectiveness of our method regarding the classification accuracy while enjoying faster convergence speed.", "year": 2018, "venue": "Neural Information Processing Systems", "authors": [ "Wen-bing Huang", "Tong Zhang", "Yu Rong", "Junzhou Huang" ], "externalIds": { "MAG": "2890703109", "DBLP": "journals/corr/abs-1809-05343", "ArXiv": "1809.05343", "CorpusId": 52279871 }, "url": "https://www.semanticscholar.org/paper/abfa95058fa50c55a0b923a6c35830f470c125ad", "referenceCount": 26, "citationCount": 456, "influentialCitationCount": 51, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Graph Convolutional Neural Networks for Web-Scale Recommender Systems", "abstract": "Recent advancements in deep neural networks for graph-structured data have led to state-of-the-art performance on recommender system benchmarks. However, making these methods practical and scalable to web-scale recommendation tasks with billions of items and hundreds of millions of users remains an unsolved challenge. Here we describe a large-scale deep recommendation engine that we developed and deployed at Pinterest. We develop a data-efficient Graph Convolutional Network (GCN) algorithm, which combines efficient random walks and graph convolutions to generate embeddings of nodes (i.e., items) that incorporate both graph structure as well as node feature information. 
Compared to prior GCN approaches, we develop a novel method based on highly efficient random walks to structure the convolutions and design a novel training strategy that relies on harder-and-harder training examples to improve robustness and convergence of the model. We also develop an efficient MapReduce model inference algorithm to generate embeddings using a trained model. Overall, we can train on and embed graphs that are four orders of magnitude larger than typical GCN implementations. We show how GCN embeddings can be used to make high-quality recommendations in various settings at Pinterest, which has a massive underlying graph with 3 billion nodes representing pins and boards, and 17 billion edges. According to offline metrics, user studies, as well as A/B tests, our approach generates higher-quality recommendations than comparable deep learning based systems. To our knowledge, this is by far the largest application of deep graph embeddings to date and paves the way for a new generation of web-scale recommender systems based on graph convolutional architectures.", "year": 2018, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Rex Ying", "Ruining He", "Kaifeng Chen", "Pong Eksombatchai", "William L. Hamilton", "J. Leskovec" ], "externalIds": { "ArXiv": "1806.01973", "DBLP": "conf/kdd/YingHCEHL18", "MAG": "3100848837", "DOI": "10.1145/3219819.3219890", "CorpusId": 46949657 }, "url": "https://www.semanticscholar.org/paper/6c96c2d4a3fbd572fef2d59cb856521ee1746789", "referenceCount": 33, "citationCount": 3101, "influentialCitationCount": 196, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "FastGCN: Fast Learning with Graph Convolutional Networks via Importance Sampling", "abstract": "The graph convolutional networks (GCN) recently proposed by Kipf and Welling are an effective graph model for semi-supervised learning. This model, however, was originally designed to be learned with the presence of both training and test data. Moreover, the recursive neighborhood expansion across layers poses time and memory challenges for training with large, dense graphs. To relax the requirement of simultaneous availability of test data, we interpret graph convolutions as integral transforms of embedding functions under probability measures. Such an interpretation allows for the use of Monte Carlo approaches to consistently estimate the integrals, which in turn leads to a batched training scheme as we propose in this work---FastGCN. Enhanced with importance sampling, FastGCN not only is efficient for training but also generalizes well for inference. We show a comprehensive set of experiments to demonstrate its effectiveness compared with GCN and related models. 
In particular, training is orders of magnitude more efficient while predictions remain comparably accurate.", "year": 2018, "venue": "International Conference on Learning Representations", "authors": [ "Jie Chen", "Tengfei Ma", "Cao Xiao" ], "externalIds": { "DBLP": "conf/iclr/ChenMX18", "MAG": "2963695795", "ArXiv": "1801.10247", "CorpusId": 22191393 }, "url": "https://www.semanticscholar.org/paper/2503dff90685857ce7295e37d0045e2eef41c8b8", "referenceCount": 24, "citationCount": 1378, "influentialCitationCount": 166, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Graph Attention Networks", "abstract": "We present graph attention networks (GATs), novel neural network architectures that operate on graph-structured data, leveraging masked self-attentional layers to address the shortcomings of prior methods based on graph convolutions or their approximations. By stacking layers in which nodes are able to attend over their neighborhoods' features, we enable (implicitly) specifying different weights to different nodes in a neighborhood, without requiring any kind of costly matrix operation (such as inversion) or depending on knowing the graph structure upfront. In this way, we address several key challenges of spectral-based graph neural networks simultaneously, and make our model readily applicable to inductive as well as transductive problems. Our GAT models have achieved or matched state-of-the-art results across four established transductive and inductive graph benchmarks: the Cora, Citeseer and Pubmed citation network datasets, as well as a protein-protein interaction dataset (wherein test graphs remain unseen during training).", "year": 2017, "venue": "International Conference on Learning Representations", "authors": [ "Petar Velickovic", "Guillem Cucurull", "Arantxa Casanova", "Adriana Romero", "P. Lio’", "Yoshua Bengio" ], "externalIds": { "MAG": "2766453196", "ArXiv": "1710.10903", "DBLP": "journals/corr/abs-1710-10903", "DOI": "10.17863/CAM.48429", "CorpusId": 3292002 }, "url": "https://www.semanticscholar.org/paper/33998aff64ce51df8dee45989cdca4b6b1329ec4", "referenceCount": 45, "citationCount": 16828, "influentialCitationCount": 3007, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Stochastic Training of Graph Convolutional Networks with Variance Reduction", "abstract": "Graph convolutional networks (GCNs) are powerful deep neural networks for graph-structured data. However, GCN computes the representation of a node recursively from its neighbors, making the receptive field size grow exponentially with the number of layers. Previous attempts on reducing the receptive field size by subsampling neighbors do not have a convergence guarantee, and their receptive field size per node is still in the order of hundreds. In this paper, we develop control variate based algorithms which allow sampling an arbitrarily small neighbor size. Furthermore, we prove new theoretical guarantee for our algorithms to converge to a local optimum of GCN. Empirical results show that our algorithms enjoy a similar convergence with the exact algorithm using only two neighbors per node. 
The runtime of our algorithms on a large Reddit dataset is only one seventh of previous neighbor sampling algorithms.", "year": 2017, "venue": "International Conference on Machine Learning", "authors": [ "Jianfei Chen", "Jun Zhu", "Le Song" ], "externalIds": { "MAG": "2952636194", "DBLP": "conf/icml/ChenZS18", "CorpusId": 3636539 }, "url": "https://www.semanticscholar.org/paper/a60c69c2fae27ebbb73c87f7f2a4765556bd7f9f", "referenceCount": 25, "citationCount": 457, "influentialCitationCount": 67, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Inductive Representation Learning on Large Graphs", "abstract": "Low-dimensional embeddings of nodes in large graphs have proved extremely useful in a variety of prediction tasks, from content recommendation to identifying protein functions. However, most existing approaches require that all nodes in the graph are present during training of the embeddings; these previous approaches are inherently transductive and do not naturally generalize to unseen nodes. Here we present GraphSAGE, a general, inductive framework that leverages node feature information (e.g., text attributes) to efficiently generate node embeddings for previously unseen data. Instead of training individual embeddings for each node, we learn a function that generates embeddings by sampling and aggregating features from a node's local neighborhood. Our algorithm outperforms strong baselines on three inductive node-classification benchmarks: we classify the category of unseen nodes in evolving information graphs based on citation and Reddit post data, and we show that our algorithm generalizes to completely unseen graphs using a multi-graph dataset of protein-protein interactions.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "William L. Hamilton", "Z. Ying", "J. Leskovec" ], "externalIds": { "DBLP": "conf/nips/HamiltonYL17", "MAG": "2952779545", "ArXiv": "1706.02216", "CorpusId": 4755450 }, "url": "https://www.semanticscholar.org/paper/6b7d6e6416343b2a122f8416e69059ce919026ef", "referenceCount": 42, "citationCount": 12811, "influentialCitationCount": 2278, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Semi-Supervised Classification with Graph Convolutional Networks", "abstract": "We present a scalable approach for semi-supervised learning on graph-structured data that is based on an efficient variant of convolutional neural networks which operate directly on graphs. We motivate the choice of our convolutional architecture via a localized first-order approximation of spectral graph convolutions. Our model scales linearly in the number of graph edges and learns hidden layer representations that encode both local graph structure and features of nodes. In a number of experiments on citation networks and on a knowledge graph dataset we demonstrate that our approach outperforms related methods by a significant margin.", "year": 2016, "venue": "International Conference on Learning Representations", "authors": [ "Thomas Kipf", "M. 
Welling" ], "externalIds": { "ArXiv": "1609.02907", "MAG": "2519887557", "DBLP": "journals/corr/KipfW16", "CorpusId": 3144218 }, "url": "https://www.semanticscholar.org/paper/36eff562f65125511b5dfab68ce7f7a943c27478", "referenceCount": 38, "citationCount": 25291, "influentialCitationCount": 6216, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "DeepWalk: online learning of social representations", "abstract": "We present DeepWalk, a novel approach for learning latent representations of vertices in a network. These latent representations encode social relations in a continuous vector space, which is easily exploited by statistical models. DeepWalk generalizes recent advancements in language modeling and unsupervised feature learning (or deep learning) from sequences of words to graphs. DeepWalk uses local information obtained from truncated random walks to learn latent representations by treating walks as the equivalent of sentences. We demonstrate DeepWalk's latent representations on several multi-label network classification tasks for social networks such as BlogCatalog, Flickr, and YouTube. Our results show that DeepWalk outperforms challenging baselines which are allowed a global view of the network, especially in the presence of missing information. DeepWalk's representations can provide F1 scores up to 10% higher than competing methods when labeled data is sparse. In some experiments, DeepWalk's representations are able to outperform all baseline methods while using 60% less training data. DeepWalk is also scalable. It is an online learning algorithm which builds useful incremental results, and is trivially parallelizable. These qualities make it suitable for a broad class of real world applications such as network classification, and anomaly detection.", "year": 2014, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Bryan Perozzi", "Rami Al-Rfou", "S. 
Skiena" ], "externalIds": { "DBLP": "conf/kdd/PerozziAS14", "MAG": "2154851992", "ArXiv": "1403.6652", "DOI": "10.1145/2623330.2623732", "CorpusId": 3051291 }, "url": "https://www.semanticscholar.org/paper/fff114cbba4f3ba900f33da574283e3de7f26c83", "referenceCount": 51, "citationCount": 8938, "influentialCitationCount": 1507, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "P3: Distributed deep graph learning at scale", "abstract": null, "year": 2021, "venue": "15th { USENIX } Symposium on Operating Systems Design and Implementation ( { OSDI } 21)", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "NVIDIA Corporation", "abstract": null, "year": null, "venue": "Cuda c++ programming guide", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "TabGraphs: A Benchmark and Strong Baselines for Learning on Graphs with Tabular Node Features": { "paper_title": "TabGraphs: A Benchmark and Strong Baselines for Learning on Graphs with Tabular Node Features", "arxiv_id": "2409.14500v2", "keyword": "graph neural network", "authors": [ "Gleb Bazhenov", "Oleg Platonov", "Liudmila Prokhorenkova" ], "references": [ { "title": "4DBInfer: A 4D Benchmarking Toolbox for Graph-Centric Predictive Modeling on Relational DBs", "abstract": "Although RDBs store vast amounts of rich, informative data spread across interconnected tables, the progress of predictive machine learning models as applied to such tasks arguably falls well behind advances in other domains such as computer vision or natural language processing. This deficit stems, at least in part, from the lack of established/public RDB benchmarks as needed for training and evaluation purposes. As a result, related model development thus far often defaults to tabular approaches trained on ubiquitous single-table benchmarks, or on the relational side, graph-based alternatives such as GNNs applied to a completely different set of graph datasets devoid of tabular characteristics. To more precisely target RDBs lying at the nexus of these two complementary regimes, we explore a broad class of baseline models predicated on: (i) converting multi-table datasets into graphs using various strategies equipped with efficient subsampling, while preserving tabular characteristics; and (ii) trainable models with well-matched inductive biases that output predictions based on these input subgraphs. Then, to address the dearth of suitable public benchmarks and reduce siloed comparisons, we assemble a diverse collection of (i) large-scale RDB datasets and (ii) coincident predictive tasks. From a delivery standpoint, we operationalize the above four dimensions (4D) of exploration within a unified, scalable open-source toolbox called 4DBInfer. We conclude by presenting evaluations using 4DBInfer, the results of which highlight the importance of considering each such dimension in the design of RDB predictive models, as well as the limitations of more naive approaches such as simply joining adjacent tables. 
Our source code is released at https://github.com/awslabs/multi-table-benchmark .", "year": 2024, "venue": "arXiv.org", "authors": [ "Minjie Wang", "Quan Gan", "David Wipf", "Zhenkun Cai", "Ning Li", "Jianheng Tang", "Yanlin Zhang", "Zizhao Zhang", "Zunyao Mao", "Yakun Song", "Yanbo Wang", "Jiahang Li", "Han Zhang", "Guang Yang", "Xiao Qin", "Chuan Lei", "Mu-Nan Zhang", "Weinan Zhang", "Christos Faloutsos", "Zheng Zhang" ], "externalIds": { "ArXiv": "2404.18209", "DBLP": "journals/corr/abs-2404-18209", "DOI": "10.48550/arXiv.2404.18209", "CorpusId": 269449748 }, "url": "https://www.semanticscholar.org/paper/cd4af2a6fdfc85b9fb514989a41551186cf303dc", "referenceCount": 74, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "TuneTables: Context Optimization for Scalable Prior-Data Fitted Networks", "abstract": "While tabular classification has traditionally relied on from-scratch training, a recent breakthrough called prior-data fitted networks (PFNs) challenges this approach. Similar to large language models, PFNs make use of pretraining and in-context learning to achieve strong performance on new tasks in a single forward pass. However, current PFNs have limitations that prohibit their widespread adoption. Notably, TabPFN achieves very strong performance on small tabular datasets but is not designed to make predictions for datasets of size larger than 1000. In this work, we overcome these limitations and substantially improve the performance of PFNs by developing context optimization techniques for PFNs. Specifically, we propose TuneTables, a novel prompt-tuning strategy that compresses large datasets into a smaller learned context. TuneTables scales TabPFN to be competitive with state-of-the-art tabular classification methods on larger datasets, while having a substantially lower inference time than TabPFN. Furthermore, we show that TuneTables can be used as an interpretability tool and can even be used to mitigate biases by optimizing a fairness objective.", "year": 2024, "venue": "arXiv.org", "authors": [ "Ben Feuer", "R. Schirrmeister", "Valeriia Cherepanova", "Chinmay Hegde", "Frank Hutter", "Micah Goldblum", "Niv Cohen", "Colin White" ], "externalIds": { "DBLP": "journals/corr/abs-2402-11137", "ArXiv": "2402.11137", "DOI": "10.48550/arXiv.2402.11137", "CorpusId": 267751225 }, "url": "https://www.semanticscholar.org/paper/22be54a496344470e9379761aeed66115d218979", "referenceCount": 71, "citationCount": 5, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Relational Deep Learning: Graph Representation Learning on Relational Databases", "abstract": "Much of the world's most valued data is stored in relational databases and data warehouses, where the data is organized into many tables connected by primary-foreign key relations. However, building machine learning models using this data is both challenging and time consuming. The core problem is that no machine learning method is capable of learning on multiple tables interconnected by primary-foreign key relations. Current methods can only learn from a single table, so the data must first be manually joined and aggregated into a single training table, the process known as feature engineering. Feature engineering is slow, error prone and leads to suboptimal models. Here we introduce an end-to-end deep representation learning approach to directly learn on data laid out across multiple tables. 
We name our approach Relational Deep Learning (RDL). The core idea is to view relational databases as a temporal, heterogeneous graph, with a node for each row in each table, and edges specified by primary-foreign key links. Message Passing Graph Neural Networks can then automatically learn across the graph to extract representations that leverage all input data, without any manual feature engineering. Relational Deep Learning leads to more accurate models that can be built much faster. To facilitate research in this area, we develop RelBench, a set of benchmark datasets and an implementation of Relational Deep Learning. The data covers a wide spectrum, from discussions on Stack Exchange to book reviews on the Amazon Product Catalog. Overall, we define a new research area that generalizes graph machine learning and broadens its applicability to a wide set of AI use cases.", "year": 2023, "venue": "arXiv.org", "authors": [ "Matthias Fey", "Weihua Hu", "Kexin Huang", "J. E. Lenssen", "Rishabh Ranjan", "Joshua Robinson", "Rex Ying", "Jiaxuan You", "J. Leskovec" ], "externalIds": { "ArXiv": "2312.04615", "DBLP": "journals/corr/abs-2312-04615", "DOI": "10.48550/arXiv.2312.04615", "CorpusId": 265502702 }, "url": "https://www.semanticscholar.org/paper/17a6213e30895fb7e5d0b368236ea8beeb94ee86", "referenceCount": 46, "citationCount": 15, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GFS: Graph-based Feature Synthesis for Prediction over Relational Databases", "abstract": "Relational databases are extensively utilized in a variety of modern information system applications, and they always carry valuable data patterns. There are a huge number of data mining or machine learning tasks conducted on relational databases. However, it is worth noting that there are limited machine learning models specifically designed for relational databases, as most models are primarily tailored for single table settings. Consequently, the prevalent approach for training machine learning models on data stored in relational databases involves performing feature engineering to merge the data from multiple tables into a single table and subsequently applying single table models. This approach not only requires significant effort in feature engineering but also destroys the inherent relational structure present in the data. To address these challenges, we propose a novel framework called Graph-based Feature Synthesis (GFS). GFS formulates the relational database as a heterogeneous graph, thereby preserving the relational structure within the data. By leveraging the inductive bias from single table models, GFS effectively captures the intricate relationships inherent in each table. Additionally, the whole framework eliminates the need for manual feature engineering. 
In the extensive experiment over four real-world multi-table relational databases, GFS outperforms previous methods designed for relational databases, demonstrating its superior performance.", "year": 2023, "venue": "arXiv.org", "authors": [ "Han Zhang", "Quan Gan", "David Wipf", "Weinan Zhang" ], "externalIds": { "ArXiv": "2312.02037", "DBLP": "journals/corr/abs-2312-02037", "DOI": "10.48550/arXiv.2312.02037", "CorpusId": 265609360 }, "url": "https://www.semanticscholar.org/paper/6e1c9dd92d74bf6f44924b0a153f46b99f4fc344", "referenceCount": 48, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SPARE: A Single-Pass Neural Model for Relational Databases", "abstract": "While there has been extensive work on deep neural networks for images and text, deep learning for relational databases (RDBs) is still a rather unexplored field. One direction that recently gained traction is to apply Graph Neural Networks (GNNs) to RBDs. However, training GNNs on large relational databases (i.e., data stored in multiple database tables) is rather inefficient due to multiple rounds of training and potentially large and inefficient representations. Hence, in this paper we propose SPARE (Single-Pass Relational models), a new class of neural models that can be trained efficiently on RDBs while providing similar accuracies as GNNs. For enabling efficient training, different from GNNs, SPARE makes use of the fact that data in RDBs has a regular structure, which allows one to train these models in a single pass while exploiting symmetries at the same time. Our extensive empirical evaluation demonstrates that SPARE can significantly speedup both training and inference while offering competitive predictive performance over numerous baselines.", "year": 2023, "venue": "arXiv.org", "authors": [ "Benjamin Hilprecht", "Kristian Kersting", "Carsten Binnig" ], "externalIds": { "ArXiv": "2310.13581", "DBLP": "journals/corr/abs-2310-13581", "DOI": "10.48550/arXiv.2310.13581", "CorpusId": 264406080 }, "url": "https://www.semanticscholar.org/paper/0a8f0f385dd916362be2b5c7eba8d23d1a39c17e", "referenceCount": 7, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Trompt: Towards a Better Deep Neural Network for Tabular Data", "abstract": "Tabular data is arguably one of the most commonly used data structures in various practical domains, including finance, healthcare and e-commerce. The inherent heterogeneity allows tabular data to store rich information. However, based on a recently published tabular benchmark, we can see deep neural networks still fall behind tree-based models on tabular datasets. In this paper, we propose Trompt--which stands for Tabular Prompt--a novel architecture inspired by prompt learning of language models. The essence of prompt learning is to adjust a large pre-trained model through a set of prompts outside the model without directly modifying the model. Based on this idea, Trompt separates the learning strategy of tabular data into two parts. The first part, analogous to pre-trained models, focus on learning the intrinsic information of a table. The second part, analogous to prompts, focus on learning the variations among samples. Trompt is evaluated with the benchmark mentioned above. 
The experimental results demonstrate that Trompt outperforms state-of-the-art deep neural networks and is comparable to tree-based models.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Kuan-Yu Chen", "Ping-Han Chiang", "Hsin-Rung Chou", "Tingwei Chen", "Tien-Hao Chang" ], "externalIds": { "DBLP": "journals/corr/abs-2305-18446", "ArXiv": "2305.18446", "DOI": "10.48550/arXiv.2305.18446", "CorpusId": 258968005 }, "url": "https://www.semanticscholar.org/paper/bc9550cbc88816383a9c56f4e54b4d641bea924e", "referenceCount": 40, "citationCount": 12, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "TabGSL: Graph Structure Learning for Tabular Data Prediction", "abstract": "This work presents a novel approach to tabular data prediction leveraging graph structure learning and graph neural networks. Despite the prevalence of tabular data in real-world applications, traditional deep learning methods often overlook the potentially valuable associations between data instances. Such associations can offer beneficial insights for classification tasks, as instances may exhibit similar patterns of correlations among features and target labels. This information can be exploited by graph neural networks, necessitating robust graph structures. However, existing studies primarily focus on improving graph structure from noisy data, largely neglecting the possibility of deriving graph structures from tabular data. We present a novel solution, Tabular Graph Structure Learning (TabGSL), to enhance tabular data prediction by simultaneously learning instance correlation and feature interaction within a unified framework. This is achieved through a proposed graph contrastive learning module, along with transformer-based feature extractor and graph neural network. Comprehensive experiments conducted on 30 benchmark tabular datasets demonstrate that TabGSL markedly outperforms both tree-based models and recent deep learning-based tabular models. Visualizations of the learned instance embeddings further substantiate the effectiveness of TabGSL.", "year": 2023, "venue": "arXiv.org", "authors": [ "Jay Chiehen Liao", "Cheng Li" ], "externalIds": { "ArXiv": "2305.15843", "DBLP": "journals/corr/abs-2305-15843", "DOI": "10.48550/arXiv.2305.15843", "CorpusId": 258887527 }, "url": "https://www.semanticscholar.org/paper/b775e8ce595a5ca7a5263fdadde071087e655c5b", "referenceCount": 63, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Towards Foundation Models for Relational Databases [Vision Paper]", "abstract": "Tabular representation learning has recently gained a lot of attention. However, existing approaches only learn a representation from a single table, and thus ignore the potential to learn from the full structure of relational databases, including neighboring tables that can contain important information for a contextualized representation. Moreover, current models are significantly limited in scale, which prevents that they learn from large databases. In this paper, we thus introduce our vision of relational representation learning, that can not only learn from the full relational structure, but also can scale to larger database sizes that are commonly found in real-world. Moreover, we also discuss opportunities and challenges we see along the way to enable this vision and present initial very promising results. 
Overall, we argue that this direction can lead to foundation models for relational databases that are today only available for text and images.", "year": 2023, "venue": "arXiv.org", "authors": [ "Liane Vogel", "Benjamin Hilprecht", "Carsten Binnig" ], "externalIds": { "DBLP": "journals/corr/abs-2305-15321", "ArXiv": "2305.15321", "DOI": "10.48550/arXiv.2305.15321", "CorpusId": 253180413 }, "url": "https://www.semanticscholar.org/paper/e3154ffced7e2279f42b6a13c00f621f6d41c213", "referenceCount": 26, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "When Do Neural Nets Outperform Boosted Trees on Tabular Data?", "abstract": "Tabular data is one of the most commonly used types of data in machine learning. Despite recent advances in neural nets (NNs) for tabular data, there is still an active discussion on whether or not NNs generally outperform gradient-boosted decision trees (GBDTs) on tabular data, with several recent works arguing either that GBDTs consistently outperform NNs on tabular data, or vice versa. In this work, we take a step back and question the importance of this debate. To this end, we conduct the largest tabular data analysis to date, comparing 19 algorithms across 176 datasets, and we find that the 'NN vs. GBDT' debate is overemphasized: for a surprisingly high number of datasets, either the performance difference between GBDTs and NNs is negligible, or light hyperparameter tuning on a GBDT is more important than choosing between NNs and GBDTs. A remarkable exception is the recently-proposed prior-data fitted network, TabPFN: although it is effectively limited to training sets of size 3000, we find that it outperforms all other algorithms on average, even when randomly sampling 3000 training datapoints. Next, we analyze dozens of metafeatures to determine what properties of a dataset make NNs or GBDTs better-suited to perform well. For example, we find that GBDTs are much better than NNs at handling skewed or heavy-tailed feature distributions and other forms of dataset irregularities. Our insights act as a guide for practitioners to determine which techniques may work best on their dataset. Finally, with the goal of accelerating tabular data research, we release the TabZilla Benchmark Suite: a collection of the 36 'hardest' of the datasets we study. Our benchmark suite, codebase, and all raw results are available at https://github.com/naszilla/tabzilla.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Duncan C. McElfresh", "Sujay Khandagale", "Jonathan Valverde", "C. VishakPrasad", "Ben Feuer", "Chinmay Hegde", "Ganesh Ramakrishnan", "Micah Goldblum", "Colin White" ], "externalIds": { "ArXiv": "2305.02997", "DBLP": "conf/nips/McElfreshKVCRGW23", "DOI": "10.48550/arXiv.2305.02997", "CorpusId": 258479721 }, "url": "https://www.semanticscholar.org/paper/5e4125b3a2ec91e866d970498f8a138c5a5cc89b", "referenceCount": 73, "citationCount": 72, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A critical look at the evaluation of GNNs under heterophily: are we really making progress?", "abstract": "Node classification is a classical graph machine learning task on which Graph Neural Networks (GNNs) have recently achieved strong results. However, it is often believed that standard GNNs only work well for homophilous graphs, i.e., graphs where edges tend to connect nodes of the same class. 
Graphs without this property are called heterophilous, and it is typically assumed that specialized methods are required to achieve strong performance on such graphs. In this work, we challenge this assumption. First, we show that the standard datasets used for evaluating heterophily-specific models have serious drawbacks, making results obtained by using them unreliable. The most significant of these drawbacks is the presence of a large number of duplicate nodes in the datasets Squirrel and Chameleon, which leads to train-test data leakage. We show that removing duplicate nodes strongly affects GNN performance on these datasets. Then, we propose a set of heterophilous graphs of varying properties that we believe can serve as a better benchmark for evaluating the performance of GNNs under heterophily. We show that standard GNNs achieve strong results on these heterophilous graphs, almost always outperforming specialized models. Our datasets and the code for reproducing our experiments are available at https://github.com/yandex-research/heterophilous-graphs", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Oleg Platonov", "Denis Kuznedelev", "Michael Diskin", "Artem Babenko", "L. Prokhorenkova" ], "externalIds": { "DBLP": "conf/iclr/PlatonovKDBP23", "ArXiv": "2302.11640", "DOI": "10.48550/arXiv.2302.11640", "CorpusId": 257102689 }, "url": "https://www.semanticscholar.org/paper/6ef892cd47300c56a72ff67bc7b87b43b3654e16", "referenceCount": 52, "citationCount": 116, "influentialCitationCount": 35, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Characterizing Graph Datasets for Node Classification: Homophily-Heterophily Dichotomy and Beyond", "abstract": "Homophily is a graph property describing the tendency of edges to connect similar nodes; the opposite is called heterophily. It is often believed that heterophilous graphs are challenging for standard message-passing graph neural networks (GNNs), and much effort has been put into developing efficient methods for this setting. However, there is no universally agreed-upon measure of homophily in the literature. In this work, we show that commonly used homophily measures have critical drawbacks preventing the comparison of homophily levels across different datasets. For this, we formalize desirable properties for a proper homophily measure and verify which measures satisfy which properties. In particular, we show that a measure that we call adjusted homophily satisfies more desirable properties than other popular homophily measures while being rarely used in graph learning literature. Then, we go beyond the homophily-heterophily dichotomy and propose a new characteristic allowing one to further distinguish different sorts of heterophily. The proposed label informativeness (LI) characterizes how much information a neighbor's label provides about a node's label. We analyze LI via the same theoretical framework and show that it is comparable across different datasets. We also observe empirically that LI better agrees with GNN performance compared to homophily measures, which confirms that it is a useful characteristic of the graph structure.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Oleg Platonov", "Denis Kuznedelev", "Artem Babenko", "L. 
Prokhorenkova" ], "externalIds": { "ArXiv": "2209.06177", "DBLP": "conf/nips/PlatonovKBP23", "CorpusId": 256808725 }, "url": "https://www.semanticscholar.org/paper/0be89c19da818232684355a558fed214b0ee0f4b", "referenceCount": 52, "citationCount": 21, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "TabPFN: A Transformer That Solves Small Tabular Classification Problems in a Second", "abstract": "We present TabPFN, a trained Transformer that can do supervised classification for small tabular datasets in less than a second, needs no hyperparameter tuning and is competitive with state-of-the-art classification methods. TabPFN performs in-context learning (ICL), it learns to make predictions using sequences of labeled examples (x, f(x)) given in the input, without requiring further parameter updates. TabPFN is fully entailed in the weights of our network, which accepts training and test samples as a set-valued input and yields predictions for the entire test set in a single forward pass. TabPFN is a Prior-Data Fitted Network (PFN) and is trained offline once, to approximate Bayesian inference on synthetic datasets drawn from our prior. This prior incorporates ideas from causal reasoning: It entails a large space of structural causal models with a preference for simple structures. On the 18 datasets in the OpenML-CC18 suite that contain up to 1 000 training data points, up to 100 purely numerical features without missing values, and up to 10 classes, we show that our method clearly outperforms boosted trees and performs on par with complex state-of-the-art AutoML systems with up to 230$\\times$ speedup. This increases to a 5 700$\\times$ speedup when using a GPU. We also validate these results on an additional 67 small numerical datasets from OpenML. We provide all our code, the trained TabPFN, an interactive browser demo and a Colab notebook at https://github.com/automl/TabPFN.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Noah Hollmann", "Samuel G. Müller", "Katharina Eggensperger", "F. Hutter" ], "externalIds": { "ArXiv": "2207.01848", "DBLP": "conf/iclr/Hollmann0EH23", "CorpusId": 252683429 }, "url": "https://www.semanticscholar.org/paper/4c4f0fcf1ce04f12290d8c876abfbe57817de430", "referenceCount": 72, "citationCount": 154, "influentialCitationCount": 38, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "On Embeddings for Numerical Features in Tabular Deep Learning", "abstract": "Recently, Transformer-like deep architectures have shown strong performance on tabular data problems. Unlike traditional models, e.g., MLP, these architectures map scalar values of numerical features to high-dimensional embeddings before mixing them in the main backbone. In this work, we argue that embeddings for numerical features are an underexplored degree of freedom in tabular DL, which allows constructing more powerful DL models and competing with GBDT on some traditionally GBDT-friendly benchmarks. We start by describing two conceptually different approaches to building embedding modules: the first one is based on a piecewise linear encoding of scalar values, and the second one utilizes periodic activations. Then, we empirically demonstrate that these two approaches can lead to significant performance boosts compared to the embeddings based on conventional blocks such as linear layers and ReLU activations. 
Importantly, we also show that embedding numerical features is beneficial for many backbones, not only for Transformers. Specifically, after proper embeddings, simple MLP-like models can perform on par with the attention-based architectures. Overall, we highlight embeddings for numerical features as an important design aspect with good potential for further improvements in tabular DL.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Yura Gorishniy", "Ivan Rubachev", "Artem Babenko" ], "externalIds": { "ArXiv": "2203.05556", "DBLP": "journals/corr/abs-2203-05556", "DOI": "10.48550/arXiv.2203.05556", "CorpusId": 247362943 }, "url": "https://www.semanticscholar.org/paper/1057c047156594c8494b7e5f1f747d9f15dd1162", "referenceCount": 56, "citationCount": 102, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Transformers Can Do Bayesian Inference", "abstract": "Currently, it is hard to reap the benefits of deep learning for Bayesian methods, which allow the explicit specification of prior knowledge and accurately capture model uncertainty. We present Prior-Data Fitted Networks (PFNs). PFNs leverage in-context learning in large-scale machine learning techniques to approximate a large set of posteriors. The only requirement for PFNs to work is the ability to sample from a prior distribution over supervised learning tasks (or functions). Our method restates the objective of posterior approximation as a supervised classification problem with a set-valued input: it repeatedly draws a task (or function) from the prior, draws a set of data points and their labels from it, masks one of the labels and learns to make probabilistic predictions for it based on the set-valued input of the rest of the data points. Presented with a set of samples from a new supervised learning task as input, PFNs make probabilistic predictions for arbitrary other data points in a single forward propagation, having learned to approximate Bayesian inference. We demonstrate that PFNs can near-perfectly mimic Gaussian processes and also enable efficient Bayesian inference for intractable problems, with over 200-fold speedups in multiple setups compared to current methods. We obtain strong results in very diverse areas such as Gaussian process regression, Bayesian neural networks, classification for small tabular data sets, and few-shot image classification, demonstrating the generality of PFNs. Code and trained PFNs are released at https://github.com/automl/TransformersCanDoBayesianInference.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Samuel Muller", "Noah Hollmann", "Sebastian Pineda Arango", "Josif Grabocka", "F. Hutter" ], "externalIds": { "DBLP": "conf/iclr/0005HPGH22", "ArXiv": "2112.10510", "CorpusId": 245334722 }, "url": "https://www.semanticscholar.org/paper/d88a5ae1673f2009704186acf2890163e6ddf4ca", "referenceCount": 68, "citationCount": 94, "influentialCitationCount": 11, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Does your graph need a confidence boost? Convergent boosted smoothing on graphs with tabular node features", "abstract": "For supervised learning with tabular data, decision tree ensembles produced via boosting techniques generally dominate real-world applications involving iid training/test sets. 
However for graph data where the iid assumption is violated due to structured relations between samples, it remains unclear how to best incorporate this structure within existing boosting pipelines. To this end, we propose a generalized framework for iterating boosting with graph propagation steps that share node/sample information across edges connecting related samples. Unlike previous efforts to integrate graph-based models with boosting, our approach is anchored in a principled meta loss function such that provable convergence can be guaranteed under relatively mild assumptions. Across a variety of non-iid graph datasets with tabular node features, our method achieves comparable or superior performance than both tabular and graph neural network models, as well as existing hybrid strategies that combine the two. Beyond producing better predictive performance than recently proposed graph models, our proposed techniques are easy to implement, computationally more efficient, and enjoy stronger theoretical guarantees (which make our results more reproducible).", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Jiuhai Chen", "Jonas W. Mueller", "V. Ioannidis", "Soji Adeshina", "Yangkun Wang", "T. Goldstein", "D. Wipf" ], "externalIds": { "ArXiv": "2110.13413", "DBLP": "conf/iclr/ChenMIAWGW22", "CorpusId": 251649129 }, "url": "https://www.semanticscholar.org/paper/80e9b6a7e9b58be3429cacabf0db0be8fe7fd379", "referenceCount": 48, "citationCount": 10, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Retrieval & Interaction Machine for Tabular Data Prediction", "abstract": "Prediction over tabular data is an essential task in many data science applications such as recommender systems, online advertising, medical treatment, etc. Tabular data is structured into rows and columns, with each row as a data sample and each column as a feature attribute. Both the columns and rows of the tabular data carry useful patterns that could improve the model prediction performance. However, most existing models focus on the cross-column patterns yet overlook the cross-rowpatterns as they deal with single samples independently. In this work, we propose a general learning framework named Retrieval & Interaction Machine (RIM) that fully exploits both cross-row and cross-column patterns among tabular data. Specifically, RIM first leverages search engine techniques to efficiently retrieve useful rows of the table to assist the label prediction of the target row, then uses feature interaction networks to capture the cross-column patterns among the target row and the retrieved rows so as to make the final label prediction. We conduct extensive experiments on 11 datasets of three important tasks, i.e., CTR prediction (classification), top-n recommendation (ranking) and rating prediction (regression). 
Experimental results show that RIM achieves significant improvements over the state-of-the-art and various baselines, demonstrating the superiority and efficacy of RIM.", "year": 2021, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Jiarui Qin", "Weinan Zhang", "Rong Su", "Zhirong Liu", "Weiwen Liu", "Ruiming Tang", "Xiuqiang He", "Yong Yu" ], "externalIds": { "DBLP": "journals/corr/abs-2108-05252", "ArXiv": "2108.05252", "DOI": "10.1145/3447548.3467216", "CorpusId": 236976069 }, "url": "https://www.semanticscholar.org/paper/529c0c3a9f4ec801739de6946e8a9ee7e5c5ed74", "referenceCount": 41, "citationCount": 39, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Revisiting Deep Learning Models for Tabular Data", "abstract": "The existing literature on deep learning for tabular data proposes a wide range of novel architectures and reports competitive results on various datasets. However, the proposed models are usually not properly compared to each other and existing works often use different benchmarks and experiment protocols. As a result, it is unclear for both researchers and practitioners what models perform best. Additionally, the field still lacks effective baselines, that is, the easy-to-use models that provide competitive performance across different problems. In this work, we perform an overview of the main families of DL architectures for tabular data and raise the bar of baselines in tabular DL by identifying two simple and powerful deep architectures. The first one is a ResNet-like architecture which turns out to be a strong baseline that is often missing in prior works. The second model is our simple adaptation of the Transformer architecture for tabular data, which outperforms other solutions on most tasks. Both models are compared to many existing architectures on a diverse set of tasks under the same training and tuning protocols. We also compare the best DL models with Gradient Boosted Decision Trees and conclude that there is still no universally superior solution.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Yu. V. Gorishniy", "Ivan Rubachev", "Valentin Khrulkov", "Artem Babenko" ], "externalIds": { "DBLP": "conf/nips/GorishniyRKB21", "ArXiv": "2106.11959", "CorpusId": 235593213 }, "url": "https://www.semanticscholar.org/paper/5fa06d856ba6ae9cd1366888f8134d7fd0db75b9", "referenceCount": 63, "citationCount": 503, "influentialCitationCount": 109, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Well-tuned Simple Nets Excel on Tabular Datasets", "abstract": "Tabular datasets are the last \"unconquered castle\" for deep learning, with traditional ML methods like Gradient-Boosted Decision Trees still performing strongly even against recent specialized neural architectures. In this paper, we hypothesize that the key to boosting the performance of neural networks lies in rethinking the joint and simultaneous application of a large set of modern regularization techniques. As a result, we propose regularizing plain Multilayer Perceptron (MLP) networks by searching for the optimal combination/cocktail of 13 regularization techniques for each dataset using a joint optimization over the decision on which regularizers to apply and their subsidiary hyperparameters. 
We empirically assess the impact of these regularization cocktails for MLPs in a large-scale empirical study comprising 40 tabular datasets and demonstrate that (i) well-regularized plain MLPs significantly outperform recent state-of-the-art specialized neural network architectures, and (ii) they even outperform strong traditional ML methods, such as XGBoost.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Arlind Kadra", "M. Lindauer", "F. Hutter", "Josif Grabocka" ], "externalIds": { "DBLP": "conf/nips/KadraLHG21", "ArXiv": "2106.11189", "CorpusId": 243832591 }, "url": "https://www.semanticscholar.org/paper/d2196723bfc17837337f75aede2fb35a025349b9", "referenceCount": 67, "citationCount": 150, "influentialCitationCount": 19, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Tabular Data: Deep Learning is Not All You Need", "abstract": null, "year": 2021, "venue": "Information Fusion", "authors": [ "Ravid Shwartz-Ziv", "Amitai Armon" ], "externalIds": { "ArXiv": "2106.03253", "DBLP": "journals/inffus/Shwartz-ZivA22", "DOI": "10.1016/j.inffus.2021.11.011", "CorpusId": 260435623 }, "url": "https://www.semanticscholar.org/paper/009560d2710138a446e6e254d8ddcb65eaa0e687", "referenceCount": 59, "citationCount": 873, "influentialCitationCount": 32, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learnable Fourier Features for Multi-Dimensional Spatial Positional Encoding", "abstract": "Attentional mechanisms are order-invariant. Positional encoding is a crucial component to allow attention-based deep model architectures such as Transformer to address sequences or images where the position of information matters. In this paper, we propose a novel positional encoding method based on learnable Fourier features. Instead of hard-coding each position as a token or a vector, we represent each position, which can be multi-dimensional, as a trainable encoding based on learnable Fourier feature mapping, modulated with a multi-layer perceptron. The representation is particularly advantageous for a spatial multi-dimensional position, e.g., pixel positions on an image, where $L_2$ distances or more complex positional relationships need to be captured. Our experiments based on several public benchmark tasks show that our learnable Fourier feature representation for multi-dimensional positional encoding outperforms existing methods by both improving the accuracy and allowing faster convergence.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Yang Li", "Si Si", "Gang Li", "Cho-Jui Hsieh", "Samy Bengio" ], "externalIds": { "ArXiv": "2106.02795", "DBLP": "conf/nips/LiSLHB21", "CorpusId": 235358705 }, "url": "https://www.semanticscholar.org/paper/94576783bc73bf55a0091203a3d45a0a4665a1ae", "referenceCount": 43, "citationCount": 68, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Self-Attention Between Datapoints: Going Beyond Individual Input-Output Pairs in Deep Learning", "abstract": "We challenge a common assumption underlying most supervised deep learning: that a model makes a prediction depending only on its parameters and the features of a single input. To this end, we introduce a general-purpose deep learning architecture that takes as input the entire dataset instead of processing one datapoint at a time. 
Our approach uses self-attention to reason about relationships between datapoints explicitly, which can be seen as realizing non-parametric models using parametric attention mechanisms. However, unlike conventional non-parametric models, we let the model learn end-to-end from the data how to make use of other datapoints for prediction. Empirically, our models solve cross-datapoint lookup and complex reasoning tasks unsolvable by traditional deep learning models. We show highly competitive results on tabular data, early results on CIFAR-10, and give insight into how the model makes use of the interactions between points.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Jannik Kossen", "Neil Band", "Clare Lyle", "Aidan N. Gomez", "Tom Rainforth", "Y. Gal" ], "externalIds": { "DBLP": "journals/corr/abs-2106-02584", "ArXiv": "2106.02584", "CorpusId": 235352709 }, "url": "https://www.semanticscholar.org/paper/5f340aede85050917a8a1abb6a62d958187ad5a4", "referenceCount": 114, "citationCount": 106, "influentialCitationCount": 18, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "SAINT: Improved Neural Networks for Tabular Data via Row Attention and Contrastive Pre-Training", "abstract": "Tabular data underpins numerous high-impact applications of machine learning from fraud detection to genomics and healthcare. Classical approaches to solving tabular problems, such as gradient boosting and random forests, are widely used by practitioners. However, recent deep learning methods have achieved a degree of performance competitive with popular techniques. We devise a hybrid deep learning approach to solving tabular data problems. Our method, SAINT, performs attention over both rows and columns, and it includes an enhanced embedding method. We also study a new contrastive self-supervised pre-training method for use when labels are scarce. SAINT consistently improves performance over previous deep learning methods, and it even outperforms gradient boosting methods, including XGBoost, CatBoost, and LightGBM, on average over a variety of benchmark tasks.", "year": 2021, "venue": "arXiv.org", "authors": [ "Gowthami Somepalli", "Micah Goldblum", "Avi Schwarzschild", "C. B. Bruss", "T. Goldstein" ], "externalIds": { "DBLP": "journals/corr/abs-2106-01342", "ArXiv": "2106.01342", "CorpusId": 235293989 }, "url": "https://www.semanticscholar.org/paper/5fa2103e36b3e76e49edb8433a1206a6b25e3ead", "referenceCount": 59, "citationCount": 230, "influentialCitationCount": 48, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "ATJ-Net: Auto-Table-Join Network for Automatic Learning on Relational Databases", "abstract": "A relational database, consisting of multiple tables, provides heterogeneous information across various entities, widely used in real-world services. This paper studies the supervised learning task on multiple tables, aiming to predict one label column with the help of multiple-tabular data. However, classical ML techniques mainly focus on single-tabular data. Multiple-tabular data refers to many-to-many mapping among joinable attributes and n-ary relations, which cannot be utilized directly by classical ML techniques. Besides, current graph techniques, like heterogeneous information network (HIN) and graph neural networks (GNN), are infeasible to be deployed directly and automatically in a multi-table environment, which limits the learning on databases. 
For automatic learning on relational databases, we propose an auto-table-join network (ATJ-Net). Multiple tables with relationships are considered as a hypergraph, where vertices are joinable attributes and hyperedges are tuples of tables. Then, ATJ-Net builds a graph neural network on the heterogeneous hypergraph, which samples and aggregates the vertices and hyperedges on n-hop sub-graphs as the receptive field. In order to enable ATJ-Net to be automatically deployed to different datasets and avoid the ”no free lunch” dilemma, we use random architecture search to select optimal aggregators and prune redundant paths in the network. For verifying the effectiveness of our methods across various tasks and schema, we conduct extensive experiments on 4 tasks, 8 various schemas, and 19 sub-datasets w.r.t. citing prediction, review classification, recommendation, and task-blind challenge. ATJ-Net achieves the best performance over state-of-the-art approaches on three tasks and is competitive with KddCup Winner solution on task-blind challenge.", "year": 2021, "venue": "The Web Conference", "authors": [ "Jinze Bai", "Jialin Wang", "Zhao Li", "Donghui Ding", "Ji Zhang", "Jun Gao" ], "externalIds": { "DBLP": "conf/www/BaiWLDZ021", "DOI": "10.1145/3442381.3449980", "CorpusId": 235324684 }, "url": "https://www.semanticscholar.org/paper/6a794b1dfd85f514bbcd7b5c7b21d261545aee02", "referenceCount": 55, "citationCount": 9, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Boost then Convolve: Gradient Boosting Meets Graph Neural Networks", "abstract": "Graph neural networks (GNNs) are powerful models that have been successful in various graph representation learning tasks. Whereas gradient boosted decision trees (GBDT) often outperform other machine learning methods when faced with heterogeneous tabular data. But what approach should be used for graphs with tabular node features? Previous GNN models have mostly focused on networks with homogeneous sparse features and, as we show, are suboptimal in the heterogeneous setting. In this work, we propose a novel architecture that trains GBDT and GNN jointly to get the best of both worlds: the GBDT model deals with heterogeneous features, while GNN accounts for the graph structure. Our model benefits from end-to-end optimization by allowing new trees to fit the gradient updates of GNN. With an extensive experimental comparison to the leading GBDT and GNN models, we demonstrate a significant increase in performance on a variety of graphs with tabular features. The code is available: https://github.com/nd7141/bgnn.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Sergei Ivanov", "L. Prokhorenkova" ], "externalIds": { "DBLP": "journals/corr/abs-2101-08543", "MAG": "3129205570", "ArXiv": "2101.08543", "CorpusId": 231662243 }, "url": "https://www.semanticscholar.org/paper/07d38f062da2f13e3ff532d630aacc3e8dcaccca", "referenceCount": 44, "citationCount": 42, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "TabTransformer: Tabular Data Modeling Using Contextual Embeddings", "abstract": "We propose TabTransformer, a novel deep tabular data modeling architecture for supervised and semi-supervised learning. The TabTransformer is built upon self-attention based Transformers. 
The Transformer layers transform the embeddings of categorical features into robust contextual embeddings to achieve higher prediction accuracy. Through extensive experiments on fifteen publicly available datasets, we show that the TabTransformer outperforms the state-of-the-art deep learning methods for tabular data by at least 1.0% on mean AUC, and matches the performance of tree-based ensemble models. Furthermore, we demonstrate that the contextual embeddings learned from TabTransformer are highly robust against both missing and noisy data features, and provide better interpretability. Lastly, for the semi-supervised setting we develop an unsupervised pre-training procedure to learn data-driven contextual embeddings, resulting in an average 2.1% AUC lift over the state-of-the-art methods.", "year": 2020, "venue": "arXiv.org", "authors": [ "Xin Huang", "A. Khetan", "Milan Cvitkovic", "Zohar S. Karnin" ], "externalIds": { "MAG": "3111356309", "DBLP": "journals/corr/abs-2012-06678", "ArXiv": "2012.06678", "CorpusId": 229156048 }, "url": "https://www.semanticscholar.org/paper/a2ec47b9bcc95d2456a8a42199233e5d9129ef18", "referenceCount": 56, "citationCount": 299, "influentialCitationCount": 45, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Masked Label Prediction: Unified Massage Passing Model for Semi-Supervised Classification", "abstract": "Graph neural network (GNN) and label propagation algorithm (LPA) are both message passing algorithms, which have achieved superior performance in semi-supervised classification. GNN performs feature propagation by a neural network to make predictions, while LPA uses label propagation across graph adjacency matrix to get results. However, there is still no effective way to directly combine these two kinds of algorithms. To address this issue, we propose a novel Unified Message Passaging Model (UniMP) that can incorporate feature and label propagation at both training and inference time. First, UniMP adopts a Graph Transformer network, taking feature embedding and label embedding as input information for propagation. Second, to train the network without overfitting in self-loop input label information, UniMP introduces a masked label prediction strategy, in which some percentage of input label information are masked at random, and then predicted. UniMP conceptually unifies feature propagation and label propagation and is empirically powerful. It obtains new state-of-the-art semi-supervised classification results in Open Graph Benchmark (OGB).", "year": 2020, "venue": "International Joint Conference on Artificial Intelligence", "authors": [ "Yunsheng Shi", "Zhengjie Huang", "Wenjin Wang", "Hui Zhong", "Shikun Feng", "Yu Sun" ], "externalIds": { "DBLP": "journals/corr/abs-2009-03509", "ArXiv": "2009.03509", "MAG": "3084428871", "DOI": "10.24963/ijcai.2021/214", "CorpusId": 221534325 }, "url": "https://www.semanticscholar.org/paper/f5623cd36228c0606c1dbcd3ab034df24c58312f", "referenceCount": 45, "citationCount": 532, "influentialCitationCount": 44, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Enhancing Graph Neural Network-based Fraud Detectors against Camouflaged Fraudsters", "abstract": "Graph Neural Networks (GNNs) have been widely applied to fraud detection problems in recent years, revealing the suspiciousness of nodes by aggregating their neighborhood information via different relations. 
However, few prior works have noticed the camouflage behavior of fraudsters, which could hamper the performance of GNN-based fraud detectors during the aggregation process. In this paper, we introduce two types of camouflages based on recent empirical studies, i.e., the feature camouflage and the relation camouflage. Existing GNNs have not addressed these two camouflages, which results in their poor performance in fraud detection problems. Alternatively, we propose a new model named CAmouflage-REsistant GNN (CARE-GNN), to enhance the GNN aggregation process with three unique modules against camouflages. Concretely, we first devise a label-aware similarity measure to find informative neighboring nodes. Then, we leverage reinforcement learning (RL) to find the optimal amounts of neighbors to be selected. Finally, the selected neighbors across different relations are aggregated together. Comprehensive experiments on two real-world fraud datasets demonstrate the effectiveness of the RL algorithm. The proposed CARE-GNN also outperforms state-of-the-art GNNs and GNN-based fraud detectors. We integrate all GNN-based fraud detectors as an opensource toolbox https://github.com/safe-graph/DGFraud. The CARE-GNN code and datasets are available at https://github.com/YingtongDou/CARE-GNN.", "year": 2020, "venue": "International Conference on Information and Knowledge Management", "authors": [ "Yingtong Dou", "Zhiwei Liu", "Li Sun", "Yutong Deng", "Hao Peng", "Philip S. Yu" ], "externalIds": { "ArXiv": "2008.08692", "MAG": "3102969158", "DBLP": "conf/cikm/DouL0DPY20", "DOI": "10.1145/3340531.3411903", "CorpusId": 221186720 }, "url": "https://www.semanticscholar.org/paper/14156438bafed28a626738630b5181b83ed5d79c", "referenceCount": 52, "citationCount": 326, "influentialCitationCount": 56, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fourier Features Let Networks Learn High Frequency Functions in Low Dimensional Domains", "abstract": "We show that passing input points through a simple Fourier feature mapping enables a multilayer perceptron (MLP) to learn high-frequency functions in low-dimensional problem domains. These results shed light on recent advances in computer vision and graphics that achieve state-of-the-art results by using MLPs to represent complex 3D objects and scenes. Using tools from the neural tangent kernel (NTK) literature, we show that a standard MLP fails to learn high frequencies both in theory and in practice. To overcome this spectral bias, we use a Fourier feature mapping to transform the effective NTK into a stationary kernel with a tunable bandwidth. We suggest an approach for selecting problem-specific Fourier features that greatly improves the performance of MLPs for low-dimensional regression tasks relevant to the computer vision and graphics communities.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Matthew Tancik", "Pratul P. Srinivasan", "B. Mildenhall", "Sara Fridovich-Keil", "N. Raghavan", "Utkarsh Singhal", "R. Ramamoorthi", "J. 
Barron", "Ren Ng" ], "externalIds": { "DBLP": "conf/nips/TancikSMFRSRBN20", "ArXiv": "2006.10739", "MAG": "3036843665", "CorpusId": 219791950 }, "url": "https://www.semanticscholar.org/paper/a0dc3135c40e150f0271002a96b7c9680b6cac40", "referenceCount": 48, "citationCount": 1878, "influentialCitationCount": 210, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Implicit Neural Representations with Periodic Activation Functions", "abstract": "Implicitly defined, continuous, differentiable signal representations parameterized by neural networks have emerged as a powerful paradigm, offering many possible benefits over conventional representations. However, current network architectures for such implicit neural representations are incapable of modeling signals with fine detail, and fail to represent a signal's spatial and temporal derivatives, despite the fact that these are essential to many physical signals defined implicitly as the solution to partial differential equations. We propose to leverage periodic activation functions for implicit neural representations and demonstrate that these networks, dubbed sinusoidal representation networks or Sirens, are ideally suited for representing complex natural signals and their derivatives. We analyze Siren activation statistics to propose a principled initialization scheme and demonstrate the representation of images, wavefields, video, sound, and their derivatives. Further, we show how Sirens can be leveraged to solve challenging boundary value problems, such as particular Eikonal equations (yielding signed distance functions), the Poisson equation, and the Helmholtz and wave equations. Lastly, we combine Sirens with hypernetworks to learn priors over the space of Siren functions.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "V. Sitzmann", "Julien N. P. Martel", "Alexander W. Bergman", "David B. Lindell", "Gordon Wetzstein" ], "externalIds": { "MAG": "3103313582", "DBLP": "conf/nips/SitzmannMBLW20", "ArXiv": "2006.09661", "CorpusId": 219720931 }, "url": "https://www.semanticscholar.org/paper/43b1e34451f783fed053c1d539d7560dc4ec16a9", "referenceCount": 103, "citationCount": 2032, "influentialCitationCount": 398, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "GCN-Based User Representation Learning for Unifying Robust Recommendation and Fraudster Detection", "abstract": "In recent years, recommender system has become an indispensable function in all e-commerce platforms. The review rating data for a recommender system typically comes from open platforms, which may attract a group of malicious users to deliberately insert fake feedback in an attempt to bias the recommender system to their favour. The presence of such attacks may violate modeling assumptions that high-quality data is always available and these data truly reflect users' interests and preferences. Therefore, it is of great practical significance to construct a robust recommender system that is able to generate stable recommendations even in the presence of shilling attacks. In this paper, we propose GraphRfi - a GCN-based user representation learning framework to perform robust recommendation and fraudster detection in a unified way. 
In its end-to-end learning process, the probability of a user being identified as a fraudster in the fraudster detection component automatically determines the contribution of this user's rating data in the recommendation component; while the prediction error outputted in the recommendation component acts as an important feature in the fraudster detection component. Thus, these two components can mutually enhance each other. Extensive experiments have been conducted and the experimental results show the superiority of our GraphRfi in the two tasks - robust rating prediction and fraudster detection. Furthermore, the proposed GraphRfi is validated to be more robust to the various types of shilling attacks over the state-of-the-art recommender systems.", "year": 2020, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "Shijie Zhang", "Hongzhi Yin", "Tong Chen", "Quoc Viet Nguyen Hung", "Zi-Liang Huang", "Li-zhen Cui" ], "externalIds": { "DBLP": "journals/corr/abs-2005-10150", "MAG": "3028560702", "ArXiv": "2005.10150", "DOI": "10.1145/3397271.3401165", "CorpusId": 218718742 }, "url": "https://www.semanticscholar.org/paper/443c6f2a3bd2f74f1f59bea2b94d455a9c7a5c23", "referenceCount": 57, "citationCount": 143, "influentialCitationCount": 18, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The KDD Cup 2019 Report", "abstract": "The KDD Cup has been a data science competition affiliated with the ACM SIGKDD conference with more than 20 years' tradition. In 2019, we organized the KDD Cup by hosting 3 parallel tracks, each with tremendous innovation. The regular machine learning (ML) track was a context-aware travel mode recommendation problem, sponsored by Baidu.com. The automatic machine learning (AutoML) track, sponsored by 4Paradigm, was about finding cost-effective and transferable solutions for temporal relational data represented as multiple related tables. The humanity reinforcement learning (RL) track was sponsored by IBM Africa and Hexagon-ML to determine the best policy in distribution of control measures to eradicate Malaria. In 3 competitions collectively, we had more than 2,800 registered teams from over 39 countries and 230 academic and research institutions. Among the 1,200 most actively participating teams, over 5,000 individuals participated, and more than 17,000 submissions were made. A total exceeding 100 thousand U.S. dollars were awarded to the winning teams.", "year": 2020, "venue": "SIGKDD Explorations", "authors": [ "Wenjun Zhou", "Taposh Dutta Roy", "Iryna Skrypnyk" ], "externalIds": { "MAG": "3027040504", "DBLP": "journals/sigkdd/ZhouRS20", "DOI": "10.1145/3400051.3400056", "CorpusId": 218831172 }, "url": "https://www.semanticscholar.org/paper/a9c699354634dcad76a803cd6ba05a9b99ff1ee0", "referenceCount": 11, "citationCount": 10, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Open Graph Benchmark: Datasets for Machine Learning on Graphs", "abstract": "We present the Open Graph Benchmark (OGB), a diverse set of challenging and realistic benchmark datasets to facilitate scalable, robust, and reproducible graph machine learning (ML) research. OGB datasets are large-scale (up to 100+ million nodes and 1+ billion edges), encompass multiple important graph ML tasks, and cover a diverse range of domains, ranging from social and information networks to biological networks, molecular graphs, source code ASTs, and knowledge graphs. 
For each dataset, we provide a unified evaluation protocol using meaningful application-specific data splits and evaluation metrics. In addition to building the datasets, we also perform extensive benchmark experiments for each dataset. Our experiments suggest that OGB datasets present significant challenges of scalability to large-scale graphs and out-of-distribution generalization under realistic data splits, indicating fruitful opportunities for future research. Finally, OGB provides an automated end-to-end graph ML pipeline that simplifies and standardizes the process of graph data loading, experimental setup, and model evaluation. OGB will be regularly updated and welcomes inputs from the community. OGB datasets as well as data loaders, evaluation scripts, baseline code, and leaderboards are publicly available at this https URL .", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Weihua Hu", "Matthias Fey", "M. Zitnik", "Yuxiao Dong", "Hongyu Ren", "Bowen Liu", "Michele Catasta", "J. Leskovec" ], "externalIds": { "MAG": "3021975806", "DBLP": "journals/corr/abs-2005-00687", "ArXiv": "2005.00687", "CorpusId": 218487328 }, "url": "https://www.semanticscholar.org/paper/597bd2e45427563cdf025e53a3239006aa364cfc", "referenceCount": 110, "citationCount": 2219, "influentialCitationCount": 467, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Gradient Boosting Neural Networks: GrowNet", "abstract": "A novel gradient boosting framework is proposed where shallow neural networks are employed as ``weak learners''. General loss functions are considered under this unified framework with specific examples presented for classification, regression, and learning to rank. A fully corrective step is incorporated to remedy the pitfall of greedy function approximation of classic gradient boosting decision tree. The proposed model rendered outperforming results against state-of-the-art boosting methods in all three tasks on multiple datasets. An ablation study is performed to shed light on the effect of each model components and model hyperparameters.", "year": 2020, "venue": "arXiv.org", "authors": [ "Sarkhan Badirli", "Xuanqing Liu", "Zhengming Xing", "Avradeep Bhowmik", "S. Keerthi" ], "externalIds": { "MAG": "3007872010", "ArXiv": "2002.07971", "DBLP": "journals/corr/abs-2002-07971", "CorpusId": 211171532 }, "url": "https://www.semanticscholar.org/paper/6a0fcd223601c1a9436d13fad45d3c60aafa8b84", "referenceCount": 33, "citationCount": 65, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "The Tree Ensemble Layer: Differentiability meets Conditional Computation", "abstract": "Neural networks and tree ensembles are state-of-the-art learners, each with its unique statistical and computational advantages. We aim to combine these advantages by introducing a new layer for neural networks, composed of an ensemble of differentiable decision trees (a.k.a. soft trees). While differentiable trees demonstrate promising results in the literature, they are typically slow in training and inference as they do not support conditional computation. We mitigate this issue by introducing a new sparse activation function for sample routing, and implement true conditional computation by developing specialized forward and backward propagation algorithms that exploit sparsity. 
Our efficient algorithms pave the way for jointly training over deep and wide tree ensembles using first-order methods (e.g., SGD). Experiments on 23 classification datasets indicate over 10x speed-ups compared to the differentiable trees used in the literature and over 20x reduction in the number of parameters compared to gradient boosted trees, while maintaining competitive performance. Moreover, experiments on CIFAR, MNIST, and Fashion MNIST indicate that replacing dense layers in CNNs with our tree layer reduces the test loss by 7-53% and the number of parameters by 8x. We provide an open-source TensorFlow implementation with a Keras API.", "year": 2020, "venue": "International Conference on Machine Learning", "authors": [ "Hussein Hazimeh", "N. Ponomareva", "P. Mol", "Zhenyu Tan", "R. Mazumder" ], "externalIds": { "MAG": "3034933916", "DBLP": "journals/corr/abs-2002-07772", "ArXiv": "2002.07772", "CorpusId": 211146642 }, "url": "https://www.semanticscholar.org/paper/a834fa118bb738a276ebf2756d66f11b543a63c8", "referenceCount": 45, "citationCount": 69, "influentialCitationCount": 8, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Supervised Learning on Relational Databases with Graph Neural Networks", "abstract": "The majority of data scientists and machine learning practitioners use relational data in their work [State of ML and Data Science 2017, Kaggle, Inc.]. But training machine learning models on data stored in relational databases requires significant data extraction and feature engineering efforts. These efforts are not only costly, but they also destroy potentially important relational structure in the data. We introduce a method that uses Graph Neural Networks to overcome these challenges. Our proposed method outperforms state-of-the-art automatic feature engineering methods on two out of three datasets.", "year": 2020, "venue": "arXiv.org", "authors": [ "Milan Cvitkovic" ], "externalIds": { "ArXiv": "2002.02046", "DBLP": "journals/corr/abs-2002-02046", "MAG": "3005321586", "CorpusId": 211043697 }, "url": "https://www.semanticscholar.org/paper/d6b68954d6acddce319de327af65d164bdfc5543", "referenceCount": 39, "citationCount": 32, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Siamese Graph Neural Networks for Data Integration", "abstract": "Data integration has been studied extensively for decades and approached from different angles. However, this domain still remains largely rule-driven and lacks universal automation. Recent development in machine learning and in particular deep learning has opened the way to more general and more efficient solutions to data integration problems. In this work, we propose a general approach to modeling and integrating entities from structured data, such as relational databases, as well as unstructured sources, such as free text from news articles. Our approach is designed to explicitly model and leverage relations between entities, thereby using all available information and preserving as much context as possible. This is achieved by combining siamese and graph neural networks to propagate information between connected entities and support high scalability. 
We evaluate our method on the task of integrating data about business entities, and we demonstrate that it outperforms standard rule-based systems, as well as other deep learning approaches that do not use graph-based representations.", "year": 2020, "venue": "arXiv.org", "authors": [ "E. Krivosheev", "Mattia Atzeni", "Katsiaryna Mirylenka", "P. Scotton", "F. Casati" ], "externalIds": { "DBLP": "journals/corr/abs-2001-06543", "ArXiv": "2001.06543", "MAG": "3001452598", "CorpusId": 210839072 }, "url": "https://www.semanticscholar.org/paper/66a7f14900bd962d412c74f512e4356dfc16f733", "referenceCount": 61, "citationCount": 11, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PyTorch: An Imperative Style, High-Performance Deep Learning Library", "abstract": "Deep learning frameworks have often focused on either usability or speed, but not both. PyTorch is a machine learning library that shows that these two goals are in fact compatible: it was designed from first principles to support an imperative and Pythonic programming style that supports code as a model, makes debugging easy and is consistent with other popular scientific computing libraries, while remaining efficient and supporting hardware accelerators such as GPUs. In this paper, we detail the principles that drove the implementation of PyTorch and how they are reflected in its architecture. We emphasize that every aspect of PyTorch is a regular Python program under the full control of its user. We also explain how the careful and pragmatic implementation of the key components of its runtime enables them to work together to achieve compelling performance. We demonstrate the efficiency of individual subsystems, as well as the overall speed of PyTorch on several commonly used benchmarks.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Adam Paszke", "Sam Gross", "Francisco Massa", "Adam Lerer", "James Bradbury", "Gregory Chanan", "Trevor Killeen", "Zeming Lin", "N. Gimelshein", "L. Antiga", "Alban Desmaison", "Andreas Köpf", "E. Yang", "Zach DeVito", "Martin Raison", "Alykhan Tejani", "Sasank Chilamkurthy", "Benoit Steiner", "Lu Fang", "Junjie Bai", "Soumith Chintala" ], "externalIds": { "MAG": "2970971581", "DBLP": "journals/corr/abs-1912-01703", "ArXiv": "1912.01703", "CorpusId": 202786778 }, "url": "https://www.semanticscholar.org/paper/3c8a456509e6c0805354bd40a35e3f2dbf8069b1", "referenceCount": 39, "citationCount": 36158, "influentialCitationCount": 3694, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Neural Oblivious Decision Ensembles for Deep Learning on Tabular Data", "abstract": "Nowadays, deep neural networks (DNNs) have become the main instrument for machine learning tasks within a wide range of domains, including vision, NLP, and speech. Meanwhile, in an important case of heterogenous tabular data, the advantage of DNNs over shallow counterparts remains questionable. In particular, there is no sufficient evidence that deep learning machinery allows constructing methods that outperform gradient boosting decision trees (GBDT), which are often the top choice for tabular problems. In this paper, we introduce Neural Oblivious Decision Ensembles (NODE), a new deep learning architecture, designed to work with any tabular data. 
In a nutshell, the proposed NODE architecture generalizes ensembles of oblivious decision trees, but benefits from both end-to-end gradient-based optimization and the power of multi-layer hierarchical representation learning. With an extensive experimental comparison to the leading GBDT packages on a large number of tabular datasets, we demonstrate the advantage of the proposed NODE architecture, which outperforms the competitors on most of the tasks. We open-source the PyTorch implementation of NODE and believe that it will become a universal framework for machine learning on tabular data.", "year": 2019, "venue": "International Conference on Learning Representations", "authors": [ "Sergei Popov", "S. Morozov", "Artem Babenko" ], "externalIds": { "MAG": "2972470287", "ArXiv": "1909.06312", "DBLP": "conf/iclr/PopovMB20", "CorpusId": 202573030 }, "url": "https://www.semanticscholar.org/paper/0b22dbd48ce4e13bdbf0c9d5e86a9cefdaf6d40a", "referenceCount": 35, "citationCount": 238, "influentialCitationCount": 48, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Deep Graph Library: A Graph-Centric, Highly-Performant Package for Graph Neural Networks.", "abstract": "Advancing research in the emerging field of deep graph learning requires new tools to support tensor computation over graphs. In this paper, we present the design principles and implementation of Deep Graph Library (DGL). DGL distills the computational patterns of GNNs into a few generalized sparse tensor operations suitable for extensive parallelization. By advocating graph as the central programming abstraction, DGL can perform optimizations transparently. By cautiously adopting a framework-neutral design, DGL allows users to easily port and leverage the existing components across multiple deep learning frameworks. Our evaluation shows that DGL significantly outperforms other popular GNN-oriented frameworks in both speed and memory consumption over a variety of benchmarks and has little overhead for small scale workloads.", "year": 2019, "venue": "", "authors": [ "Minjie Wang", "Da Zheng", "Zihao Ye", "Quan Gan", "Mufei Li", "Xiang Song", "Jinjing Zhou", "Chao Ma", "Lingfan Yu", "Yujie Gai", "Tianjun Xiao", "Tong He", "G. Karypis", "Jinyang Li", "Zheng Zhang" ], "externalIds": { "MAG": "3080555959", "CorpusId": 221304724 }, "url": "https://www.semanticscholar.org/paper/381411d740562de1e766dc8cc833844eb99dde01", "referenceCount": 52, "citationCount": 973, "influentialCitationCount": 133, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "TabNet: Attentive Interpretable Tabular Learning", "abstract": "We propose a novel high-performance and interpretable canonical deep tabular data learning architecture, TabNet. TabNet uses sequential attention to choose which features to reason from at each decision step, enabling interpretability and more efficient learning as the learning capacity is used for the most salient features. We demonstrate that TabNet outperforms other variants on a wide range of non-performance-saturated tabular datasets and yields interpretable feature attributions plus insights into its global behavior. Finally, we demonstrate self-supervised learning for tabular data, significantly improving performance when unlabeled data is abundant.", "year": 2019, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Sercan Ö. 
Arik", "Tomas Pfister" ], "externalIds": { "DBLP": "conf/aaai/ArikP21", "ArXiv": "1908.07442", "MAG": "2969724595", "DOI": "10.1609/aaai.v35i8.16826", "CorpusId": 201107047 }, "url": "https://www.semanticscholar.org/paper/9fe69cf5c104b2205cdb7908df8cdb389256b4b5", "referenceCount": 72, "citationCount": 964, "influentialCitationCount": 144, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Optuna: A Next-generation Hyperparameter Optimization Framework", "abstract": "The purpose of this study is to introduce new design-criteria for next-generation hyperparameter optimization software. The criteria we propose include (1) define-by-run API that allows users to construct the parameter search space dynamically, (2) efficient implementation of both searching and pruning strategies, and (3) easy-to-setup, versatile architecture that can be deployed for various purposes, ranging from scalable distributed computing to light-weight experiment conducted via interactive interface. In order to prove our point, we will introduce Optuna, an optimization software which is a culmination of our effort in the development of a next generation optimization software. As an optimization software designed with define-by-run principle, Optuna is particularly the first of its kind. We will present the design-techniques that became necessary in the development of the software that meets the above criteria, and demonstrate the power of our new design through experimental results and real world applications. Our software is available under the MIT license (https://github.com/pfnet/optuna/).", "year": 2019, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Takuya Akiba", "Shotaro Sano", "Toshihiko Yanase", "Takeru Ohta", "Masanori Koyama" ], "externalIds": { "DBLP": "journals/corr/abs-1907-10902", "MAG": "2962897394", "ArXiv": "1907.10902", "DOI": "10.1145/3292500.3330701", "CorpusId": 196194314 }, "url": "https://www.semanticscholar.org/paper/4cdf2fad22afc865999747336c7399fe422e6e8e", "referenceCount": 34, "citationCount": 4014, "influentialCitationCount": 274, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Simplifying Graph Convolutional Networks", "abstract": "Graph Convolutional Networks (GCNs) and their variants have experienced significant attention and have become the de facto methods for learning graph representations. GCNs derive inspiration primarily from recent deep learning approaches, and as a result, may inherit unnecessary complexity and redundant computation. In this paper, we reduce this excess complexity through successively removing nonlinearities and collapsing weight matrices between consecutive layers. We theoretically analyze the resulting linear model and show that it corresponds to a fixed low-pass filter followed by a linear classifier. Notably, our experimental evaluation demonstrates that these simplifications do not negatively impact accuracy in many downstream applications. Moreover, the resulting model scales to larger datasets, is naturally interpretable, and yields up to two orders of magnitude speedup over FastGCN.", "year": 2019, "venue": "International Conference on Machine Learning", "authors": [ "Felix Wu", "Tianyi Zhang", "A. Souza", "Christopher Fifty", "Tao Yu", "Kilian Q. 
Weinberger" ], "externalIds": { "MAG": "2950619419", "ArXiv": "1902.07153", "DBLP": "journals/corr/abs-1902-07153", "CorpusId": 67752026 }, "url": "https://www.semanticscholar.org/paper/7e71eedb078181873a56f2adcfef9dddaeb95602", "referenceCount": 63, "citationCount": 2713, "influentialCitationCount": 597, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Pitfalls of Graph Neural Network Evaluation", "abstract": "Semi-supervised node classification in graphs is a fundamental problem in graph mining, and the recently proposed graph neural networks (GNNs) have achieved unparalleled results on this task. Due to their massive success, GNNs have attracted a lot of attention, and many novel architectures have been put forward. In this paper we show that existing evaluation strategies for GNN models have serious shortcomings. We show that using the same train/validation/test splits of the same datasets, as well as making significant changes to the training procedure (e.g. early stopping criteria) precludes a fair comparison of different architectures. We perform a thorough empirical evaluation of four prominent GNN models and show that considering different splits of the data leads to dramatically different rankings of models. Even more importantly, our findings suggest that simpler GNN architectures are able to outperform the more sophisticated ones if the hyperparameters and the training procedure are tuned fairly for all models.", "year": 2018, "venue": "arXiv.org", "authors": [ "Oleksandr Shchur", "Maximilian Mumme", "Aleksandar Bojchevski", "Stephan Günnemann" ], "externalIds": { "ArXiv": "1811.05868", "MAG": "2900470550", "DBLP": "journals/corr/abs-1811-05868", "CorpusId": 53303554 }, "url": "https://www.semanticscholar.org/paper/c2d40522eaa5523d67a0de5e4098e7031fdccb3d", "referenceCount": 22, "citationCount": 1111, "influentialCitationCount": 171, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "AutoInt: Automatic Feature Interaction Learning via Self-Attentive Neural Networks", "abstract": "Click-through rate (CTR) prediction, which aims to predict the probability of a user clicking on an ad or an item, is critical to many online applications such as online advertising and recommender systems. The problem is very challenging since (1) the input features (e.g., the user id, user age, item id, item category) are usually sparse and high-dimensional, and (2) an effective prediction relies on high-order combinatorial features (a.k.a. cross features), which are very time-consuming to hand-craft by domain experts and are impossible to be enumerated. Therefore, there have been efforts in finding low-dimensional representations of the sparse and high-dimensional raw features and their meaningful combinations. In this paper, we propose an effective and efficient method called the AutoInt to automatically learn the high-order feature interactions of input features. Our proposed algorithm is very general, which can be applied to both numerical and categorical input features. Specifically, we map both the numerical and categorical features into the same low-dimensional space. Afterwards, a multi-head self-attentive neural network with residual connections is proposed to explicitly model the feature interactions in the low-dimensional space. With different layers of the multi-head self-attentive neural networks, different orders of feature combinations of input features can be modeled. 
The whole model can be efficiently fit on large-scale raw data in an end-to-end fashion. Experimental results on four real-world datasets show that our proposed approach not only outperforms existing state-of-the-art approaches for prediction but also offers good explainability. Code is available at: https://github.com/DeepGraphLearning/RecommenderSystems.", "year": 2018, "venue": "International Conference on Information and Knowledge Management", "authors": [ "Weiping Song", "Chence Shi", "Zhiping Xiao", "Zhijian Duan", "Yewen Xu", "Ming Zhang", "Jian Tang" ], "externalIds": { "DBLP": "conf/cikm/SongS0DX0T19", "ArXiv": "1810.11921", "MAG": "2898085636", "DOI": "10.1145/3357384.3357925", "CorpusId": 53100214 }, "url": "https://www.semanticscholar.org/paper/08588a4e596b02f22ac77dc8300aaabc27cb66b4", "referenceCount": 45, "citationCount": 701, "influentialCitationCount": 107, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LightGBM: A Highly Efficient Gradient Boosting Decision Tree", "abstract": "Gradient Boosting Decision Tree (GBDT) is a popular machine learning algorithm, and has quite a few effective implementations such as XGBoost and pGBRT. Although many engineering optimizations have been adopted in these implementations, the efficiency and scalability are still unsatisfactory when the feature dimension is high and data size is large. A major reason is that for each feature, they need to scan all the data instances to estimate the information gain of all possible split points, which is very time consuming. To tackle this problem, we propose two novel techniques: \\emph{Gradient-based One-Side Sampling} (GOSS) and \\emph{Exclusive Feature Bundling} (EFB). With GOSS, we exclude a significant proportion of data instances with small gradients, and only use the rest to estimate the information gain. We prove that, since the data instances with larger gradients play a more important role in the computation of information gain, GOSS can obtain quite accurate estimation of the information gain with a much smaller data size. With EFB, we bundle mutually exclusive features (i.e., they rarely take nonzero values simultaneously), to reduce the number of features. We prove that finding the optimal bundling of exclusive features is NP-hard, but a greedy algorithm can achieve quite good approximation ratio (and thus can effectively reduce the number of features without hurting the accuracy of split point determination by much). We call our new GBDT implementation with GOSS and EFB \\emph{LightGBM}. 
Our experiments on multiple public datasets show that, LightGBM speeds up the training process of conventional GBDT by up to over 20 times while achieving almost the same accuracy.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Guolin Ke", "Qi Meng", "Thomas Finley", "Taifeng Wang", "Wei Chen", "Weidong Ma", "Qiwei Ye", "Tie-Yan Liu" ], "externalIds": { "DBLP": "conf/nips/KeMFWCMYL17", "MAG": "2753094203", "CorpusId": 3815895 }, "url": "https://www.semanticscholar.org/paper/497e4b08279d69513e4d2313a7fd9a55dfb73273", "referenceCount": 32, "citationCount": 8514, "influentialCitationCount": 931, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Graph Attention Networks", "abstract": "We present graph attention networks (GATs), novel neural network architectures that operate on graph-structured data, leveraging masked self-attentional layers to address the shortcomings of prior methods based on graph convolutions or their approximations. By stacking layers in which nodes are able to attend over their neighborhoods' features, we enable (implicitly) specifying different weights to different nodes in a neighborhood, without requiring any kind of costly matrix operation (such as inversion) or depending on knowing the graph structure upfront. In this way, we address several key challenges of spectral-based graph neural networks simultaneously, and make our model readily applicable to inductive as well as transductive problems. Our GAT models have achieved or matched state-of-the-art results across four established transductive and inductive graph benchmarks: the Cora, Citeseer and Pubmed citation network datasets, as well as a protein-protein interaction dataset (wherein test graphs remain unseen during training).", "year": 2017, "venue": "International Conference on Learning Representations", "authors": [ "Petar Velickovic", "Guillem Cucurull", "Arantxa Casanova", "Adriana Romero", "P. Lio’", "Yoshua Bengio" ], "externalIds": { "MAG": "2766453196", "ArXiv": "1710.10903", "DBLP": "journals/corr/abs-1710-10903", "DOI": "10.17863/CAM.48429", "CorpusId": 3292002 }, "url": "https://www.semanticscholar.org/paper/33998aff64ce51df8dee45989cdca4b6b1329ec4", "referenceCount": 45, "citationCount": 16828, "influentialCitationCount": 3007, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "CatBoost: unbiased boosting with categorical features", "abstract": "This paper presents the key algorithmic techniques behind CatBoost, a new gradient boosting toolkit. Their combination leads to CatBoost outperforming other publicly available boosting implementations in terms of quality on a variety of datasets. Two critical algorithmic advances introduced in CatBoost are the implementation of ordered boosting, a permutation-driven alternative to the classic algorithm, and an innovative algorithm for processing categorical features. Both techniques were created to fight a prediction shift caused by a special kind of target leakage present in all currently existing implementations of gradient boosting algorithms. In this paper, we provide a detailed analysis of this problem and demonstrate that proposed algorithms solve it effectively, leading to excellent empirical results.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "L. Ostroumova", "Gleb Gusev", "A. 
Vorobev", "Anna Veronika Dorogush", "Andrey Gulin" ], "externalIds": { "DBLP": "conf/nips/ProkhorenkovaGV18", "MAG": "2964022491", "CorpusId": 5044218 }, "url": "https://www.semanticscholar.org/paper/ee0a0f04d45f86bf50b24d7258e884725fcaa621", "referenceCount": 34, "citationCount": 2557, "influentialCitationCount": 280, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Attention is All you Need", "abstract": "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Ashish Vaswani", "Noam M. Shazeer", "Niki Parmar", "Jakob Uszkoreit", "Llion Jones", "Aidan N. Gomez", "Lukasz Kaiser", "Illia Polosukhin" ], "externalIds": { "MAG": "2963403868", "DBLP": "conf/nips/VaswaniSPUJGKP17", "ArXiv": "1706.03762", "CorpusId": 13756489 }, "url": "https://www.semanticscholar.org/paper/204e3073870fae3d05bcbc2f6a8e263d9b72e776", "referenceCount": 41, "citationCount": 104988, "influentialCitationCount": 15363, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Self-Normalizing Neural Networks", "abstract": "Deep Learning has revolutionized vision via convolutional neural networks (CNNs) and natural language processing via recurrent neural networks (RNNs). However, success stories of Deep Learning with standard feed-forward neural networks (FNNs) are rare. FNNs that perform well are typically shallow and, therefore cannot exploit many levels of abstract representations. We introduce self-normalizing neural networks (SNNs) to enable high-level abstract representations. While batch normalization requires explicit normalization, neuron activations of SNNs automatically converge towards zero mean and unit variance. The activation function of SNNs are \"scaled exponential linear units\" (SELUs), which induce self-normalizing properties. Using the Banach fixed-point theorem, we prove that activations close to zero mean and unit variance that are propagated through many network layers will converge towards zero mean and unit variance -- even under the presence of noise and perturbations. This convergence property of SNNs allows to (1) train deep networks with many layers, (2) employ strong regularization, and (3) to make learning highly robust. Furthermore, for activations not close to unit variance, we prove an upper and lower bound on the variance, thus, vanishing and exploding gradients are impossible. 
We compared SNNs on (a) 121 tasks from the UCI machine learning repository, on (b) drug discovery benchmarks, and on (c) astronomy tasks with standard FNNs and other machine learning methods such as random forests and support vector machines. SNNs significantly outperformed all competing FNN methods at 121 UCI tasks, outperformed all competing methods at the Tox21 dataset, and set a new record at an astronomy data set. The winning SNN architectures are often very deep. Implementations are available at: this http URL.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "G. Klambauer", "Thomas Unterthiner", "Andreas Mayr", "Sepp Hochreiter" ], "externalIds": { "MAG": "2624413595", "DBLP": "journals/corr/KlambauerUMH17", "ArXiv": "1706.02515", "CorpusId": 13713980 }, "url": "https://www.semanticscholar.org/paper/424a6e62084d919bfc2e39a507c263e5991ebdad", "referenceCount": 43, "citationCount": 2267, "influentialCitationCount": 235, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Inductive Representation Learning on Large Graphs", "abstract": "Low-dimensional embeddings of nodes in large graphs have proved extremely useful in a variety of prediction tasks, from content recommendation to identifying protein functions. However, most existing approaches require that all nodes in the graph are present during training of the embeddings; these previous approaches are inherently transductive and do not naturally generalize to unseen nodes. Here we present GraphSAGE, a general, inductive framework that leverages node feature information (e.g., text attributes) to efficiently generate node embeddings for previously unseen data. Instead of training individual embeddings for each node, we learn a function that generates embeddings by sampling and aggregating features from a node's local neighborhood. Our algorithm outperforms strong baselines on three inductive node-classification benchmarks: we classify the category of unseen nodes in evolving information graphs based on citation and Reddit post data, and we show that our algorithm generalizes to completely unseen graphs using a multi-graph dataset of protein-protein interactions.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "William L. Hamilton", "Z. Ying", "J. Leskovec" ], "externalIds": { "DBLP": "conf/nips/HamiltonYL17", "MAG": "2952779545", "ArXiv": "1706.02216", "CorpusId": 4755450 }, "url": "https://www.semanticscholar.org/paper/6b7d6e6416343b2a122f8416e69059ce919026ef", "referenceCount": 42, "citationCount": 12811, "influentialCitationCount": 2278, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Neural Message Passing for Quantum Chemistry", "abstract": "Supervised learning on molecules has incredible potential to be useful in chemistry, drug discovery, and materials science. Luckily, several promising and closely related neural network models invariant to molecular symmetries have already been described in the literature. These models learn a message passing algorithm and aggregation procedure to compute a function of their entire input graph. At this point, the next step is to find a particularly effective variant of this general approach and apply it to chemical prediction benchmarks until we either solve them or reach the limits of the approach. 
In this paper, we reformulate existing models into a single common framework we call Message Passing Neural Networks (MPNNs) and explore additional novel variations within this framework. Using MPNNs we demonstrate state of the art results on an important molecular property prediction benchmark; these results are strong enough that we believe future work should focus on datasets with larger molecules or more accurate ground truth labels.", "year": 2017, "venue": "International Conference on Machine Learning", "authors": [ "J. Gilmer", "S. Schoenholz", "Patrick F. Riley", "O. Vinyals", "George E. Dahl" ], "externalIds": { "DBLP": "journals/corr/GilmerSRVD17", "MAG": "2952254971", "ArXiv": "1704.01212", "CorpusId": 9665943 }, "url": "https://www.semanticscholar.org/paper/e24cdf73b3e7e590c2fe5ecac9ae8aa983801367", "referenceCount": 37, "citationCount": 6566, "influentialCitationCount": 751, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Modeling Relational Data with Graph Convolutional Networks", "abstract": null, "year": 2017, "venue": "Extended Semantic Web Conference", "authors": [ "M. Schlichtkrull", "Thomas Kipf", "Peter Bloem", "Rianne van den Berg", "Ivan Titov", "M. Welling" ], "externalIds": { "DBLP": "conf/esws/SchlichtkrullKB18", "ArXiv": "1703.06103", "MAG": "2604314403", "DOI": "10.1007/978-3-319-93417-4_38", "CorpusId": 5458500 }, "url": "https://www.semanticscholar.org/paper/cd8a9914d50b0ac63315872530274d158d6aff09", "referenceCount": 54, "citationCount": 4203, "influentialCitationCount": 733, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Semi-Supervised Classification with Graph Convolutional Networks", "abstract": "We present a scalable approach for semi-supervised learning on graph-structured data that is based on an efficient variant of convolutional neural networks which operate directly on graphs. We motivate the choice of our convolutional architecture via a localized first-order approximation of spectral graph convolutions. Our model scales linearly in the number of graph edges and learns hidden layer representations that encode both local graph structure and features of nodes. In a number of experiments on citation networks and on a knowledge graph dataset we demonstrate that our approach outperforms related methods by a significant margin.", "year": 2016, "venue": "International Conference on Learning Representations", "authors": [ "Thomas Kipf", "M. Welling" ], "externalIds": { "ArXiv": "1609.02907", "MAG": "2519887557", "DBLP": "journals/corr/KipfW16", "CorpusId": 3144218 }, "url": "https://www.semanticscholar.org/paper/36eff562f65125511b5dfab68ce7f7a943c27478", "referenceCount": 38, "citationCount": 25291, "influentialCitationCount": 6216, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Layer Normalization", "abstract": "Training state-of-the-art, deep neural networks is computationally expensive. One way to reduce the training time is to normalize the activities of the neurons. A recently introduced technique called batch normalization uses the distribution of the summed input to a neuron over a mini-batch of training cases to compute a mean and variance which are then used to normalize the summed input to that neuron on each training case. This significantly reduces the training time in feed-forward neural networks. 
However, the effect of batch normalization is dependent on the mini-batch size and it is not obvious how to apply it to recurrent neural networks. In this paper, we transpose batch normalization into layer normalization by computing the mean and variance used for normalization from all of the summed inputs to the neurons in a layer on a single training case. Like batch normalization, we also give each neuron its own adaptive bias and gain which are applied after the normalization but before the non-linearity. Unlike batch normalization, layer normalization performs exactly the same computation at training and test times. It is also straightforward to apply to recurrent neural networks by computing the normalization statistics separately at each time step. Layer normalization is very effective at stabilizing the hidden state dynamics in recurrent networks. Empirically, we show that layer normalization can substantially reduce the training time compared with previously published techniques.", "year": 2016, "venue": "arXiv.org", "authors": [ "Jimmy Ba", "J. Kiros", "Geoffrey E. Hinton" ], "externalIds": { "MAG": "3037932933", "ArXiv": "1607.06450", "DBLP": "journals/corr/BaKH16", "CorpusId": 8236317 }, "url": "https://www.semanticscholar.org/paper/97fb4e3d45bb098e27e0071448b6152217bd35a5", "referenceCount": 33, "citationCount": 9244, "influentialCitationCount": 409, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Gaussian Error Linear Units (GELUs)", "abstract": "We propose the Gaussian Error Linear Unit (GELU), a high-performing neural network activation function. The GELU activation function is $x\\Phi(x)$, where $\\Phi(x)$ the standard Gaussian cumulative distribution function. The GELU nonlinearity weights inputs by their value, rather than gates inputs by their sign as in ReLUs ($x\\mathbf{1}_{x>0}$). We perform an empirical evaluation of the GELU nonlinearity against the ReLU and ELU activations and find performance improvements across all considered computer vision, natural language processing, and speech tasks.", "year": 2016, "venue": "", "authors": [ "Dan Hendrycks", "Kevin Gimpel" ], "externalIds": { "ArXiv": "1606.08415", "MAG": "2899663614", "CorpusId": 125617073 }, "url": "https://www.semanticscholar.org/paper/de5e7320729f5d3cbb6709eb6329ec41ace8c95d", "referenceCount": 30, "citationCount": 3995, "influentialCitationCount": 323, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Revisiting Semi-Supervised Learning with Graph Embeddings", "abstract": "We present a semi-supervised learning framework based on graph embeddings. Given a graph between instances, we train an embedding for each instance to jointly predict the class label and the neighborhood context in the graph. We develop both transductive and inductive variants of our method. In the transductive variant of our method, the class labels are determined by both the learned embeddings and input feature vectors, while in the inductive variant, the embeddings are defined as a parametric function of the feature vectors, so predictions can be made on instances not seen during training. On a large and diverse set of benchmark tasks, including text classification, distantly supervised entity extraction, and entity classification, we show improved performance over many of the existing models.", "year": 2016, "venue": "International Conference on Machine Learning", "authors": [ "Zhilin Yang", "William W. Cohen", "R. 
Salakhutdinov" ], "externalIds": { "MAG": "2315403234", "ArXiv": "1603.08861", "DBLP": "journals/corr/YangCS16", "CorpusId": 7008752 }, "url": "https://www.semanticscholar.org/paper/3d846cb01f6a975554035d2210b578ca61344b22", "referenceCount": 28, "citationCount": 1846, "influentialCitationCount": 274, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "XGBoost: A Scalable Tree Boosting System", "abstract": "Tree boosting is a highly effective and widely used machine learning method. In this paper, we describe a scalable end-to-end tree boosting system called XGBoost, which is used widely by data scientists to achieve state-of-the-art results on many machine learning challenges. We propose a novel sparsity-aware algorithm for sparse data and weighted quantile sketch for approximate tree learning. More importantly, we provide insights on cache access patterns, data compression and sharding to build a scalable tree boosting system. By combining these insights, XGBoost scales beyond billions of examples using far fewer resources than existing systems.", "year": 2016, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Tianqi Chen", "Carlos Guestrin" ], "externalIds": { "ArXiv": "1603.02754", "DBLP": "conf/kdd/ChenG16", "MAG": "3102476541", "DOI": "10.1145/2939672.2939785", "CorpusId": 4650265 }, "url": "https://www.semanticscholar.org/paper/26bc9195c6343e4d7f434dd65b4ad67efe2be27a", "referenceCount": 26, "citationCount": 30771, "influentialCitationCount": 2876, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Residual Learning for Image Recognition", "abstract": "Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers - 8× deeper than VGG nets [40] but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions1, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation.", "year": 2015, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Kaiming He", "X. 
Zhang", "Shaoqing Ren", "Jian Sun" ], "externalIds": { "DBLP": "conf/cvpr/HeZRS16", "MAG": "2949650786", "ArXiv": "1512.03385", "DOI": "10.1109/cvpr.2016.90", "CorpusId": 206594692 }, "url": "https://www.semanticscholar.org/paper/2c03df8b48bf3fa39054345bafabfeff15bfd11d", "referenceCount": 53, "citationCount": 172713, "influentialCitationCount": 28229, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The CTU Prague Relational Learning Repository", "abstract": "The aim of the CTU Prague Relational Learning Repository is to support machine learning research with multi-relational data. The repository currently contains 50 SQL databases hosted on a public MySQL server located at relational.fit.cvut.cz. A searchable meta-database provides metadata (e.g., the number of tables in the database, the number of rows and columns in the tables, the number of foreign key constraints between tables).", "year": 2015, "venue": "arXiv.org", "authors": [ "Jan Motl", "O. Schulte" ], "externalIds": { "DBLP": "journals/corr/MotlS15", "ArXiv": "1511.03086", "MAG": "2270062199", "CorpusId": 9700490 }, "url": "https://www.semanticscholar.org/paper/c3d1c5237dba4d6da90cad4ffc3347dbc5dfd145", "referenceCount": 30, "citationCount": 51, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Neural Machine Translation by Jointly Learning to Align and Translate", "abstract": "Neural machine translation is a recently proposed approach to machine translation. Unlike the traditional statistical machine translation, the neural machine translation aims at building a single neural network that can be jointly tuned to maximize the translation performance. The models proposed recently for neural machine translation often belong to a family of encoder-decoders and consists of an encoder that encodes a source sentence into a fixed-length vector from which a decoder generates a translation. In this paper, we conjecture that the use of a fixed-length vector is a bottleneck in improving the performance of this basic encoder-decoder architecture, and propose to extend this by allowing a model to automatically (soft-)search for parts of a source sentence that are relevant to predicting a target word, without having to form these parts as a hard segment explicitly. With this new approach, we achieve a translation performance comparable to the existing state-of-the-art phrase-based system on the task of English-to-French translation. Furthermore, qualitative analysis reveals that the (soft-)alignments found by the model agree well with our intuition.", "year": 2014, "venue": "International Conference on Learning Representations", "authors": [ "Dzmitry Bahdanau", "Kyunghyun Cho", "Yoshua Bengio" ], "externalIds": { "MAG": "2133564696", "ArXiv": "1409.0473", "DBLP": "journals/corr/BahdanauCB14", "CorpusId": 11212020 }, "url": "https://www.semanticscholar.org/paper/fa72afa9b2cbc8f0d7b05d52548906610ffbb9c5", "referenceCount": 33, "citationCount": 25863, "influentialCitationCount": 2485, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "The structure and dynamics of multilayer networks", "abstract": null, "year": 2014, "venue": "Physics reports", "authors": [ "S. Boccaletti", "G. Bianconi", "R. Criado", "C. D. Genio", "J. Gómez-Gardeñes", "M. Romance", "I. Sendiña-Nadal", "Z. Wang", "M. 
Zanin" ], "externalIds": { "MAG": "2098005762", "PubMedCentral": "7332224", "ArXiv": "1407.0742", "DBLP": "journals/corr/BoccalettiBCGGRSWZ14", "DOI": "10.1016/j.physrep.2014.07.001", "CorpusId": 13961767, "PubMed": "32834429" }, "url": "https://www.semanticscholar.org/paper/90fa61aca8822ee7124bcb95f7e7e2ca9e30abc8", "referenceCount": 533, "citationCount": 2681, "influentialCitationCount": 89, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Computer Science", "Medicine" ] }, { "title": "DeepWalk: online learning of social representations", "abstract": "We present DeepWalk, a novel approach for learning latent representations of vertices in a network. These latent representations encode social relations in a continuous vector space, which is easily exploited by statistical models. DeepWalk generalizes recent advancements in language modeling and unsupervised feature learning (or deep learning) from sequences of words to graphs. DeepWalk uses local information obtained from truncated random walks to learn latent representations by treating walks as the equivalent of sentences. We demonstrate DeepWalk's latent representations on several multi-label network classification tasks for social networks such as BlogCatalog, Flickr, and YouTube. Our results show that DeepWalk outperforms challenging baselines which are allowed a global view of the network, especially in the presence of missing information. DeepWalk's representations can provide F1 scores up to 10% higher than competing methods when labeled data is sparse. In some experiments, DeepWalk's representations are able to outperform all baseline methods while using 60% less training data. DeepWalk is also scalable. It is an online learning algorithm which builds useful incremental results, and is trivially parallelizable. These qualities make it suitable for a broad class of real world applications such as network classification, and anomaly detection.", "year": 2014, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Bryan Perozzi", "Rami Al-Rfou", "S. Skiena" ], "externalIds": { "DBLP": "conf/kdd/PerozziAS14", "MAG": "2154851992", "ArXiv": "1403.6652", "DOI": "10.1145/2623330.2623732", "CorpusId": 3051291 }, "url": "https://www.semanticscholar.org/paper/fff114cbba4f3ba900f33da574283e3de7f26c83", "referenceCount": 51, "citationCount": 8938, "influentialCitationCount": 1507, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "What Yelp Fake Review Filter Might Be Doing?", "abstract": "\n \n Online reviews have become a valuable resource for decision making. However, its usefulness brings forth a curse ‒ deceptive opinion spam. In recent years, fake review detection has attracted significant attention. However, most review sites still do not publicly filter fake reviews. Yelp is an exception which has been filtering reviews over the past few years. However, Yelp’s algorithm is trade secret. In this work, we attempt to find out what Yelp might be doing by analyzing its filtered reviews. The results will be useful to other review hosting sites in their filtering effort. There are two main approaches to filtering: supervised and unsupervised learning. In terms of features used, there are also roughly two types: linguistic features and behavioral features. In this work, we will take a supervised approach as we can make use of Yelp’s filtered reviews for training. Existing approaches based on supervised learning are all based on pseudo fake reviews rather than fake reviews filtered by a commercial Web site. 
Recently, supervised learning using linguistic n-gram features has been shown to perform extremely well (attaining around 90% accuracy) in detecting crowdsourced fake reviews generated using Amazon Mechanical Turk (AMT). We put these existing research methods to the test and evaluate performance on the real-life Yelp data. To our surprise, the behavioral features perform very well, but the linguistic features are not as effective. To investigate, a novel information theoretic analysis is proposed to uncover the precise psycholinguistic difference between AMT reviews and Yelp reviews (crowdsourced vs. commercial fake reviews). We find something quite interesting. This analysis and experimental results allow us to postulate that Yelp’s filtering is reasonable and its filtering algorithm seems to be correlated with abnormal spamming behaviors.\n \n", "year": 2013, "venue": "International Conference on Web and Social Media", "authors": [ "Arjun Mukherjee", "V. Venkataraman", "B. Liu", "N. Glance" ], "externalIds": { "MAG": "893486657", "DBLP": "conf/icwsm/MukherjeeV0G13", "DOI": "10.1609/icwsm.v7i1.14389", "CorpusId": 5813589 }, "url": "https://www.semanticscholar.org/paper/6d3e5feee7f03606ae2fe11c165846abe5f360a3", "referenceCount": 49, "citationCount": 573, "influentialCitationCount": 92, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "From amateurs to connoisseurs: modeling the evolution of user expertise through online reviews", "abstract": "Recommending products to consumers means not only understanding their tastes, but also understanding their level of experience. For example, it would be a mistake to recommend the iconic film Seven Samurai simply because a user enjoys other action movies; rather, we might conclude that they will eventually enjoy it---once they are ready. The same is true for beers, wines, gourmet foods---or any products where users have acquired tastes: the `best' products may not be the most 'accessible'. Thus our goal in this paper is to recommend products that a user will enjoy now, while acknowledging that their tastes may have changed over time, and may change again in the future. We model how tastes change due to the very act of consuming more products---in other words, as users become more experienced. We develop a latent factor recommendation system that explicitly accounts for each user's level of experience. We find that such a model not only leads to better recommendations, but also allows us to study the role of user experience and expertise on a novel dataset of fifteen million beer, wine, food, and movie reviews.", "year": 2013, "venue": "The Web Conference", "authors": [ "Julian McAuley", "J. Leskovec" ], "externalIds": { "MAG": "2951727499", "DBLP": "conf/www/McAuleyL13", "ArXiv": "1303.4402", "DOI": "10.1145/2488388.2488466", "CorpusId": 3047006 }, "url": "https://www.semanticscholar.org/paper/4d7bb722ff0a1bd5894cd8a620d5fe0673cbf721", "referenceCount": 44, "citationCount": 517, "influentialCitationCount": 57, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Physics", "Business" ] }, { "title": "Collective Classification in Network Data", "abstract": "Many real-world applications produce networked data such as the world-wide web (hypertext documents connected via hyperlinks), social networks (for example, people connected by friendship links), communication networks (computers connected via communication links) and biological networks (for example, protein interaction networks). 
A recent focus in machine learning research has been to extend traditional machine learning classification techniques to classify nodes in such networks. In this article, we provide a brief introduction to this area of research and how it has progressed during the past decade. We introduce four of the most widely used inference algorithms for classifying networked data and empirically compare them on both synthetic and real-world data.", "year": 2008, "venue": "The AI Magazine", "authors": [ "Prithviraj Sen", "Galileo Namata", "M. Bilgic", "L. Getoor", "Brian Gallagher", "Tina Eliassi-Rad" ], "externalIds": { "MAG": "2403788960", "DBLP": "journals/aim/SenNBGGE08", "DOI": "10.1201/b17320-16", "CorpusId": 62016134 }, "url": "https://www.semanticscholar.org/paper/43d2ed5c3c55c1100450cd74dc1031afa24d37b2", "referenceCount": 94, "citationCount": 3575, "influentialCitationCount": 646, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Mixing patterns in networks.", "abstract": "We study assortative mixing in networks, the tendency for vertices in networks to be connected to other vertices that are like (or unlike) them in some way. We consider mixing according to discrete characteristics such as language or race in social networks and scalar characteristics such as age. As a special example of the latter we consider mixing according to vertex degree, i.e., according to the number of connections vertices have to other vertices: do gregarious people tend to associate with other gregarious people? We propose a number of measures of assortative mixing appropriate to the various mixing types, and apply them to a variety of real-world networks, showing that assortative mixing is a pervasive phenomenon found in many networks. We also propose several models of assortatively mixed networks, both analytic ones based on generating function methods, and numerical ones based on Monte Carlo graph generation techniques. We use these models to probe the properties of networks as their level of assortativity is varied. In the particular case of mixing by degree, we find strong variation with assortativity in the connectivity of the network and in the resilience of the network to the removal of vertices.", "year": 2002, "venue": "Physical review. E, Statistical, nonlinear, and soft matter physics", "authors": [ "M. Newman", "M. Newman" ], "externalIds": { "MAG": "2033193852", "ArXiv": "cond-mat/0209450", "DOI": "10.1103/PhysRevE.67.026126", "CorpusId": 15186389, "PubMed": "12636767" }, "url": "https://www.semanticscholar.org/paper/0ed877bab75b32042a887715380c84ac27e64a8b", "referenceCount": 107, "citationCount": 2809, "influentialCitationCount": 236, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Mathematics", "Physics", "Computer Science" ] }, { "title": "Greedy function approximation: A gradient boosting machine.", "abstract": "Function estimation/approximation is viewed from the perspective of numerical optimization in function space, rather than parameter space. A connection is made between stagewise additive expansions and steepest-descent minimization. A general gradient descent boosting paradigm is developed for additive expansions based on any fitting criterion. Specific algorithms are presented for least-squares, least absolute deviation, and Huber-M loss functions for regression, and multiclass logistic likelihood for classification. 
Special enhancements are derived for the particular case where the individual additive components are regression trees, and tools for interpreting such TreeBoost models are presented. Gradient boosting of regression trees produces competitive, highly robust, interpretable procedures for both regression and classification, especially appropriate for mining less than clean data. Connections between this approach and the boosting methods of Freund and Shapire and Friedman, Hastie and Tibshirani are discussed.", "year": 2001, "venue": "", "authors": [ "J. Friedman" ], "externalIds": { "MAG": "1678356000", "DOI": "10.1214/AOS/1013203451", "CorpusId": 39450643 }, "url": "https://www.semanticscholar.org/paper/1679beddda3a183714d380e944fe6bf586c083cd", "referenceCount": 29, "citationCount": 21211, "influentialCitationCount": 1902, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Automating the Construction of Internet Portals with Machine Learning", "abstract": null, "year": 2000, "venue": "Information retrieval (Boston)", "authors": [ "A. McCallum", "K. Nigam", "Jason D. M. Rennie", "K. Seymore" ], "externalIds": { "DBLP": "journals/ir/McCallumNRS00", "MAG": "2162630660", "DOI": "10.1023/A:1009953814988", "CorpusId": 349242 }, "url": "https://www.semanticscholar.org/paper/04f4085c0126ba29453a582cd1e62e05c8e15c82", "referenceCount": 52, "citationCount": 1251, "influentialCitationCount": 160, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "CiteSeer: an automatic citation indexing system", "abstract": "We present CiteSeer: an autonomous citation indexing system which indexes academic literature in electronic format (e.g. Postscript files on the Web). CiteSeer understands how to parse citations, identify citations to the same paper in different formats, and identify the context of citations in the body of articles. CiteSeer provides most of the advantages of traditional (manually constructed) citation indexes (e.g. the ISI citation indexes), including: literature retrieval by following citation links (e.g. by providing a list of papers that cite a given paper), the evaluation and ranking of papers, authors, journals, etc. based on the number of citations, and the identification of research trends. CiteSeer has many advantages over traditional citation indexes, including the ability to create more up-to-date databases which are not limited to a preselected set of journals or restricted by journal publication delays, completely autonomous operation with a corresponding reduction in cost, and powerful interactive browsing of the literature using the context of citations. Given a particular paper of interest, CiteSeer can display the context of how the paper is cited in subsequent publications. This context may contain a brief summary of the paper, another author’s response to the paper, or subsequent work which builds upon the original article. CiteSeer allows the location of papers by keyword search or by citation links. Papers related to a given paper can be located using common citation information or word vector similarity. CiteSeer will soon be available for public use.", "year": 1998, "venue": "Digital library", "authors": [ "C. Lee Giles", "K. Bollacker", "S. 
Lawrence" ], "externalIds": { "MAG": "2168190036", "DBLP": "conf/dl/GilesBL98", "DOI": "10.1145/276675.276685", "CorpusId": 514080 }, "url": "https://www.semanticscholar.org/paper/592462425a4d23547dd0f3c9318350e5dcceb1a6", "referenceCount": 38, "citationCount": 1118, "influentialCitationCount": 108, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "TabR: Tabular Deep Learning Meets Nearest Neighbors", "abstract": null, "year": 2024, "venue": "International Conference on Learning Representations", "authors": [ "Yu. V. Gorishniy", "Ivan Rubachev", "Nikolay Kartashev", "Daniil Shlenskii", "Akim Kotelnikov", "Artem Babenko" ], "externalIds": { "DBLP": "conf/iclr/GorishniyRKSKB24", "CorpusId": 271745859 }, "url": "https://www.semanticscholar.org/paper/ca5c5a0e8aa191360ff5d3b3c689239ab39d7a48", "referenceCount": 0, "citationCount": 2, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Why do tree-based models still outperform deep learning on typical tabular data?", "abstract": "While deep learning has enabled tremendous progress on text and image datasets, its superiority on tabular data is not clear. We contribute extensive benchmarks of standard and novel deep learning methods as well as tree-based models such as XGBoost and Random Forests, across a large number of datasets and hyperparameter combinations. We define a standard set of 45 datasets from varied domains with clear characteristics of tabular data and a benchmarking methodology accounting for both fitting models and finding good hyperparameters. Results show that tree-based models remain state-of-the-art on medium-sized data ( ∼ 10K samples) even without accounting for their superior speed. To understand this gap, we conduct an empirical investigation into the differing inductive biases of tree-based models and Neural Networks (NNs). This leads to a series of challenges which should guide researchers aiming to build tabular-specific NNs: 1. be robust to uninformative features, 2. preserve the orientation of the data, and 3. be able to easily learn irregular functions. To stimulate research on tabular architectures, we contribute a standard benchmark and raw data for baselines: every point of a 20 000 compute hours hyperparameter search for each learner. Results Looking at the results as a function of random search time rather than random search iterations tree-based models superiority even more striking. Neural networks and tree-based models were close for some benchmarks after a small number of iterations, but for the same amount of time spent on random search, tree-based models scores are always high above neural networks.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Léo Grinsztajn", "Edouard Oyallon", "G. 
Varoquaux" ], "externalIds": { "DBLP": "conf/nips/GrinsztajnOV22", "CorpusId": 252697222 }, "url": "https://www.semanticscholar.org/paper/5a00b32876f7d4869bce980500d4ccc978389315", "referenceCount": 72, "citationCount": 444, "influentialCitationCount": 47, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Enhanced Representation for Tabular Data via Neighborhood Propagation", "abstract": null, "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Kounianhua Du", "Weinan Zhang", "Ruiwen Zhou", "Yangkun Wang", "Xilong Zhao", "Jiarui Jin", "Quan Gan", "Zheng Zhang", "David Wipf" ], "externalIds": { "DBLP": "conf/nips/Du0ZWZJG0W22", "CorpusId": 263891007 }, "url": "https://www.semanticscholar.org/paper/205ecf4503cd13d7d176fc73b89ea5df5e57196f", "referenceCount": 0, "citationCount": 12, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Benchmarking and Analyzing Unsupervised Network Representation Learning and the Illusion of Progress", "abstract": "A number of methods have been developed for unsupervised network representation learning – ranging from classical methods based on the graph spectra to recent random walk based methods and from deep learning based methods to matrix factorization based methods. Each new study inevitably seeks to establish the relative superiority of the proposed method over others. The lack of a standard assessment protocol and benchmark suite often leave practitioners wondering if a new idea represents a significant scientific advance. In this work, we articulate a clear and pressing need to systematically and rigorously benchmark such methods. Our overall assessment – a result of a careful benchmarking of 15 methods for unsupervised network representation learning on 16 non-attributed graphs (several with different characteristics)", "year": 2022, "venue": "Trans. Mach. Learn. Res.", "authors": [ "Saket Gurukar", "Priyesh Vijayan", "Srinivas Parthasarathy", "B. Ravindran", "Aakash Srinivasan", "Goonmeet Bajaj", "Chen Cai", "Moniba Keymanesh", "Saravana Kumar", "Pranav Maneriker", "Anasua Mitra", "Vedang Patel" ], "externalIds": { "DBLP": "journals/tmlr/GurukarVPRSBCKKMMP22", "CorpusId": 250150507 }, "url": "https://www.semanticscholar.org/paper/44b7903158323426053dca317b8c29d8cd64c635", "referenceCount": 117, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Click-through rate prediction", "abstract": null, "year": 2014, "venue": "kaggle.com/ competitions/avazu-ctr-prediction", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "A method for stochastic optimization", "abstract": null, "year": 2014, "venue": "arXiv", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Query-driven Active Surveying for Collective Classification", "abstract": "In network classification problems such as those found in intelligence gathering, public health, and viral marketing, one is often only interested in inferring the labels of a subset of the nodes. We refer to this subset as the query set, and define the problem as query-driven collective classification. 
We study this problem in a practical active learning framework, in which the learning algorithm can survey non-query nodes to obtain their labels and network structure. We derive a surveying strategy aimed toward optimal inference on the query set. Considering both feature and structural smoothness, concepts that we formally define, we develop an algorithm which adaptively selects survey nodes by estimating which form of smoothness is most appropriate. We evaluate our algorithm on several network datasets and demonstrate its improvements over standard active learning methods.", "year": 2012, "venue": "", "authors": [ "Galileo Namata", "Ben London", "L. Getoor", "Bert Huang" ], "externalIds": { "MAG": "67413104", "CorpusId": 15384610 }, "url": "https://www.semanticscholar.org/paper/efac04450c531b3769451a886ed9a42fce4754d9", "referenceCount": 19, "citationCount": 435, "influentialCitationCount": 78, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Nerf: Representing scenes as neural radiance fields for view synthesis", "abstract": null, "year": null, "venue": "Communications of the ACM", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Toloker Graph: Interaction of Crowd Annotators", "abstract": null, "year": null, "venue": "github", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Boolean Product Graph Neural Networks": { "paper_title": "Boolean Product Graph Neural Networks", "arxiv_id": "2409.14001v1", "keyword": "graph neural network", "authors": [ "Ziyan Wang", "Bin Liu", "Ling Xiang" ], "references": [ { "title": "Autistic Spectrum Disorders Diagnose with Graph Neural Networks", "abstract": "Autism spectrum disorder (ASD) is a complex neurodevelopmental disorder that affects socialization and is characterized by abnormal, restricted, or repetitive language behaviors. Symptoms typically start to appear around the age of 2, making early diagnosis essential for treatment. One standardized screening method is an autism-specific interview with children's parents. However, this diagnostic process requires highly experienced physicians, making questionnaire-based screening less effective. Recently, imaging-based diagnosis has emerged as a more objective option. In this paper, we propose a graph neural network-based model for ASD diagnosis using Diffusion Tensor Imaging (DTI) and functional Magnetic Resonance Imaging (fMRI) data. We first calculate the correlations of 90 brain regions based on the automated anatomical labeling (AAL) template using brain imaging data of DTI and fMRI. This enables the construction of a comprehensive network map that delineates the interconnections among various brain regions. Subsequently, we propose to utilize a graph neural network for the purpose of diagnosing ASD, wherein the graph derived from DTI serves as the adjacency matrix, while the map of the fMRI is utilized as the node features. To improve the performance of diagnosis, we introduce a regularization of maximum inter-class graph distance and minimum intra-class graph distance, in addition to graph classification. We then calculate the correlation matrix between functional areas based on the obtained 90 implicit features corresponding to the nodes of functional areas and their 90 eigenvalues. 
We also perform hypothesis tests on the 90 eigenvalues corresponding to ASD negative and positive groups in turn to discover the pathogenic functional areas by comparing the eigenvalue distributions between the two groups. Our experiments on 138 real-world samples demonstrate the superior performance of our proposed model for diagnosis.", "year": 2023, "venue": "ACM Multimedia", "authors": [ "Lu Wei", "Bin Liu", "Jiujun He", "Manxue Zhang", "Yi Huang" ], "externalIds": { "DBLP": "conf/mm/WeiLHZH23", "DOI": "10.1145/3581783.3613818", "CorpusId": 264492133 }, "url": "https://www.semanticscholar.org/paper/5f1a33c31de2e2e53702feae9451f73ef12ccfb7", "referenceCount": 41, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Latent Graph Inference with Limited Supervision", "abstract": "Latent graph inference (LGI) aims to jointly learn the underlying graph structure and node representations from data features. However, existing LGI methods commonly suffer from the issue of supervision starvation, where massive edge weights are learned without semantic supervision and do not contribute to the training loss. Consequently, these supervision-starved weights, which may determine the predictions of testing samples, cannot be semantically optimal, resulting in poor generalization. In this paper, we observe that this issue is actually caused by the graph sparsification operation, which severely destroys the important connections established between pivotal nodes and labeled ones. To address this, we propose to restore the corrupted affinities and replenish the missed supervision for better LGI. The key challenge then lies in identifying the critical nodes and recovering the corrupted affinities. We begin by defining the pivotal nodes as $k$-hop starved nodes, which can be identified based on a given adjacency matrix. Considering the high computational burden, we further present a more efficient alternative inspired by CUR matrix decomposition. Subsequently, we eliminate the starved nodes by reconstructing the destroyed connections. Extensive experiments on representative benchmarks demonstrate that reducing the starved nodes consistently improves the performance of state-of-the-art LGI methods, especially under extremely limited supervision (6.12% improvement on Pubmed with a labeling rate of only 0.3%).", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Jianglin Lu", "Yi Xu", "Huan Wang", "Yue Bai", "Yun Fu" ], "externalIds": { "DBLP": "journals/corr/abs-2310-04314", "ArXiv": "2310.04314", "DOI": "10.48550/arXiv.2310.04314", "CorpusId": 263831655 }, "url": "https://www.semanticscholar.org/paper/b1c91d3d7ad6dbd26f313254ce00124bd1c31e8b", "referenceCount": 48, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Interpret ESG Rating's Impact on the Industrial Chain Using Graph Neural Networks", "abstract": "We conduct a quantitative analysis of the development of the industry chain from the environmental, social, and governance (ESG) perspective, which is an overall measure of sustainability. Factors that may impact the performance of the industrial chain have been studied in the literature, such as government regulation, monetary policy, etc. Our interest lies in how the sustainability change (i.e., ESG shock) affects the performance of the industrial chain. 
To achieve this goal, we model the industrial chain with a graph neural network (GNN) and conduct node regression on two financial performance metrics, namely, the aggregated profitability ratios and operating margin. To quantify the effects of ESG, we propose to compute the interaction between ESG shocks and industrial chain features with a cross-attention module, and then filter the original node features in the graph regression. Experiments on two real datasets demonstrate that (i) there are significant effects of ESG shocks on the industrial chain, and (ii) model parameters including regression coefficients and the attention map can explain how ESG shocks affect the performance of the industrial chain.", "year": 2023, "venue": "International Joint Conference on Artificial Intelligence", "authors": [ "Bin Liu", "Jiujun He", "Zi-Yuan Li", "Xiaoyan Huang", "Xiang Zhang", "Guosheng Yin" ], "externalIds": { "DBLP": "conf/ijcai/LiuHLHZY23", "DOI": "10.2139/ssrn.4467937", "CorpusId": 259435974 }, "url": "https://www.semanticscholar.org/paper/71f2a0bf25a8fb5c311d1ac64bc475e0782db84b", "referenceCount": 48, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Robust Graph Structure Learning with the Alignment of Features and Adjacency Matrix", "abstract": "To improve the robustness of graph neural networks (GNN), graph structure learning (GSL) has attracted great interest due to the pervasiveness of noise in graph data. Many approaches have been proposed for GSL to jointly learn a clean graph structure and corresponding representations. To extend the previous work, this paper proposes a novel regularized GSL approach, particularly with an alignment of feature information and graph information, which is motivated mainly by our derived lower bound of node-level Rademacher complexity for GNNs. Additionally, our proposed approach incorporates sparse dimensional reduction to leverage low-dimensional node features that are relevant to the graph structure. To evaluate the effectiveness of our approach, we conduct experiments on real-world graphs. The results demonstrate that our proposed GSL method outperforms several competitive baselines, especially in scenarios where the graph structures are heavily affected by noise. Overall, our research highlights the importance of integrating feature and graph information alignment in GSL, as inspired by our derived theoretical result, and showcases the superiority of our approach in handling noisy graph structures through comprehensive experiments on real-world datasets.", "year": 2023, "venue": "arXiv.org", "authors": [ "Shaogao Lv", "Gang Wen", "Shiyu Liu", "Linsen Wei", "Ming Li" ], "externalIds": { "ArXiv": "2307.02126", "DBLP": "journals/corr/abs-2307-02126", "DOI": "10.48550/arXiv.2307.02126", "CorpusId": 259342133 }, "url": "https://www.semanticscholar.org/paper/8be9db705d43e2dcba6bd463534ea3e54bb6f1d2", "referenceCount": 37, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Beyond Homophily: Reconstructing Structure for Graph-agnostic Clustering", "abstract": "Graph neural networks (GNNs) based methods have achieved impressive performance on node clustering task. However, they are designed on the homophilic assumption of graph and clustering on heterophilic graph is overlooked. 
Due to the lack of labels, it is impossible to first identify a graph as homophilic or heterophilic before a suitable GNN model can be found. Hence, clustering on real-world graph with various levels of homophily poses a new challenge to the graph research community. To fill this gap, we propose a novel graph clustering method, which contains three key components: graph reconstruction, a mixed filter, and dual graph clustering network. To be graph-agnostic, we empirically construct two graphs which are high homophily and heterophily from each data. The mixed filter based on the new graphs extracts both low-frequency and high-frequency information. To reduce the adverse coupling between node attribute and topological structure, we separately map them into two subspaces in dual graph clustering network. Extensive experiments on 11 benchmark graphs demonstrate our promising performance. In particular, our method dominates others on heterophilic graphs.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Erlin Pan", "Zhao Kang" ], "externalIds": { "ArXiv": "2305.02931", "DBLP": "conf/icml/Pan023", "DOI": "10.48550/arXiv.2305.02931", "CorpusId": 258480248 }, "url": "https://www.semanticscholar.org/paper/99c47cc8d02b2f8457e27ba4af72daa0210c55b2", "referenceCount": 47, "citationCount": 22, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MFGCN: A Multimodal Fusion Graph Convolutional Network for Online Car-Hailing Demand Prediction", "abstract": "The rapid growth of online car hailing provides an excellent opportunity to provide convenient travel services. However, with the tremendous increase of users and online taxis, online car-hailing prediction systems face several challenges: 1) the difficulty of modeling nonlinear spatiotemporal interactions between users and vehicles, 2) the difficulty of incorporating context information and multimodal attribute enhancement data, and 3) the problems of data sparsity. To cope with these challenges, we propose a novel multimodal fusion graph convolutional network (MFGCN) for online car-hailing prediction. The model consists of a multimodal origin destination graph convolutional network module that contains three graph convolutional networks to extract spatial patterns from geography, semantics, and functional correlation; a multimodal attribute enhancement module that incorporates weather and temporal activity patterns; and a temporal attention skip-long short-term memory module that captures the periodic variations. Extensive experiments conducted on real-world taxi demand datasets show that MFGCN outperforms the state-of-the-art methods.", "year": 2023, "venue": "IEEE Intelligent Systems", "authors": [ "L. Liao", "Ben Li", "Fumin Zou", "Dejuan Huang" ], "externalIds": { "DBLP": "journals/expert/LiaoLZH23", "DOI": "10.1109/MIS.2023.3250600", "CorpusId": 257326170 }, "url": "https://www.semanticscholar.org/paper/c9fe5f4d051e9f23d55ecdf5036bd303fc3c43fb", "referenceCount": 26, "citationCount": 7, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Self-organization Preserved Graph Structure Learning with Principle of Relevant Information", "abstract": "Most Graph Neural Networks follow the message-passing paradigm, assuming the observed structure depicts the ground-truth node relationships. 
However, this fundamental assumption cannot always be satisfied, as real-world graphs are always incomplete, noisy, or redundant. How to reveal the inherent graph structure in a unified way remains under-explored. \nWe proposed PRI-GSL, a Graph Structure Learning framework guided by the Principle of Relevant Information, providing a simple and unified framework for identifying the self-organization and revealing the hidden structure. PRI-GSL learns a structure that contains the most relevant yet least redundant information quantified by von Neumann entropy and Quantum Jensen Shannon divergence. PRI-GSL incorporates the evolution of quantum continuous walk with graph wavelets to encode node structural roles, showing in which way the nodes interplay and self-organize with the graph structure. Extensive experiments demonstrate the superior effectiveness and robustness of PRI-GSL.", "year": 2022, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Qingyun Sun", "Jianxin Li", "Beining Yang", "Xingcheng Fu", "Hao Peng", "Philip S. Yu" ], "externalIds": { "ArXiv": "2301.00015", "DBLP": "journals/corr/abs-2301-00015", "DOI": "10.48550/arXiv.2301.00015", "CorpusId": 255372572 }, "url": "https://www.semanticscholar.org/paper/b501be9cac69f1fa58ef2887017d0dd94d233b68", "referenceCount": 47, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Latent Graph Inference using Product Manifolds", "abstract": "Graph Neural Networks usually rely on the assumption that the graph topology is available to the network as well as optimal for the downstream task. Latent graph inference allows models to dynamically learn the intrinsic graph structure of problems where the connectivity patterns of data may not be directly accessible. In this work, we generalize the discrete Differentiable Graph Module (dDGM) for latent graph learning. The original dDGM architecture used the Euclidean plane to encode latent features based on which the latent graphs were generated. By incorporating Riemannian geometry into the model and generating more complex embedding spaces, we can improve the performance of the latent graph inference system. In particular, we propose a computationally tractable approach to produce product manifolds of constant curvature model spaces that can encode latent features of varying structure. The latent representations mapped onto the inferred product manifold are used to compute richer similarity measures that are leveraged by the latent graph learning model to obtain optimized latent graphs. Moreover, the curvature of the product manifold is learned during training alongside the rest of the network parameters and based on the downstream task, rather than it being a static embedding space. 
Our novel approach is tested on a wide range of datasets, and outperforms the original dDGM model.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Haitz Sáez de Ocáriz Borde", "Anees Kazi", "Federico Barbero", "Pietro Liò" ], "externalIds": { "ArXiv": "2211.16199", "DBLP": "journals/corr/abs-2211-16199", "DOI": "10.48550/arXiv.2211.16199", "CorpusId": 254069728 }, "url": "https://www.semanticscholar.org/paper/e5c6d0f131945cf0ee1a2e8e1b4e4f91e5ed4d05", "referenceCount": 71, "citationCount": 14, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multimodal learning with graphs", "abstract": null, "year": 2022, "venue": "Nature Machine Intelligence", "authors": [ "Yasha Ektefaie", "George Dasoulas", "Ayush Noori", "Maha Farhat", "M. Zitnik" ], "externalIds": { "ArXiv": "2209.03299", "DBLP": "journals/natmi/EktefaieDNFZ23", "DOI": "10.1038/s42256-023-00624-6", "CorpusId": 254069542, "PubMed": "38076673" }, "url": "https://www.semanticscholar.org/paper/effbb421f3e4a9318dd0e0c55805fcec2369d4f9", "referenceCount": 149, "citationCount": 53, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Multimodal Continual Graph Learning with Neural Architecture Search", "abstract": "Continual graph learning is rapidly emerging as an important role in a variety of real-world applications such as online product recommendation systems and social media. While achieving great success, existing works on continual graph learning ignore the information from multiple modalities (e.g., visual and textual features) as well as the rich dynamic structural information hidden in the ever-changing graph data and evolving tasks. However, considering multimodal continual graph learning with evolving topological structures poses great challenges: i) it is unclear how to incorporate the multimodal information into continual graph learning and ii) it is nontrivial to design models that can capture the structure-evolving dynamics in continual graph learning. To tackle these challenges, in this paper we propose a novel Multimodal Structure-evolving Continual Graph Learning (MSCGL) model, which continually learns both the model architecture and the corresponding parameters for Adaptive Multimodal Graph Neural Network (AdaMGNN). To be concrete, our proposed MSCGL model simultaneously takes social information and multimodal information into account to build the multimodal graphs. In order for continually adapting to new tasks without forgetting the old ones, our MSCGL model explores a new strategy with joint optimization of Neural Architecture Search (NAS) and Group Sparse Regularization (GSR) across different tasks. These two parts interact with each other reciprocally, where NAS is expected to explore more promising architectures and GSR is in charge of preserving important information from the previous tasks. We conduct extensive experiments over two real-world multimodal continual graph scenarios to demonstrate the superiority of the proposed MSCGL model. 
Empirical experiments indicate that both the architectures and weight sharing across different tasks play important roles in affecting the model performances.", "year": 2022, "venue": "The Web Conference", "authors": [ "Jie Cai", "Xin Wang", "Chaoyu Guan", "Yateng Tang", "Jin Xu", "Bin Zhong", "Wenwu Zhu" ], "externalIds": { "DBLP": "conf/www/Cai0GTXZ022", "DOI": "10.1145/3485447.3512176", "CorpusId": 248367588 }, "url": "https://www.semanticscholar.org/paper/b5af49fbcf3678d9203036fe1df8fbfa49d274c3", "referenceCount": 43, "citationCount": 29, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Understanding over-squashing and bottlenecks on graphs via curvature", "abstract": "Most graph neural networks (GNNs) use the message passing paradigm, in which node features are propagated on the input graph. Recent works pointed to the distortion of information flowing from distant nodes as a factor limiting the efficiency of message passing for tasks relying on long-distance interactions. This phenomenon, referred to as 'over-squashing', has been heuristically attributed to graph bottlenecks where the number of $k$-hop neighbors grows rapidly with $k$. We provide a precise description of the over-squashing phenomenon in GNNs and analyze how it arises from bottlenecks in the graph. For this purpose, we introduce a new edge-based combinatorial curvature and prove that negatively curved edges are responsible for the over-squashing issue. We also propose and experimentally test a curvature-based graph rewiring method to alleviate the over-squashing.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Jake Topping", "Francesco Di Giovanni", "B. Chamberlain", "Xiaowen Dong", "M. Bronstein" ], "externalIds": { "DBLP": "journals/corr/abs-2111-14522", "ArXiv": "2111.14522", "CorpusId": 244713935 }, "url": "https://www.semanticscholar.org/paper/04ac11f8db83406b03d92aa4571fc3e6c176c1e3", "referenceCount": 64, "citationCount": 344, "influentialCitationCount": 54, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Graph Signal Processing, Graph Neural Network and Graph Learning on Biological Data: A Systematic Review", "abstract": "Graph networks can model data observed across different levels of biological systems that span from population graphs (with patients as network nodes) to molecular graphs that involve omics data. Graph-based approaches have shed light on decoding biological processes modulated by complex interactions. This paper systematically reviews graph-based analysis methods of Graph Signal Processing (GSP), Graph Neural Networks (GNNs) and graph topology inference, and their applications to biological data. This work focuses on the algorithms of graph-based approaches and the constructions of graph-based frameworks that are adapted to a broad range of biological data. We cover the Graph Fourier Transform and the graph filter developed in GSP, which provides tools to investigate biological signals in the graph domain that can potentially benefit from the underlying graph structures. We also review the node, graph, and interaction oriented applications of GNNs with inductive and transductive learning manners for various biological targets. As a key component of graph analysis, we provide a review of graph topology inference methods that incorporate assumptions for specific biological objectives. 
Finally, we discuss the biological application of graph analysis methods within this exhaustive literature collection, potentially providing insights for future research in biological sciences.", "year": 2021, "venue": "IEEE Reviews in Biomedical Engineering", "authors": [ "Rui Li", "Xin Yuan", "Mohsen Radfar", "Peter Marendy", "W. Ni", "Terence J. O’Brien", "Pablo M. Casillas-Espinosa" ], "externalIds": { "DOI": "10.1109/RBME.2021.3122522", "CorpusId": 239999892, "PubMed": "34699368" }, "url": "https://www.semanticscholar.org/paper/8c324af9e4552857353f4301fe6f74e8ad2da514", "referenceCount": 165, "citationCount": 58, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "A Deep Graph Neural Network-Based Mechanism for Social Recommendations", "abstract": "Nowadays, the issue of information overload is gradually gaining exposure in the Internet of Things (IoT), calling for more research on recommender system in advance for industrial IoT scenarios. With the ever-increasing prevalence of various social networks, social recommendations (SoR) will certainly become an integral application that provides more feasibly personalized information service for future IoT users. However, almost all of the existing research managed to explore and quantify correlations between user preferences and social relationships, while neglecting the correlations among item features which could further influence the topologies of some social groups. To tackle with this challenge, in this article, a deep graph neural network-based social recommendation framework (GNN-SoR) is proposed for future IoTs. First, user and item feature spaces are abstracted as two graph networks and respectively encoded via the graph neural network method. Next, two encoded spaces are embedded into two latent factors of matrix factorization to complete missing rating values in a user-item rating matrix. Finally, a large amount of experiments are conducted on three real-world data sets to verify the efficiency and stability of the proposed GNN-SoR.", "year": 2021, "venue": "IEEE Transactions on Industrial Informatics", "authors": [ "Zhiwei Guo", "Heng Wang" ], "externalIds": { "MAG": "3015616869", "DBLP": "journals/tii/GuoW21", "DOI": "10.1109/TII.2020.2986316", "CorpusId": 216503232 }, "url": "https://www.semanticscholar.org/paper/919c45fefbba662df1c83c2968fd51b395b98507", "referenceCount": 34, "citationCount": 164, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SLAPS: Self-Supervision Improves Structure Learning for Graph Neural Networks", "abstract": "Graph neural networks (GNNs) work well when the graph structure is provided. However, this structure may not always be available in real-world applications. One solution to this problem is to infer a task-specific latent structure and then apply a GNN to the inferred graph. Unfortunately, the space of possible graph structures grows super-exponentially with the number of nodes and so the task-specific supervision may be insufficient for learning both the structure and the GNN parameters. In this work, we propose the Simultaneous Learning of Adjacency and GNN Parameters with Self-supervision, or SLAPS, a method that provides more supervision for inferring a graph structure through self-supervision. 
A comprehensive experimental study demonstrates that SLAPS scales to large graphs with hundreds of thousands of nodes and outperforms several models that have been proposed to learn a task-specific graph structure on established benchmarks.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Bahare Fatemi", "Layla El Asri", "Seyed Mehran Kazemi" ], "externalIds": { "DBLP": "conf/nips/FatemiAK21", "ArXiv": "2102.05034", "CorpusId": 231855665 }, "url": "https://www.semanticscholar.org/paper/4dc3c61426a3332238ea0feb23f2113a96aef0d4", "referenceCount": 84, "citationCount": 123, "influentialCitationCount": 19, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Graph Neural Networks in Recommender Systems: A Survey", "abstract": "With the explosive growth of online information, recommender systems play a key role to alleviate such information overload. Due to the important application value of recommender systems, there have always been emerging works in this field. In recommender systems, the main challenge is to learn the effective user/item representations from their interactions and side information (if any). Recently, graph neural network (GNN) techniques have been widely utilized in recommender systems since most of the information in recommender systems essentially has graph structure and GNN has superiority in graph representation learning. This article aims to provide a comprehensive review of recent research efforts on GNN-based recommender systems. Specifically, we provide a taxonomy of GNN-based recommendation models according to the types of information used and recommendation tasks. Moreover, we systematically analyze the challenges of applying GNN on different types of data and discuss how existing works in this field address these challenges. Furthermore, we state new perspectives pertaining to the development of this field. We collect the representative papers along with their open-source implementations in https://github.com/wusw14/GNN-in-RS.", "year": 2020, "venue": "ACM Computing Surveys", "authors": [ "Shiwen Wu", "Wentao Zhang", "Fei Sun", "Bin Cui" ], "externalIds": { "ArXiv": "2011.02260", "MAG": "3097300053", "DBLP": "journals/corr/abs-2011-02260", "DOI": "10.1145/3535101", "CorpusId": 226246289 }, "url": "https://www.semanticscholar.org/paper/3443efc855cebd17d1512d1a703b6e9ee2e4da8b", "referenceCount": 231, "citationCount": 875, "influentialCitationCount": 24, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fast and Flexible Protein Design Using Deep Graph Neural Networks.", "abstract": null, "year": 2020, "venue": "Cell Systems", "authors": [ "Alexey Strokach", "David Becerra", "Carles Corbi-Verge", "Albert Perez-Riba", "Philip M. Kim" ], "externalIds": { "MAG": "3088578860", "DOI": "10.1016/j.cels.2020.08.016", "CorpusId": 221912492, "PubMed": "32971019" }, "url": "https://www.semanticscholar.org/paper/e8942b44732730d54fd27e13868eb26b83dd51fa", "referenceCount": 54, "citationCount": 175, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Iterative Deep Graph Learning for Graph Neural Networks: Better and Robust Node Embeddings", "abstract": "In this paper, we propose an end-to-end graph learning framework, namely Iterative Deep Graph Learning (IDGL), for jointly and iteratively learning graph structure and graph embedding. 
The key rationale of IDGL is to learn a better graph structure based on better node embeddings, and vice versa (i.e., better node embeddings based on a better graph structure). Our iterative method dynamically stops when the learned graph approaches close enough to the graph optimized for the prediction task. In addition, we cast the graph learning problem as a similarity metric learning problem and leverage adaptive graph regularization for controlling the quality of the learned graph. Finally, combining the anchor-based approximation technique, we further propose a scalable version of IDGL, namely IDGL-ANCH, which significantly reduces the time and space complexity of IDGL without compromising the performance. Our extensive experiments on nine benchmarks show that our proposed IDGL models can consistently outperform or match state-of-the-art baselines. Furthermore, IDGL can be more robust to adversarial graphs and cope with both transductive and inductive learning.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Yu Chen", "Lingfei Wu", "Mohammed J. Zaki" ], "externalIds": { "ArXiv": "2006.13009", "MAG": "3101979678", "DBLP": "conf/nips/0022WZ20", "CorpusId": 214003631 }, "url": "https://www.semanticscholar.org/paper/ff6a4a9a41b78c8b1fcab185db780266bbb06caf", "referenceCount": 68, "citationCount": 341, "influentialCitationCount": 49, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Traffic Flow Prediction via Spatial Temporal Graph Neural Network", "abstract": "Traffic flow analysis, prediction and management are keystones for building smart cities in the new era. With the help of deep neural networks and big traffic data, we can better understand the latent patterns hidden in the complex transportation networks. The dynamic of the traffic flow on one road not only depends on the sequential patterns in the temporal dimension but also relies on other roads in the spatial dimension. Although there are existing works on predicting the future traffic flow, the majority of them have certain limitations on modeling spatial and temporal dependencies. In this paper, we propose a novel spatial temporal graph neural network for traffic flow prediction, which can comprehensively capture spatial and temporal patterns. In particular, the framework offers a learnable positional attention mechanism to effectively aggregate information from adjacent roads. Meanwhile, it provides a sequential component to model the traffic flow dynamics which can exploit both local and global temporal dependencies. Experimental results on various real traffic datasets demonstrate the effectiveness of the proposed framework.", "year": 2020, "venue": "The Web Conference", "authors": [ "Xiaoyang Wang", "Yao Ma", "Yiqi Wang", "Wei Jin", "Xin Wang", "Jiliang Tang", "Caiyan Jia", "Jian Yu" ], "externalIds": { "DBLP": "conf/www/Wang0WJWTJY20", "MAG": "3012562343", "DOI": "10.1145/3366423.3380186", "CorpusId": 215838304 }, "url": "https://www.semanticscholar.org/paper/4a4f84992b4ee8331f1e3189f6f9b0437214035c", "referenceCount": 55, "citationCount": 430, "influentialCitationCount": 32, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Differentiable Graph Module (DGM) for Graph Convolutional Networks", "abstract": "Graph deep learning has recently emerged as a powerful ML concept allowing to generalize successful deep neural architectures to non-euclidean structured data. 
Such methods have shown promising results on a broad spectrum of applications ranging from social science, biomedicine, and particle physics to computer vision, graphics, and chemistry. One of the limitations of the majority of current graph neural network architectures is that they are often restricted to the transductive setting and rely on the assumption that the underlying graph is known and fixed. Often, this assumption is not true since the graph may be noisy, or partially and even completely unknown. In such cases, it would be helpful to infer the graph directly from the data, especially in inductive settings where some nodes were not present in the graph at training time. Furthermore, learning a graph may become an end in itself, as the inferred structure may provide complementary insights next to the downstream task. In this paper, we introduce Differentiable Graph Module (DGM), a learnable function that predicts edge probabilities in the graph which are optimal for the downstream task. DGM can be combined with convolutional graph neural network layers and trained in an end-to-end fashion. We provide an extensive evaluation of applications from the domains of healthcare (disease prediction), brain imaging (age prediction), computer graphics (3D point cloud segmentation), and computer vision (zero-shot learning). We show that our model provides a significant improvement over baselines both in transductive and inductive settings and achieves state-of-the-art results.", "year": 2020, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Anees Kazi", "Luca Cosmo", "Seyed-Ahmad Ahmadi", "N. Navab", "M. Bronstein" ], "externalIds": { "ArXiv": "2002.04999", "DBLP": "journals/pami/KaziCANB23", "DOI": "10.1109/TPAMI.2022.3170249", "CorpusId": 211082692, "PubMed": "35471872" }, "url": "https://www.semanticscholar.org/paper/15510709cece022d54cf0eaf6fdff3cbd8711fc0", "referenceCount": 88, "citationCount": 111, "influentialCitationCount": 16, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics", "Medicine" ] }, { "title": "Deep Iterative and Adaptive Learning for Graph Neural Networks", "abstract": "In this paper, we propose an end-to-end graph learning framework, namely Deep Iterative and Adaptive Learning for Graph Neural Networks (DIAL-GNN), for jointly learning the graph structure and graph embeddings simultaneously. We first cast the graph structure learning problem as a similarity metric learning problem and leverage an adapted graph regularization for controlling smoothness, connectivity and sparsity of the generated graph. We further propose a novel iterative method for searching for a hidden graph structure that augments the initial graph structure. Our iterative method dynamically stops when the learned graph structure approaches close enough to the optimal graph. Our extensive experiments demonstrate that the proposed DIAL-GNN model can consistently outperform or match state-of-the-art baselines in terms of both downstream task performance and computational time. The proposed approach can cope with both transductive learning and inductive learning.", "year": 2019, "venue": "arXiv.org", "authors": [ "Yu Chen", "Lingfei Wu", "Mohammed J. 
Zaki" ], "externalIds": { "MAG": "2994821362", "DBLP": "journals/corr/abs-1912-07832", "ArXiv": "1912.07832", "CorpusId": 209386433 }, "url": "https://www.semanticscholar.org/paper/d513b7fdb47290a63786aea5822b8171c8926560", "referenceCount": 35, "citationCount": 45, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Modality to Modality Translation: An Adversarial Representation Learning and Graph Fusion Network for Multimodal Fusion", "abstract": "Learning joint embedding space for various modalities is of vital importance for multimodal fusion. Mainstream modality fusion approaches fail to achieve this goal, leaving a modality gap which heavily affects cross-modal fusion. In this paper, we propose a novel adversarial encoder-decoder-classifier framework to learn a modality-invariant embedding space. Since the distributions of various modalities vary in nature, to reduce the modality gap, we translate the distributions of source modalities into that of target modality via their respective encoders using adversarial training. Furthermore, we exert additional constraints on embedding space by introducing reconstruction loss and classification loss. Then we fuse the encoded representations using hierarchical graph neural network which explicitly explores unimodal, bimodal and trimodal interactions in multi-stage. Our method achieves state-of-the-art performance on multiple datasets. Visualization of the learned embeddings suggests that the joint embedding space learned by our method is discriminative.", "year": 2019, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Sijie Mai", "Haifeng Hu", "Songlong Xing" ], "externalIds": { "MAG": "2997573100", "DBLP": "journals/corr/abs-1911-07848", "ArXiv": "1911.07848", "DOI": "10.1609/AAAI.V34I01.5347", "CorpusId": 208157975 }, "url": "https://www.semanticscholar.org/paper/109a1a60abaf86603c8a9fcbe767cef13e5aadd4", "referenceCount": 40, "citationCount": 150, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Discrete Structures for Graph Neural Networks", "abstract": "Graph neural networks (GNNs) are a popular class of machine learning models whose major advantage is their ability to incorporate a sparse and discrete dependency structure between data points. Unfortunately, GNNs can only be used when such a graph-structure is available. In practice, however, real-world graphs are often noisy and incomplete or might not be available at all. With this work, we propose to jointly learn the graph structure and the parameters of graph convolutional networks (GCNs) by approximately solving a bilevel program that learns a discrete probability distribution on the edges of the graph. This allows one to apply GCNs not only in scenarios where the given graph is incomplete or corrupted but also in those where a graph is not available. We conduct a series of experiments that analyze the behavior of the proposed method and demonstrate that it outperforms related methods by a significant margin.", "year": 2019, "venue": "International Conference on Machine Learning", "authors": [ "Luca Franceschi", "Mathias Niepert", "M. Pontil", "X. 
He" ], "externalIds": { "ArXiv": "1903.11960", "MAG": "2951000635", "DBLP": "conf/icml/FranceschiNPH19", "CorpusId": 85543335 }, "url": "https://www.semanticscholar.org/paper/1e059de3858c90445f8dce8a48cdb16ce3fc2af0", "referenceCount": 63, "citationCount": 359, "influentialCitationCount": 49, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Stochastic Beams and Where to Find Them: The Gumbel-Top-k Trick for Sampling Sequences Without Replacement", "abstract": "The well-known Gumbel-Max trick for sampling from a categorical distribution can be extended to sample $k$ elements without replacement. We show how to implicitly apply this 'Gumbel-Top-$k$' trick on a factorized distribution over sequences, allowing to draw exact samples without replacement using a Stochastic Beam Search. Even for exponentially large domains, the number of model evaluations grows only linear in $k$ and the maximum sampled sequence length. The algorithm creates a theoretical connection between sampling and (deterministic) beam search and can be used as a principled intermediate alternative. In a translation task, the proposed method compares favourably against alternatives to obtain diverse yet good quality translations. We show that sequences sampled without replacement can be used to construct low-variance estimators for expected sentence-level BLEU score and model entropy.", "year": 2019, "venue": "International Conference on Machine Learning", "authors": [ "W. Kool", "H. V. Hoof", "M. Welling" ], "externalIds": { "MAG": "2963247666", "DBLP": "conf/icml/KoolHW19", "ArXiv": "1903.06059", "CorpusId": 76662039 }, "url": "https://www.semanticscholar.org/paper/b462bd9e3c4c991722b77fe696fda0a0372bf6dd", "referenceCount": 48, "citationCount": 183, "influentialCitationCount": 17, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Pitfalls of Graph Neural Network Evaluation", "abstract": "Semi-supervised node classification in graphs is a fundamental problem in graph mining, and the recently proposed graph neural networks (GNNs) have achieved unparalleled results on this task. Due to their massive success, GNNs have attracted a lot of attention, and many novel architectures have been put forward. In this paper we show that existing evaluation strategies for GNN models have serious shortcomings. We show that using the same train/validation/test splits of the same datasets, as well as making significant changes to the training procedure (e.g. early stopping criteria) precludes a fair comparison of different architectures. We perform a thorough empirical evaluation of four prominent GNN models and show that considering different splits of the data leads to dramatically different rankings of models. 
Even more importantly, our findings suggest that simpler GNN architectures are able to outperform the more sophisticated ones if the hyperparameters and the training procedure are tuned fairly for all models.", "year": 2018, "venue": "arXiv.org", "authors": [ "Oleksandr Shchur", "Maximilian Mumme", "Aleksandar Bojchevski", "Stephan Günnemann" ], "externalIds": { "ArXiv": "1811.05868", "MAG": "2900470550", "DBLP": "journals/corr/abs-1811-05868", "CorpusId": 53303554 }, "url": "https://www.semanticscholar.org/paper/c2d40522eaa5523d67a0de5e4098e7031fdccb3d", "referenceCount": 22, "citationCount": 1111, "influentialCitationCount": 171, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Dynamic Graph CNN for Learning on Point Clouds", "abstract": "Point clouds provide a flexible geometric representation suitable for countless applications in computer graphics; they also comprise the raw output of most 3D data acquisition devices. While hand-designed features on point clouds have long been proposed in graphics and vision, however, the recent overwhelming success of convolutional neural networks (CNNs) for image analysis suggests the value of adapting insight from CNN to the point cloud world. Point clouds inherently lack topological information, so designing a model to recover topology can enrich the representation power of point clouds. To this end, we propose a new neural network module dubbed EdgeConv suitable for CNN-based high-level tasks on point clouds, including classification and segmentation. EdgeConv acts on graphs dynamically computed in each layer of the network. It is differentiable and can be plugged into existing architectures. Compared to existing modules operating in extrinsic space or treating each point independently, EdgeConv has several appealing properties: It incorporates local neighborhood information; it can be stacked applied to learn global shape properties; and in multi-layer systems affinity in feature space captures semantic characteristics over potentially long distances in the original embedding. We show the performance of our model on standard benchmarks, including ModelNet40, ShapeNetPart, and S3DIS.", "year": 2018, "venue": "ACM Transactions on Graphics", "authors": [ "Yue Wang", "Yongbin Sun", "Ziwei Liu", "S. Sarma", "M. Bronstein", "J. Solomon" ], "externalIds": { "MAG": "2785053089", "DBLP": "journals/tog/WangSLSBS19", "ArXiv": "1801.07829", "DOI": "10.1145/3326362", "CorpusId": 94822 }, "url": "https://www.semanticscholar.org/paper/e1799aaf23c12af6932dc0ef3dfb1638f01413d1", "referenceCount": 95, "citationCount": 5174, "influentialCitationCount": 1041, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Adaptive Graph Convolutional Neural Networks", "abstract": "\n \n Graph Convolutional Neural Networks (Graph CNNs) are generalizations of classical CNNs to handle graph data such as molecular data, point cloud and social networks. Current filters in graph CNNs are built for fixed and shared graph structure. However, for most real data, the graph structures varies in both size and connectivity. The paper proposes a generalized and flexible graph CNN taking data of arbitrary graph structure as input. In that way a task-driven adaptive graph is learned for each graph data while training. To efficiently learn the graph, a distance metric learning is proposed. 
Extensive experiments on nine graph-structured datasets have demonstrated the superior performance improvement on both convergence speed and predictive accuracy.\n \n", "year": 2018, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Ruoyu Li", "Sheng Wang", "Feiyun Zhu", "Junzhou Huang" ], "externalIds": { "MAG": "2953264017", "DBLP": "journals/corr/abs-1801-03226", "ArXiv": "1801.03226", "DOI": "10.1609/aaai.v32i1.11691", "CorpusId": 1415308 }, "url": "https://www.semanticscholar.org/paper/51a2bc2e8fb8ed47a085df33dd965e57335080a0", "referenceCount": 31, "citationCount": 662, "influentialCitationCount": 28, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Graph Attention Networks", "abstract": "We present graph attention networks (GATs), novel neural network architectures that operate on graph-structured data, leveraging masked self-attentional layers to address the shortcomings of prior methods based on graph convolutions or their approximations. By stacking layers in which nodes are able to attend over their neighborhoods' features, we enable (implicitly) specifying different weights to different nodes in a neighborhood, without requiring any kind of costly matrix operation (such as inversion) or depending on knowing the graph structure upfront. In this way, we address several key challenges of spectral-based graph neural networks simultaneously, and make our model readily applicable to inductive as well as transductive problems. Our GAT models have achieved or matched state-of-the-art results across four established transductive and inductive graph benchmarks: the Cora, Citeseer and Pubmed citation network datasets, as well as a protein-protein interaction dataset (wherein test graphs remain unseen during training).", "year": 2017, "venue": "International Conference on Learning Representations", "authors": [ "Petar Velickovic", "Guillem Cucurull", "Arantxa Casanova", "Adriana Romero", "P. Lio’", "Yoshua Bengio" ], "externalIds": { "MAG": "2766453196", "ArXiv": "1710.10903", "DBLP": "journals/corr/abs-1710-10903", "DOI": "10.17863/CAM.48429", "CorpusId": 3292002 }, "url": "https://www.semanticscholar.org/paper/33998aff64ce51df8dee45989cdca4b6b1329ec4", "referenceCount": 45, "citationCount": 16828, "influentialCitationCount": 3007, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Inductive Representation Learning on Large Graphs", "abstract": "Low-dimensional embeddings of nodes in large graphs have proved extremely useful in a variety of prediction tasks, from content recommendation to identifying protein functions. However, most existing approaches require that all nodes in the graph are present during training of the embeddings; these previous approaches are inherently transductive and do not naturally generalize to unseen nodes. Here we present GraphSAGE, a general, inductive framework that leverages node feature information (e.g., text attributes) to efficiently generate node embeddings for previously unseen data. Instead of training individual embeddings for each node, we learn a function that generates embeddings by sampling and aggregating features from a node's local neighborhood. 
Our algorithm outperforms strong baselines on three inductive node-classification benchmarks: we classify the category of unseen nodes in evolving information graphs based on citation and Reddit post data, and we show that our algorithm generalizes to completely unseen graphs using a multi-graph dataset of protein-protein interactions.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "William L. Hamilton", "Z. Ying", "J. Leskovec" ], "externalIds": { "DBLP": "conf/nips/HamiltonYL17", "MAG": "2952779545", "ArXiv": "1706.02216", "CorpusId": 4755450 }, "url": "https://www.semanticscholar.org/paper/6b7d6e6416343b2a122f8416e69059ce919026ef", "referenceCount": 42, "citationCount": 12811, "influentialCitationCount": 2278, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Semi-Supervised Classification with Graph Convolutional Networks", "abstract": "We present a scalable approach for semi-supervised learning on graph-structured data that is based on an efficient variant of convolutional neural networks which operate directly on graphs. We motivate the choice of our convolutional architecture via a localized first-order approximation of spectral graph convolutions. Our model scales linearly in the number of graph edges and learns hidden layer representations that encode both local graph structure and features of nodes. In a number of experiments on citation networks and on a knowledge graph dataset we demonstrate that our approach outperforms related methods by a significant margin.", "year": 2016, "venue": "International Conference on Learning Representations", "authors": [ "Thomas Kipf", "M. Welling" ], "externalIds": { "ArXiv": "1609.02907", "MAG": "2519887557", "DBLP": "journals/corr/KipfW16", "CorpusId": 3144218 }, "url": "https://www.semanticscholar.org/paper/36eff562f65125511b5dfab68ce7f7a943c27478", "referenceCount": 38, "citationCount": 25291, "influentialCitationCount": 6216, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Convolutional Neural Networks on Graphs with Fast Localized Spectral Filtering", "abstract": "In this work, we are interested in generalizing convolutional neural networks (CNNs) from low-dimensional regular grids, where image, video and speech are represented, to high-dimensional irregular domains, such as social networks, brain connectomes or words' embedding, represented by graphs. We present a formulation of CNNs in the context of spectral graph theory, which provides the necessary mathematical background and efficient numerical schemes to design fast localized convolutional filters on graphs. Importantly, the proposed technique offers the same linear computational complexity and constant learning complexity as classical CNNs, while being universal to any graph structure. Experiments on MNIST and 20NEWS demonstrate the ability of this novel deep learning system to learn local, stationary, and compositional features on graphs.", "year": 2016, "venue": "Neural Information Processing Systems", "authors": [ "M. Defferrard", "X. Bresson", "P. 
Vandergheynst" ], "externalIds": { "MAG": "2468907370", "ArXiv": "1606.09375", "DBLP": "conf/nips/DefferrardBV16", "CorpusId": 3016223 }, "url": "https://www.semanticscholar.org/paper/c41eb895616e453dcba1a70c9b942c5063cc656c", "referenceCount": 47, "citationCount": 7038, "influentialCitationCount": 762, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Deep Residual Learning for Image Recognition", "abstract": "Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers - 8× deeper than VGG nets [40] but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions1, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation.", "year": 2015, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Kaiming He", "X. Zhang", "Shaoqing Ren", "Jian Sun" ], "externalIds": { "DBLP": "conf/cvpr/HeZRS16", "MAG": "2949650786", "ArXiv": "1512.03385", "DOI": "10.1109/cvpr.2016.90", "CorpusId": 206594692 }, "url": "https://www.semanticscholar.org/paper/2c03df8b48bf3fa39054345bafabfeff15bfd11d", "referenceCount": 53, "citationCount": 172713, "influentialCitationCount": 28229, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Image-Based Recommendations on Styles and Substitutes", "abstract": "Humans inevitably develop a sense of the relationships between objects, some of which are based on their appearance. Some pairs of objects might be seen as being alternatives to each other (such as two pairs of jeans), while others may be seen as being complementary (such as a pair of jeans and a matching shirt). This information guides many of the choices that people make, from buying clothes to their interactions with each other. We seek here to model this human sense of the relationships between objects based on their appearance. Our approach is not based on fine-grained modeling of user annotations but rather on capturing the largest dataset possible and developing a scalable method for uncovering human notions of the visual relationships within. We cast this as a network inference problem defined on graphs of related images, and provide a large-scale dataset for the training and evaluation of the same. 
The system we develop is capable of recommending which clothes and accessories will go well together (and which will not), amongst a host of other applications.", "year": 2015, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "Julian McAuley", "C. Targett", "Javen Qinfeng Shi", "A. Hengel" ], "externalIds": { "MAG": "2949765268", "DBLP": "journals/corr/McAuleyTSH15", "ArXiv": "1506.04757", "DOI": "10.1145/2766462.2767755", "CorpusId": 1012652 }, "url": "https://www.semanticscholar.org/paper/fab4d19ed77dad7c437d885eceb8aa65fae5a783", "referenceCount": 44, "citationCount": 2142, "influentialCitationCount": 190, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Large Graph Construction for Scalable Semi-Supervised Learning", "abstract": "In this paper, we address the scalability issue plaguing graph-based semi-supervised learning via a small number of anchor points which adequately cover the entire point cloud. Critically, these anchor points enable nonparametric regression that predicts the label for each data point as a locally weighted average of the labels on anchor points. Because conventional graph construction is inefficient in large scale, we propose to construct a tractable large graph by coupling anchor-based label prediction and adjacency matrix design. Contrary to the Nystrom approximation of adjacency matrices which results in indefinite graph Laplacians and in turn leads to potential non-convex optimization over graphs, the proposed graph construction approach based on a unique idea called AnchorGraph provides nonnegative adjacency matrices to guarantee positive semidefinite graph Laplacians. Our approach scales linearly with the data size and in practice usually produces a large sparse graph. Experiments on large datasets demonstrate the significant accuracy improvement and scalability of the proposed approach.", "year": 2010, "venue": "International Conference on Machine Learning", "authors": [ "W. Liu", "Junfeng He", "Shih-Fu Chang" ], "externalIds": { "DBLP": "conf/icml/LiuHC10", "MAG": "1500351990", "CorpusId": 14830880 }, "url": "https://www.semanticscholar.org/paper/537ff50f531a40067a859391524523a0319514f0", "referenceCount": 19, "citationCount": 574, "influentialCitationCount": 88, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Collective Classification in Network Data", "abstract": "Many real-world applications produce networked data such as the world-wide web (hypertext documents connected via hyperlinks), social networks (for example, people connected by friendship links), communication networks (computers connected via communication links) and biological networks (for example, protein interaction networks). A recent focus in machine learning research has been to extend traditional machine learning classification techniques to classify nodes in such networks. In this article, we provide a brief introduction to this area of research and how it has progressed during the past decade. We introduce four of the most widely used inference algorithms for classifying networked data and empirically compare them on both synthetic and real-world data.", "year": 2008, "venue": "The AI Magazine", "authors": [ "Prithviraj Sen", "Galileo Namata", "M. Bilgic", "L. 
Getoor", "Brian Gallagher", "Tina Eliassi-Rad" ], "externalIds": { "MAG": "2403788960", "DBLP": "journals/aim/SenNBGGE08", "DOI": "10.1201/b17320-16", "CorpusId": 62016134 }, "url": "https://www.semanticscholar.org/paper/43d2ed5c3c55c1100450cd74dc1031afa24d37b2", "referenceCount": 94, "citationCount": 3575, "influentialCitationCount": 646, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Boolean Matrix Multiplication and Transitive Closure", "abstract": "Arithmetic operations on matrices are applied to the problem of finding the transitive closure of a Boolean matrix. The best transitive closure algorithm known, due to Munro, is based on the matrix multiplication method of Strassen. We show that his method requires at most O(n^α · P(n)) bitwise operations, where α = log_2 7 and P(n) bounds the number of bitwise operations needed for arithmetic modulo n+1. The problems of computing the transitive closure and of computing the "and-or" product of Boolean matrices are shown to be of the same order of difficulty. A transitive closure method based on matrix inverse is presented which can be used to derive Munro's method.", "year": 1971, "venue": "Scandinavian Workshop on Algorithm Theory", "authors": [ "M. Fischer", "A. Meyer" ], "externalIds": { "DBLP": "conf/focs/FischerM71", "MAG": "2545919726", "DOI": "10.1109/SWAT.1971.4", "CorpusId": 20639631 }, "url": "https://www.semanticscholar.org/paper/9b63266ac359fb6a0fbb00e26d7fd8e0ced7ce62", "referenceCount": 3, "citationCount": 211, "influentialCitationCount": 8, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Futures Quantitative Investment with Heterogeneous Continual Graph Neural Network", "abstract": "It is a challenging problem to predict trends of futures prices with traditional econometric models as one needs to consider not only futures’ historical data but also correlations among different futures. Spatial-temporal graph neural networks (STGNNs) have great advantages in dealing with such kind of spatial-temporal data. However, we cannot directly apply STGNNs to high-frequency future data because future investors have to consider both the long-term and short-term characteristics when doing decision-making. To capture both the long-term and short-term features, we exploit more label information by designing four heterogeneous tasks: price regression, price moving average regression, price gap regression (within a short interval), and change-point detection, which involve both long-term and short-term scenes. To make full use of these labels, we train our model in a continual manner. Traditional continual GNNs define the gradient of prices as the parameter important to overcome catastrophic forgetting (CF). Unfortunately, the losses of the four heterogeneous tasks lie in different spaces. Hence it is improper to calculate the parameter importance with their losses. We propose to calculate parameter importance with mutual information between original observations and the extracted features. The empirical results based on 49 commodity futures demonstrate that our model has higher prediction performance on capturing long-term or short-term dynamic change.", "year": 2023, "venue": "arXiv.org", "authors": [ "Z. Tan", "Min Hu", "Yixuan Wang", "Lu Wei", "B. 
Liu" ], "externalIds": { "DBLP": "journals/corr/abs-2303-16532", "DOI": "10.48550/arXiv.2303.16532", "CorpusId": 257804719 }, "url": "https://www.semanticscholar.org/paper/381fbeba99d1f9bb7148429aaad74a97e10f5cd6", "referenceCount": 62, "citationCount": 4, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] } ] }, "Graph Neural Network Framework for Sentiment Analysis Using Syntactic Feature": { "paper_title": "Graph Neural Network Framework for Sentiment Analysis Using Syntactic Feature", "arxiv_id": "2409.14000v1", "keyword": "graph neural network", "authors": [ "Linxiao Wu", "Yuanshuai Luo", "Binrong Zhu", "Guiran Liu", "Rui Wang", "Qian Yu" ], "references": [ { "title": "Enhancing Convolutional Neural Networks with Higher-Order Numerical Difference Methods", "abstract": "With the rise of deep learning technology in practical applications, Convolutional Neural Networks (CNNs) have been able to assist humans in solving many real-world problems. To enhance the performance of CNNs, numerous network architectures have been explored. Some of these architectures are designed based on the accumulated experience of researchers over time, while others are designed through neural architecture search methods. The improvements made to CNNs by the aforementioned methods are quite significant, but most of the improvement methods are limited in reality by model size and environmental constraints, making it difficult to fully realize the improved performance. In recent years, research has found that many CNN structures can be explained by the discretization of ordinary differential equations. This implies that we can design theoretically supported deep network structures using higher-order numerical difference methods. It should be noted that most of the previous CNN model structures are based on low-order numerical methods. Therefore, considering that the accuracy of linear multi-step numerical difference methods is higher than that of the forward Euler method, this paper proposes a stacking scheme based on the linear multi-step method. This scheme enhances the performance of ResNet without increasing the model size and compares it with the Runge-Kutta scheme. The experimental results show that the performance of the stacking scheme proposed in this paper is superior to existing stacking schemes (ResNet and HO-ResNet), and it has the capability to be extended to other types of neural networks.", "year": 2024, "venue": "", "authors": [ "Qi Wang", "Zijun Gao", "Mingxiu Sui", "Taiyuan Mei", "X. Cheng", "Iris Li" ], "externalIds": { "ArXiv": "2409.04977", "CorpusId": 272524287 }, "url": "https://www.semanticscholar.org/paper/0e75beeff2bbb85bf0f8087cc892d28907227fde", "referenceCount": 9, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Enhancing Deep Learning with Optimized Gradient Descent: Bridging Numerical Methods and Neural Network Training", "abstract": "Optimization theory serves as a pivotal scientific instrument for achieving optimal system performance, with its origins in economic applications to identify the best investment strategies for maximizing benefits. Over the centuries, from the geometric inquiries of ancient Greece to the calculus contributions by Newton and Leibniz, optimization theory has significantly advanced. The persistent work of scientists like Lagrange, Cauchy, and von Neumann has fortified its progress. 
The modern era has seen an unprecedented expansion of optimization theory applications, particularly with the growth of computer science, enabling more sophisticated computational practices and widespread utilization across engineering, decision analysis, and operations research. This paper delves into the profound relationship between optimization theory and deep learning, highlighting the omnipresence of optimization problems in the latter. We explore the gradient descent algorithm and its variants, which are the cornerstone of optimizing neural networks. The chapter introduces an enhancement to the SGD optimizer, drawing inspiration from numerical optimization methods, aiming to enhance interpretability and accuracy. Our experiments on diverse deep learning tasks substantiate the improved algorithm's efficacy. The paper concludes by emphasizing the continuous development of optimization theory and its expanding role in solving intricate problems, enhancing computational capabilities, and informing better policy decisions.", "year": 2024, "venue": "", "authors": [ "Yuhan Ma", "Dan Sun", "Erdi Gao", "Ningjing Sang", "Iris Li", "Guanming Huang" ], "externalIds": { "ArXiv": "2409.04707", "CorpusId": 272524395 }, "url": "https://www.semanticscholar.org/paper/2baafc233983826dc4c0d0bcda98e82fb5cae736", "referenceCount": 33, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Text classification optimization algorithm based on graph neural network", "abstract": "In the field of natural language processing, text classification, as a basic task, has important research value and application prospects. Traditional text classification methods usually rely on feature representations such as the bag of words model or TF-IDF, which overlook the semantic connections between words and make it challenging to grasp the deep structural details of the text. Recently, GNNs have proven to be a valuable asset for text classification tasks, thanks to their capability to handle non-Euclidean data efficiently. However, the existing text classification methods based on GNN still face challenges such as complex graph structure construction and high cost of model training. This paper introduces a text classification optimization algorithm utilizing graph neural networks. By introducing adaptive graph construction strategy and efficient graph convolution operation, the accuracy and efficiency of text classification are effectively improved. The experimental results demonstrate that the proposed method surpasses traditional approaches and existing GNN models across multiple public datasets, highlighting its superior performance and feasibility for text classification tasks.", "year": 2024, "venue": "", "authors": [ "Erdi Gao", "Haowei Yang", "Dan Sun", "Haohao Xia", "Yuhan Ma", "Yuanjing Zhu" ], "externalIds": { "ArXiv": "2408.15257", "CorpusId": 271974292 }, "url": "https://www.semanticscholar.org/paper/8a658ce2d28ece403df829de6b01cd67a4ab1e4f", "referenceCount": 25, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Dynamic Hypergraph-Enhanced Prediction of Sequential Medical Visits", "abstract": "This study introduces a pioneering Dynamic Hypergraph Networks (DHCE) model designed to predict future medical diagnoses from electronic health records with enhanced accuracy. 
The DHCE model innovates by identifying and differentiating acute and chronic diseases within a patient's visit history, constructing dynamic hypergraphs that capture the complex, high-order interactions between diseases. It surpasses traditional recurrent neural networks and graph neural networks by effectively integrating clinical event data, reflected through medical language model-assisted encoding, into a robust patient representation. Through extensive experiments on two benchmark datasets, MIMIC-III and MIMIC-IV, the DHCE model exhibits superior performance, significantly outpacing established baseline models in the precision of sequential diagnosis prediction.", "year": 2024, "venue": "arXiv.org", "authors": [ "Wangying Yang", "Zitao Zheng", "Shi Bo", "Zhizhong Wu", "Bo Zhang", "Yuanfang Yang" ], "externalIds": { "DBLP": "journals/corr/abs-2408-07084", "ArXiv": "2408.07084", "DOI": "10.48550/arXiv.2408.07084", "CorpusId": 271865540 }, "url": "https://www.semanticscholar.org/paper/30ed8ec5644f42373e660281d2d4d837b4e487fe", "referenceCount": 29, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Adaptive Friction in Deep Learning: Enhancing Optimizers with Sigmoid and Tanh Function", "abstract": "Adaptive optimizers are pivotal in guiding the weight updates of deep neural networks, yet they often face challenges such as poor generalization and oscillation issues. To counter these, we introduce sigSignGrad and tanhSignGrad, two novel optimizers that integrate adaptive friction coefficients based on the Sigmoid and Tanh functions, respectively. These algorithms leverage short-term gradient information, a feature overlooked in traditional Adam variants like diffGrad and AngularGrad, to enhance parameter updates and convergence.Our theoretical analysis demonstrates the wide-ranging adjustment capability of the friction coefficient S, which aligns with targeted parameter update strategies and outperforms existing methods in both optimization trajectory smoothness and convergence rate. Extensive experiments on CIFAR-10, CIFAR-100, and Mini-ImageNet datasets using ResNet50 and ViT architectures confirm the superior performance of our proposed optimizers, showcasing improved accuracy and reduced training time. The innovative approach of integrating adaptive friction coefficients as plug-ins into existing optimizers, exemplified by the sigSignAdamW and sigSignAdamP variants, presents a promising strategy for boosting the optimization performance of established algorithms. 
The findings of this study contribute to the advancement of optimizer design in deep learning.", "year": 2024, "venue": "arXiv.org", "authors": [ "Hongye Zheng", "Bingxing Wang", "Minheng Xiao", "Honglin Qin", "Zhizhong Wu", "Lianghao Tan" ], "externalIds": { "ArXiv": "2408.11839", "DBLP": "journals/corr/abs-2408-11839", "DOI": "10.48550/arXiv.2408.11839", "CorpusId": 271924433 }, "url": "https://www.semanticscholar.org/paper/fcf1c03f40dcbdb1cc707f75b87c674051416cfb", "referenceCount": 24, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A comparative study of generative adversarial networks for image recognition algorithms based on deep learning and traditional methods", "abstract": "In this paper, an image recognition algorithm based on the combination of deep learning and generative adversarial network (GAN) is studied, and compared with traditional image recognition methods. The purpose of this study is to evaluate the advantages and application prospects of deep learning technology, especially GAN, in the field of image recognition. Firstly, this paper reviews the basic principles and techniques of traditional image recognition methods, including the classical algorithms based on feature extraction such as SIFT, HOG and their combination with support vector machine (SVM), random forest, and other classifiers. Then, the working principle, network structure, and unique advantages of GAN in image generation and recognition are introduced. In order to verify the effectiveness of GAN in image recognition, a series of experiments are designed and carried out using multiple public image data sets for training and testing. The experimental results show that compared with traditional methods, GAN has excellent performance in processing complex images, recognition accuracy, and anti-noise ability. Specifically, Gans are better able to capture high-dimensional features and details of images, significantly improving recognition performance. In addition, Gans shows unique advantages in dealing with image noise, partial missing information, and generating high-quality images.", "year": 2024, "venue": "arXiv.org", "authors": [ "Yihao Zhong", "Yijing Wei", "Yingbin Liang", "Xiqing Liu", "Rongwei Ji", "Yiru Cang" ], "externalIds": { "DBLP": "journals/corr/abs-2408-03568", "ArXiv": "2408.03568", "DOI": "10.48550/arXiv.2408.03568", "CorpusId": 271745107 }, "url": "https://www.semanticscholar.org/paper/02c6cc07592b5c4e30a36e6dbffec3c7d53f5ec4", "referenceCount": 27, "citationCount": 7, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Algorithm Research of ELMo Word Embedding and Deep Learning Multimodal Transformer in Image Description", "abstract": "Zero sample learning is an effective method for data deficiency. The existing embedded zero sample learning methods only use the known classes to construct the embedded space, so there is an overfitting of the known classes in the testing process. This project uses category semantic similarity measures to classify multiple tags. This enables it to incorporate unknown classes that have the same meaning as currently known classes into the vector space when it is built. At the same time, most of the existing zero sample learning algorithms directly use the depth features of medical images as input, and the feature extraction process does not consider semantic information. 
This project intends to take ELMo-MCT as the main task and obtain multiple visual features related to the original image through self-attention mechanism. In this paper, a large number of experiments are carried out on three zero-shot learning reference datasets, and the best harmonic average accuracy is obtained compared with the most advanced algorithms.", "year": 2024, "venue": "arXiv.org", "authors": [ "X. Cheng", "Taiyuan Mei", "Yun Zi", "Qi Wang", "Zijun Gao", "Haowei Yang" ], "externalIds": { "DBLP": "journals/corr/abs-2408-06357", "ArXiv": "2408.06357", "DOI": "10.48550/arXiv.2408.06357", "CorpusId": 271860237 }, "url": "https://www.semanticscholar.org/paper/f58bb9371bcbabab5ebe1d4f063f8afba7c3978d", "referenceCount": 27, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Applying Conditional Generative Adversarial Networks for Imaging Diagnosis", "abstract": "This study introduces an innovative application of Conditional Generative Adversarial Networks (C-GAN) integrated with Stacked Hourglass Networks (SHGN) aimed at enhancing image segmentation, particularly in the challenging environment of medical imaging. We address the problem of overfitting, common in deep learning models applied to complex imaging datasets, by augmenting data through rotation and scaling. A hybrid loss function combining L1 and L2 reconstruction losses, enriched with adversarial training, is introduced to refine segmentation processes in intravascular ultrasound (IVUS) imaging. Our approach is unique in its capacity to accurately delineate distinct regions within medical images, such as tissue boundaries and vascular structures, without extensive reliance on domain-specific knowledge. The algorithm was evaluated using a standard medical image library, showing superior performance metrics compared to existing methods, thereby demonstrating its potential in enhancing automated medical diagnostics through deep learning", "year": 2024, "venue": "arXiv.org", "authors": [ "Haowei Yang", "Yuxiang Hu", "Shuyao He", "Ting Xu", "Jiajie Yuan", "Xingxin Gu" ], "externalIds": { "ArXiv": "2408.02074", "DBLP": "journals/corr/abs-2408-02074", "DOI": "10.48550/arXiv.2408.02074", "CorpusId": 271709689 }, "url": "https://www.semanticscholar.org/paper/690ba01bec64a3ef0b5817a1d365aecdb25d02be", "referenceCount": 21, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "Exploring Anomaly Detection and Risk Assessment in Financial Markets Using Deep Neural Networks", "abstract": "In this paper, deep learning technology, along with a Gated Recurrent Unit (GRU) combined with an attention mechanism, is used to enhance the recognition ability and risk assessment accuracy of abnormal trading behavior in financial markets. The GRU effectively solves the problem of gradient vanishing in traditional recurrent neural networks through its unique gated structure, allowing the model to learn more stable and effective feature representations in long sequence data. On this basis, the contextual attention (CA) module in the attention mechanism is introduced, enabling the model to automatically learn and assign different weights to various parts of the input sequence. 
Combined with bidirectional GRU and the attention mechanism, the model can not only capture temporal dependencies in the sequence, but also highlight the key features that affect market anomalies, thus improving the model's ability to understand complex market dynamics.", "year": 2024, "venue": "International Journal of Innovative Research in Computer Science & Technology", "authors": [ "Bingxing Wang", "Yuxin Dong", "Jianhua Yao", "Honglin Qin", "Jiajing Wang" ], "externalIds": { "DOI": "10.55524/ijircst.2024.12.4.15", "CorpusId": 271782868 }, "url": "https://www.semanticscholar.org/paper/4859451508e7072a33c62ccfd4b21143752292d4", "referenceCount": 0, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Research on Adverse Drug Reaction Prediction Model Combining Knowledge Graph Embedding and Deep Learning", "abstract": "In clinical treatment, identifying potential adverse reactions of drugs can help assist doctors in making medication decisions. In response to the problems in previous studies that features are high-dimensional and sparse, independent prediction models need to be constructed for each adverse reaction of drugs, and the prediction accuracy is low, this paper develops an adverse drug reaction prediction model based on knowledge graph embedding and deep learning, which can predict experimental results. Unified prediction of adverse drug reactions covered. Knowledge graph embedding technology can fuse the associated information between drugs and alleviate the shortcomings of high-dimensional sparsity in feature matrices, and the efficient training capabilities of deep learning can improve the prediction accuracy of the model. This article builds an adverse drug reaction knowledge graph based on drug feature data; by analyzing the embedding effect of the knowledge graph under different embedding strategies, the best embedding strategy is selected to obtain sample vectors; and then a convolutional neural network model is constructed to predict adverse reactions. The results show that under the DistMult embedding model and 400-dimensional embedding strategy, the convolutional neural network model has the best prediction effect; the average accuracy, F1 score, recall rate and area under the curve of repeated experiments are better than the methods reported in the literature. The obtained prediction model has good prediction accuracy and stability, and can provide an effective reference for later safe medication guidance.", "year": 2024, "venue": "2024 4th International Conference on Machine Learning and Intelligent Systems Engineering (MLISE)", "authors": [ "Yufeng Li", "Wenchao Zhao", "Bo Dang", "Xu Yan", "Weimin Wang", "Min Gao", "Mingxuan Xiao" ], "externalIds": { "ArXiv": "2407.16715", "DBLP": "journals/corr/abs-2407-16715", "DOI": "10.1109/MLISE62164.2024.10674360", "CorpusId": 271404190 }, "url": "https://www.semanticscholar.org/paper/4383242be5bdfb30ffa84e58cc252acfb58d4878", "referenceCount": 31, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Biology" ] }, { "title": "Survival Prediction Across Diverse Cancer Types Using Neural Networks", "abstract": "Gastric cancer and Colon adenocarcinoma represent widespread and challenging malignancies with high mortality rates and complex treatment landscapes. 
In response to the critical need for accurate prognosis in cancer patients, the medical community has embraced the 5-year survival rate as a vital metric for estimating patient outcomes. This study introduces a pioneering approach to enhance survival prediction models for gastric and Colon adenocarcinoma patients. Leveraging advanced image analysis techniques, we sliced whole slide images (WSI) of these cancers, extracting comprehensive features to capture nuanced tumor characteristics. Subsequently, we constructed patient-level graphs, encapsulating intricate spatial relationships within tumor tissues. These graphs served as inputs for a sophisticated 4-layer graph convolutional neural network (GCN), designed to exploit the inherent connectivity of the data for comprehensive analysis and prediction. By integrating patients’ total survival time and survival status, we computed C-index values for gastric cancer and Colon adenocarcinoma, yielding 0.57 and 0.64, respectively. Significantly surpassing previous convolutional neural network models, these results underscore the efficacy of our approach in accurately predicting patient survival outcomes. This research holds profound implications for both the medical and AI communities, offering insights into cancer biology and progression while advancing personalized treatment strategies. Ultimately, our study represents a significant stride in leveraging AI-driven methodologies to revolutionize cancer prognosis and improve patient outcomes on a global scale.", "year": 2024, "venue": "International Conference on Machine Vision and Applications", "authors": [ "Xu Yan", "Weimin Wang", "Mingxuan Xiao", "Yufeng Li", "Min Gao" ], "externalIds": { "DBLP": "conf/icmva/YanWXLG24", "ArXiv": "2404.08713", "DOI": "10.1145/3653946.3653966", "CorpusId": 269149062 }, "url": "https://www.semanticscholar.org/paper/ba258c711b31b547b66ebc9937fce2daf39e214e", "referenceCount": 30, "citationCount": 55, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Biology" ] }, { "title": "Investigation of Creating Accessibility Linked Data Based on Publicly Available Accessibility Datasets", "abstract": "With the fast growth of web, the web is full of diverse data. Linked Data is data on the web that gives URIs to entities and links different data from different domains together, which results in the part of the web infrastructure. Since the dramatic development of Linked Data movement, a great number of accessibility datasets have been published as open data on the web by governments or organizations so that they can be accessed and reused by anyone without any restriction. However, the majority of the accessibility datasets available on the web are in diverse formats such as HTML table, Excel and CSV, rather than Linked Data format – RDF. This paper mainly investigates the method of creating accessibility Linked Data based on the publicly existing available accessibility datasets. First of all, requested accessibility data was extracted from the accessibility dataset. Moreover, Jena RDF API is applied to create RDF documents based on the extracted data from the accessibility dataset. Compared to other approaches such as transforming XML to RDF via schema, current method of this project does not have mapping work and it shows significant convenience. Finally, a triple store was set up to sever the RDF documents. 
In addition, the server has query interface support for test; as a result, users could query the data they need. In the future development, relevant dataset links would be added to current RDF documents. At last the accessibility RDF dump would be published on the web as a part of the web infrastructure. Index Terms—Linked Data, Open Data, Semantic Web, Accessibility Dataset.", "year": 2023, "venue": "International Conference on Communication and Network Security", "authors": [ "Yufeng Li", "Xu Yan", "Mingxuan Xiao", "Weimin Wang", "Fei Zhang" ], "externalIds": { "DBLP": "conf/iccns/LiYXWZ23", "DOI": "10.1145/3638782.3638794", "CorpusId": 269250942 }, "url": "https://www.semanticscholar.org/paper/90d4e738945215e478260b257e603c6a8dd341ae", "referenceCount": 25, "citationCount": 41, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "NDC-Scene: Boost Monocular 3D Semantic Scene Completion in Normalized Device Coordinates Space", "abstract": "Monocular 3D Semantic Scene Completion (SSC) has garnered significant attention in recent years due to its potential to predict complex semantics and geometry shapes from a single image, requiring no 3D inputs. In this paper, we identify several critical issues in current state-of-the-art methods, including the Feature Ambiguity of projected 2D features in the ray to the 3D space, the Pose Ambiguity of the 3D convolution, and the Computation Imbalance in the 3D convolution across different depth levels. To address these problems, we devise a novel Normalized Device Coordinates scene completion network (NDC-Scene) that directly extends the 2D feature map to a Normalized Device Coordinates (NDC) space, rather than to the world space directly, through progressive restoration of the dimension of depth with deconvolution operations. Experiment results demonstrate that transferring the majority of computation from the target 3D space to the proposed normalized device coordinates space benefits monocular SSC tasks. Additionally, we design a Depth-Adaptive Dual Decoder to simultaneously upsample and fuse the 2D and 3D feature maps, further improving overall performance. Our extensive experiments confirm that the proposed method consistently outperforms state-of-the-art methods on both outdoor SemanticKITTI and indoor NYUv2 datasets. Our code are available at https://github.com/Jiawei-Yao0812/NDCScene.", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Jiawei Yao", "Chuming Li", "Keqiang Sun", "Yingjie Cai", "Hao Li", "Ouyang Wanli", "Hongsheng Li" ], "externalIds": { "DBLP": "journals/corr/abs-2309-14616", "ArXiv": "2309.14616", "DOI": "10.1109/ICCV51070.2023.00867", "CorpusId": 262825616 }, "url": "https://www.semanticscholar.org/paper/ba69198ad585898dd304ff0cd296ea4cea033e20", "referenceCount": 52, "citationCount": 57, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SE4SA: a deep syntactical contextualized text representation learning approach for sentiment analysis", "abstract": "Recently, many pre-trained text embedding models have been applied to effectively extract latent features from texts and achieve remarkable performance in various downstream tasks of sentiment analysis domain. However, these pre-trained text embedding models also encounter limitations related to the capability preserving the syntactical structure as well as the global long-range dependent relationships of words. 
Thus, they might fail to recognize the relevant syntactical features of words as valuable evidences for analyzing sentiment aspects. To overcome these limitations, we proposed a novel deep semantic contextual embedding technique for sentiment analysis, called as: SE4SA. Our proposed SE4SA is a multi-level text embedding model which enables to jointly exploit the long-range syntactical and sequential representations of texts. Then, these achieved rich semantic textual representations can support to have a better understanding on the sentiment aspects of the given text corpus, thereby resulting the better performance on sentiment analysis task. Extensive experiments in several benchmark datasets demonstrate the effectiveness or our proposed SE4SA model in comparing with recent state-of-the-art model.", "year": 2021, "venue": "Journal of Intelligent & Fuzzy Systems", "authors": [ "Tham Vo" ], "externalIds": { "MAG": "3197612334", "DBLP": "journals/jifs/Vo21", "DOI": "10.3233/jifs-211535", "CorpusId": 239720225 }, "url": "https://www.semanticscholar.org/paper/06ec38df6f92b697e28bc97a9268f85a59ccb3a1", "referenceCount": 13, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Survey on Aspect-Level Sentiment Analysis", "abstract": "The field of sentiment analysis, in which sentiment is gathered, analyzed, and aggregated from text, has seen a lot of attention in the last few years. The corresponding growth of the field has resulted in the emergence of various subareas, each addressing a different level of analysis or research question. This survey focuses on aspect-level sentiment analysis, where the goal is to find and aggregate sentiment on entities mentioned within documents or aspects of them. An in-depth overview of the current state-of-the-art is given, showing the tremendous progress that has already been made in finding both the target, which can be an entity as such, or some aspect of it, and the corresponding sentiment. Aspect-level sentiment analysis yields very fine-grained sentiment information which can be useful for applications in various domains. Current solutions are categorized based on whether they provide a method for aspect detection, sentiment analysis, or both. Furthermore, a breakdown based on the type of algorithm used is provided. For each discussed study, the reported performance is included. To facilitate the quantitative evaluation of the various proposed methods, a call is made for the standardization of the evaluation methodology that includes the use of shared data sets. Semanticallyrich concept-centric aspect-level sentiment analysis is discussed and identified as one of the most promising future research direction.", "year": 2016, "venue": "IEEE Transactions on Knowledge and Data Engineering", "authors": [ "K. Schouten", "Flavius Frasincar" ], "externalIds": { "DBLP": "journals/tkde/SchoutenF16", "MAG": "2253519362", "DOI": "10.1109/TKDE.2015.2485209", "CorpusId": 5417681 }, "url": "https://www.semanticscholar.org/paper/f90531de9e3f62c7c493a48cf33fba5b8fa661c4", "referenceCount": 98, "citationCount": 633, "influentialCitationCount": 21, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Application of Semi-Supervised Learning in Image Classification: Research on Fusion of Labeled and Unlabeled Data", "abstract": "Deep learning has attracted wide attention recently because of its excellent feature representation ability and end-to-end automatic learning method. 
Especially in clinical medical imaging diagnosis, the semi-supervised deep learning model is favored and widely used because it can make maximum use of a limited number of labeled data and combine it with a large number of unlabeled data to extract more information and knowledge from it. However, the scarcity of medical image data, the vast image size, and the instability of image quality directly affect the model’s robustness, generalization, and image classification performance. Therefore, this paper proposes a new semi-supervised learning model, which uses quadratic neurons instead of traditional neurons, aiming to use quadratic convolution instead of the conventional convolution layer to improve the feature extraction ability of the model. In addition, we introduce two Dropout layers and two fully connected layers at the end of the model to enhance the nonlinear fitting ability of the network. Experiments on two large medical public data sets - ISIC 2019 and Retinopathy OCT - show that our method can improve the model’s generalization performance and image classification accuracy.", "year": 2024, "venue": "IEEE Access", "authors": [ "Sai Li", "Peng Kou", "Miao Ma", "Haoyu Yang", "Shuo Huang", "Zhengyi Yang" ], "externalIds": { "DBLP": "journals/access/LiKMYHY24", "DOI": "10.1109/ACCESS.2024.3367772", "CorpusId": 267965670 }, "url": "https://www.semanticscholar.org/paper/b350e764266473c8d9c54ac822f8dd2cf71c0186", "referenceCount": 51, "citationCount": 17, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Unveiling Patterns: A Study on Semi-Supervised Classification of Strip Surface Defects", "abstract": "As a critical intermediate material in the iron and steel industry, strip steel will inevitably have various surface defects during its processing, which directly affects the service performance and life of the material. Therefore, the classification technology of strip surface defects has always been the focus of research. Currently, combining computer vision with deep learning is often used to classify the surface defects of strip steel, which usually runs in full supervision mode. However, the performance of the complete supervision method depends mainly on the quality and quantity of labeled samples. At the same time, in industrial scenes, there are few labeled samples available, and most even have no labels, which seriously restricts the performance of the traditional full supervision model. This paper introduces the idea of semi-supervised learning, and a new semi-supervised classification model of strip surface defects is proposed to alleviate the degradation of model classification performance caused by insufficient labeled samples. Specifically, a new image synthesis model (ISM) is proposed in this paper. By improving the loss function of the discriminator, the generated false samples are more realistic. In addition, this paper also presents a double uncertainty weighting technique (DUW), which weighs the loss of misclassified samples in a more detailed way, thus realizing fine adjustment of the model. This method can fully mine the potential feature information in unlabeled samples and further improve the performance and generalization ability of the model. In this paper, we use the NEU-CLS dataset to test our model. 
When only 10% and 90% of labeled and unlabeled samples are used for training, the classification accuracy reaches 91.14%, fully proving this method’s practicability and superiority.", "year": 2023, "venue": "IEEE Access", "authors": [ "Yongfei Liu", "Haoyu Yang", "Chenwei Wu" ], "externalIds": { "DBLP": "journals/access/LiuYW23", "DOI": "10.1109/ACCESS.2023.3326843", "CorpusId": 264465656 }, "url": "https://www.semanticscholar.org/paper/24c840b4b4065b876eb614859affe49d58699370", "referenceCount": 49, "citationCount": 25, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Adaptive Spatio-Temporal Aggregation for Temporal Dynamic Graph-Based Fraud Risk Detection", "abstract": null, "year": null, "venue": "Journal of Computer Technology and Software", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Enhanced Unsupervised Image Registration via Dense U-Net and Channel Attention", "abstract": null, "year": null, "venue": "Journal of Computer Science and Software Applications", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Feature Extraction and Model Optimization of Deep Learning in Stock Market Prediction", "abstract": null, "year": null, "venue": "Journal of Computer Technology and Software", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Hybrid LSTM-GARCH Framework for Financial Market Volatility Risk Prediction", "abstract": null, "year": null, "venue": "Journal of Computer Science and Software Applications", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Advancing Emotional Analysis with Large Language Models", "abstract": null, "year": null, "venue": "Journal of Computer Science and Software Applications", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Named Entity Recognition: A Comparative Study of Advanced Pre-trained Model", "abstract": null, "year": null, "venue": "Journal of Computer Technology and Software", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Early diagnosis of Alzheimer's disease from MRI images with deep learning model": { "paper_title": "Early diagnosis of Alzheimer's disease from MRI images with deep learning model", "arxiv_id": "2409.18814v1", "keyword": "deep learning", "authors": [ "Sajjad Aghasi Javid", "Mahmood Mohassel Feghhi" ], "references": [ { "title": "COVID-19 Diagnosis: ULGFBP-ResNet51 approach on the CT and the Chest X-ray Images Classification", "abstract": "The contagious and pandemic COVID-19 disease is currently considered as the main health concern and posed widespread panic across human-beings. It affects the human respiratory tract and lungs intensely. So that it has imposed significant threats for premature death. 
Although, its early diagnosis can play a vital role in revival phase, the radiography tests with the manual intervention are a time-consuming process. Time is also limited for such manual inspecting of numerous patients in the hospitals. Thus, the necessity of automatic diagnosis on the chest X-ray or the CT images with a high efficient performance is urgent. Toward this end, we propose a novel method, named as the ULGFBP-ResNet51 to tackle with the COVID-19 diagnosis in the images. In fact, this method includes Uniform Local Binary Pattern (ULBP), Gabor Filter (GF), and ResNet51. According to our results, this method could offer superior performance in comparison with the other methods, and attain maximum accuracy.", "year": 2023, "venue": "Scientia Iranica. International Journal of Science and Technology", "authors": [ "V. Esmaeili", "Mahmood Mohassel Feghhi", "S. Shahdi" ], "externalIds": { "DBLP": "journals/corr/abs-2312-12876", "ArXiv": "2312.12876", "DOI": "10.48550/arXiv.2312.12876", "CorpusId": 266375222 }, "url": "https://www.semanticscholar.org/paper/4067377317eb462b3760c470fd790e06751f5617", "referenceCount": 82, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "A deep learning based convolutional neural network model with VGG16 feature extractor for the detection of Alzheimer Disease using MRI scans", "abstract": null, "year": 2022, "venue": "Measurement: Sensors", "authors": [ "Shagun Sharma", "Kalpna Guleria", "S. Tiwari", "Sushil Kumar" ], "externalIds": { "DOI": "10.1016/j.measen.2022.100506", "CorpusId": 252739902 }, "url": "https://www.semanticscholar.org/paper/75192e7a3a15485902cc6800f7b2cfd8830a8e27", "referenceCount": 23, "citationCount": 70, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Recognizing irrelevant faces in short-form videos based on feature fusion and active learning", "abstract": null, "year": 2022, "venue": "Neurocomputing", "authors": [ "Mingcheng Zhu", "Rongchuan Zhang", "Haizhou Wang" ], "externalIds": { "DBLP": "journals/ijon/ZhuZW22", "DOI": "10.1016/j.neucom.2022.06.064", "CorpusId": 249938308 }, "url": "https://www.semanticscholar.org/paper/5f1e8298ee3772d78f54f6f49d6765132af9d594", "referenceCount": 0, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Early COVID-19 Diagnosis from Lung Ultrasound Images Combining RIULBP-TP and 3D-DenseNet", "abstract": "The pandemic of COVID-19 has affected the world with the high deaths rate. Early diagnosis of this disease is the bottleneck to the patient's health recovery. Its symptoms appear through the wide range of experiments especially accompany with the severe lung lesions. These lesions could be spotted on the lung ultrasound data. Being non-intrusive, low cost, portable, and accurate enough are among the main pros of ultrasound imaging. However, this imaging modality most often contain variety of noises. In order to overcome this challenge, we propose a novel approach combining Rotation Invariant Uniform LBP on 3 Planes (RIULBP-TP) and 3D-DenseNet. These methods are proved to be robust against various noises. Accordingly, our method reaches outstanding results comparing to related most state-of-the-art methods.", "year": 2022, "venue": "Iranian Joint Congress on Fuzzy and Intelligent Systems", "authors": [ "V. 
Esmaeili", "Mahmood Mohassel Feghhi", "Seyed Omid Shahdi" ], "externalIds": { "DOI": "10.1109/CFIS54774.2022.9756430", "CorpusId": 248269514 }, "url": "https://www.semanticscholar.org/paper/ae4022f74be75ce24ccc5ad432eecc9d9c48f7b2", "referenceCount": 0, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Hybrid Inception v3 XGBoost Model for Acute Lymphoblastic Leukemia Classification", "abstract": "Acute lymphoblastic leukemia (ALL) is the most common type of pediatric malignancy which accounts for 25% of all pediatric cancers. It is a life-threatening disease which if left untreated can cause death within a few weeks. Many computerized methods have been proposed for the detection of ALL from microscopic cell images. In this paper, we propose a hybrid Inception v3 XGBoost model for the classification of acute lymphoblastic leukemia (ALL) from microscopic white blood cell images. In the proposed model, Inception v3 acts as the image feature extractor and the XGBoost model acts as the classification head. Experiments indicate that the proposed model performs better than the other methods identified in literature. The proposed hybrid model achieves a weighted F1 score of 0.986. Through experiments, we demonstrate that using an XGBoost classification head instead of a softmax classification head improves classification performance for this dataset for several different CNN backbones (feature extractors). We also visualize the attention map of the features extracted by Inception v3 to interpret the features learnt by the proposed model.", "year": 2021, "venue": "Computational and Mathematical Methods in Medicine", "authors": [ "S. Ramaneswaran", "Kathiravan Srinivasan", "P. M. Vincent", "Chuan-Yu Chang" ], "externalIds": { "MAG": "3183312143", "DOI": "10.1155/2021/2577375", "CorpusId": 237691607 }, "url": "https://www.semanticscholar.org/paper/f5a00a52547e3db5faf0f151d3a392def3c1b31a", "referenceCount": 31, "citationCount": 62, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Automated Classification of Alzheimer’s Disease Based on MRI Image Processing using Convolutional Neural Network (CNN) with AlexNet Architecture", "abstract": "Alzheimer’s disease is a type of brain disease that indicate with memory impairment as the early symptoms. These symptoms occur because the nerve in the brain involved in learning, thinking and memory as cognitive function have been damaged. Alzheimer is one of diseases as the leading cause of death and cannot be cured, but the proper medical treatment can delay the severity of the disease. This study proposes the Convolutional Neural Network (CNN) using AlexNet architecture as a method to develop automated classification system of Alzheimer’s disease. The experiment is conducted using Magnetic Resonance Imaging (MRI) datasets to classify Non-Demented, Very Mild Demented, Mild Demented, and Moderate Demented from 664 MRI datasets. From the experiment, this study achieved 95% of accuracy. The automated Alzheimer’s disease classification can be helpful as assisting tool for medical personnel to diagnose the stage of Alzheimer’s disease so that the appropriate medical treatment can be provided.", "year": 2021, "venue": "Journal of Physics: Conference Series", "authors": [ "Y. Fuadah", "I. Wijayanto", "Nor Kumalasari Caecar Pratiwi", "F. F. Taliningsih", "Syamsul Rizal", "M. A. 
Pramudito" ], "externalIds": { "MAG": "3136687799", "DOI": "10.1088/1742-6596/1844/1/012020", "CorpusId": 233823505 }, "url": "https://www.semanticscholar.org/paper/bb1a82463394a150da7ff3afc53818a6eadd131e", "referenceCount": 11, "citationCount": 41, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Physics" ] }, { "title": "Multimodal deep learning models for early detection of Alzheimer’s disease stage", "abstract": null, "year": 2021, "venue": "Scientific Reports", "authors": [ "Janani Venugopalan", "L. Tong", "H. Hassanzadeh", "May D. Wang" ], "externalIds": { "PubMedCentral": "7864942", "DOI": "10.1038/s41598-020-74399-w", "CorpusId": 231856277, "PubMed": "33547343" }, "url": "https://www.semanticscholar.org/paper/665a2d3289252d8f86c2ff8f98410dce2d787bed", "referenceCount": 55, "citationCount": 337, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "A deep feature-based real-time system for Alzheimer disease stage detection", "abstract": null, "year": 2020, "venue": "Multimedia tools and applications", "authors": [ "Hina Nawaz", "Muazzam Maqsood", "Sitara Afzal", "Farhan Aadil", "I. Mehmood", "Seungmin Rho" ], "externalIds": { "DBLP": "journals/mta/NawazMAAMR21", "MAG": "3035162004", "DOI": "10.1007/s11042-020-09087-y", "CorpusId": 219543678 }, "url": "https://www.semanticscholar.org/paper/63b153ce02ad10dc076c1c9b03aa2da09eb46047", "referenceCount": 44, "citationCount": 74, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Hierarchical Fully Convolutional Network for Joint Atrophy Localization and Alzheimer's Disease Diagnosis Using Structural MRI", "abstract": "Structural magnetic resonance imaging (sMRI) has been widely used for computer-aided diagnosis of neurodegenerative disorders, e.g., Alzheimer's disease (AD), due to its sensitivity to morphological changes caused by brain atrophy. Recently, a few deep learning methods (e.g., convolutional neural networks, CNNs) have been proposed to learn task-oriented features from sMRI for AD diagnosis, and achieved superior performance than the conventional learning-based methods using hand-crafted features. However, these existing CNN-based methods still require the pre-determination of informative locations in sMRI. That is, the stage of discriminative atrophy localization is isolated to the latter stages of feature extraction and classifier construction. In this paper, we propose a hierarchical fully convolutional network (H-FCN) to automatically identify discriminative local patches and regions in the whole brain sMRI, upon which multi-scale feature representations are then jointly learned and fused to construct hierarchical classification models for AD diagnosis. Our proposed H-FCN method was evaluated on a large cohort of subjects from two independent datasets (i.e., ADNI-1 and ADNI-2), demonstrating good performance on joint discriminative atrophy localization and brain disease diagnosis.", "year": 2020, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "C. Lian", "Mingxia Liu", "Jun Zhang", "D. 
Shen" ], "externalIds": { "DBLP": "journals/pami/LianLZS20", "MAG": "2906155095", "DOI": "10.1109/TPAMI.2018.2889096", "CorpusId": 58558019, "PubMed": "30582529" }, "url": "https://www.semanticscholar.org/paper/261507db4679ebe05d2c5e797cacc0ba8de2f9e0", "referenceCount": 74, "citationCount": 347, "influentialCitationCount": 29, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Early diagnosis of Alzheimer’s disease using combined features from voxel-based morphometry and cortical, subcortical, and hippocampus regions of MRI T1 brain images", "abstract": "In recent years, several high-dimensional, accurate, and effective classification methods have been proposed for the automatic discrimination of the subject between Alzheimer’s disease (AD) or its prodromal phase {i.e., mild cognitive impairment (MCI)} and healthy control (HC) persons based on T1-weighted structural magnetic resonance imaging (sMRI). These methods emphasis only on using the individual feature from sMRI images for the classification of AD, MCI, and HC subjects and their achieved classification accuracy is low. However, latest multimodal studies have shown that combining multiple features from different sMRI analysis techniques can improve the classification accuracy for these types of subjects. In this paper, we propose a novel classification technique that precisely distinguishes individuals with AD, aAD (stable MCI, who had not converted to AD within a 36-month time period), and mAD (MCI caused by AD, who had converted to AD within a 36-month time period) from HC individuals. The proposed method combines three different features extracted from structural MR (sMR) images using voxel-based morphometry (VBM), hippocampal volume (HV), and cortical and subcortical segmented region techniques. Three classification experiments were performed (AD vs. HC, aAD vs. mAD, and HC vs. mAD) with 326 subjects (171 elderly controls and 81 AD, 35 aAD, and 39 mAD patients). For the development and validation of the proposed classification method, we acquired the sMR images from the dataset of the National Research Center for Dementia (NRCD). A five-fold cross-validation technique was applied to find the optimal hyperparameters for the classifier, and the classification performance was compared by using three well-known classifiers: K-nearest neighbor, support vector machine, and random forest. Overall, the proposed model with the SVM classifier achieved the best performance on the NRCD dataset. For the individual feature, the VBM technique provided the best results followed by the HV technique. However, the use of combined features improved the classification accuracy and predictive power for the early classification of AD compared to the use of individual features. The most stable and reliable classification results were achieved when combining all extracted features. Additionally, to analyze the efficiency of the proposed model, we used the Alzheimer’s Disease Neuroimaging Initiative (ADNI) dataset to compare the classification performance of the proposed model with those of several state-of-the-art methods.", "year": 2019, "venue": "PLoS ONE", "authors": [ "Yubraj Gupta", "K. Lee", "K. Y. Choi", "J. Lee", "Byeong-Chae Kim", "G. 
Kwon" ], "externalIds": { "MAG": "2978201752", "PubMedCentral": "6777799", "DOI": "10.1371/journal.pone.0222446", "CorpusId": 203720802, "PubMed": "31584953" }, "url": "https://www.semanticscholar.org/paper/d11b74abda0ec0b80978085dd784f550b0f7a204", "referenceCount": 84, "citationCount": 64, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Using Deep Siamese Neural Networks for Detection of Brain Asymmetries Associated with Alzheimer's Disease and Mild Cognitive Impairment.", "abstract": null, "year": 2019, "venue": "Magnetic Resonance Imaging", "authors": [ "Chin-Fu Liu", "Shreyas Padhy", "Sandhya Ramachandran", "Victor Wang", "Andrew Efimov", "Alonso Bernal", "Linyuan Shi", "M. Vaillant", "J. Ratnanather", "A. Faria", "B. Caffo", "M. Albert", "M. Miller" ], "externalIds": { "MAG": "2957629889", "DOI": "10.1016/j.mri.2019.07.003", "CorpusId": 197665701, "PubMed": "31319126" }, "url": "https://www.semanticscholar.org/paper/edd75198bdade57a98e5cc6adb61ed5e97b180cc", "referenceCount": 61, "citationCount": 54, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Ensembles of Patch-Based Classifiers for Diagnosis of Alzheimer Diseases", "abstract": "There is ongoing research for the automatic diagnosis of Alzheimer’s disease (AD) based on traditional machine learning techniques, and deep learning-based approaches are becoming a popular choice for AD diagnosis. The state-of-the-art techniques that consider multimodal diagnosis have been shown to have accuracy better than a manual diagnosis. However, collecting data from different modalities is time-consuming and expensive, and some modalities may have radioactive side effects. Our study is confined to structural magnetic resonance imaging (sMRI). The objectives of our attempt are as follows: 1) to increase the accuracy level that is comparable to the state-of-the-art methods; 2) to overcome the overfitting problem, and; 3) to analyze proven landmarks of the brain that provide discernible features for AD diagnosis. Here, we focused specifically on both the left and right hippocampus areas. To achieve the objectives, at first, we incorporate ensembles of simple convolutional neural networks (CNNs) as feature extractors and softmax cross-entropy as the classifier. Then, considering the scarcity of data, we deployed a patch-based approach. We have performed our experiment on the Gwangju Alzheimer’s and Related Dementia (GARD) cohort dataset prepared by the National Research Center for Dementia (GARD), Gwangju, South Korea. We manually localized the left and right hippocampus and fed three view patches (TVPs) to the CNN after the preprocessing steps. We achieve 90.05% accuracy. We have compared our model with the state-of-the-art methods on the same dataset they have used and found our result comparable.", "year": 2019, "venue": "IEEE Access", "authors": [ "Samsuddin Ahmed", "K. Y. Choi", "J. Lee", "Byeong C. Kim", "G. Kwon", "K. Lee", "H. 
Jung" ], "externalIds": { "MAG": "2947823562", "DBLP": "journals/access/AhmedCLKKLJ19", "DOI": "10.1109/ACCESS.2019.2920011", "CorpusId": 192600380 }, "url": "https://www.semanticscholar.org/paper/9a62d8d1587b92fee2058426100f6d352ec22d6b", "referenceCount": 36, "citationCount": 70, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Convolutional neural network based Alzheimer’s disease classification from magnetic resonance brain images", "abstract": null, "year": 2019, "venue": "Cognitive Systems Research", "authors": [ "Rachna Jain", "N. Jain", "Akshay Aggarwal", "D. Hemanth" ], "externalIds": { "MAG": "2907148404", "DBLP": "journals/cogsr/JainJAH19", "DOI": "10.1016/j.cogsys.2018.12.015", "CorpusId": 150142426 }, "url": "https://www.semanticscholar.org/paper/63e13545e992830c08737ad77831376af298dbe4", "referenceCount": 26, "citationCount": 276, "influentialCitationCount": 13, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Convolutional Neural Network approach for classification of dementia stages based on 2D-spectral representation of EEG recordings", "abstract": null, "year": 2019, "venue": "Neurocomputing", "authors": [ "C. Ieracitano", "N. Mammone", "A. Bramanti", "A. Hussain", "F. Morabito" ], "externalIds": { "MAG": "2894895904", "DBLP": "journals/ijon/IeracitanoMBHM19", "DOI": "10.1016/J.NEUCOM.2018.09.071", "CorpusId": 53242523 }, "url": "https://www.semanticscholar.org/paper/63f8cf549cbf8a93a6f4a2ffd83645b8da994ec1", "referenceCount": 36, "citationCount": 189, "influentialCitationCount": 12, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Early Diagnosis of Alzheimer's Disease from MRI Images Using PNN", "abstract": "Here we propose a new method for the diagnosis of Alzheimer's disease from MRI images. The most prevalent, reliable and result-oriented methods for diagnosis of Alzheimer's disease are: Firstly, measuring the rate of atrophy of the hippocampus and total brain volume; Secondly, extracting information from the gray matter, white matter and cerebro spinal fluid (CSF) of the brain. In this work, different statistical features such as contrast, homogeneity, correlation, energy and also shape features are extracted from the MRI images. For every image a number of features are extracted and these are fed to the classifier. The images are classified into any of the three classes: Alzheimer's Disease (AD), Mild Cognitive Impairment (MCI) or Normal Control (NC). The proposed approach compares the performances of Probabilistic Neural Network(PNN), Support Vector Machines(SVM) and K-Nearest Neighbour(KNN) in terms of their accuracy, specificity and sensitivity. The MRI images are obtained from the ADNI database. ADNI is a clinically validated database of MRI, PET and FMRI images of subjects.", "year": 2018, "venue": "2018 International CET Conference on Control, Communication, and Computing (IC4)", "authors": [ "N. Mathew", "R. Vivek", "P. R. 
Anurenjan" ], "externalIds": { "MAG": "2900458402", "DOI": "10.1109/CETIC4.2018.8530910", "CorpusId": 53282150 }, "url": "https://www.semanticscholar.org/paper/4b183366863a5dbc89edd1c25dca40f6b3f450af", "referenceCount": 7, "citationCount": 16, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Multimodal and Multiscale Deep Neural Networks for the Early Diagnosis of Alzheimer’s Disease using structural MR and FDG-PET images", "abstract": null, "year": 2017, "venue": "Scientific Reports", "authors": [ "Donghuan Lu", "K. Popuri", "G. Ding", "R. Balachandar", "M. Beg" ], "externalIds": { "ArXiv": "1710.04782", "MAG": "2950246085", "PubMedCentral": "5890270", "DBLP": "journals/corr/abs-1710-04782", "DOI": "10.1038/s41598-018-22871-z", "CorpusId": 4697592, "PubMed": "29632364" }, "url": "https://www.semanticscholar.org/paper/4dc5cbc458298ade91b8f87f2d2cc50d4cd764ec", "referenceCount": 59, "citationCount": 291, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Rethinking the Inception Architecture for Computer Vision", "abstract": "Convolutional networks are at the core of most state of-the-art computer vision solutions for a wide variety of tasks. Since 2014 very deep convolutional networks started to become mainstream, yielding substantial gains in various benchmarks. Although increased model size and computational cost tend to translate to immediate quality gains for most tasks (as long as enough labeled data is provided for training), computational efficiency and low parameter count are still enabling factors for various use cases such as mobile vision and big-data scenarios. Here we are exploring ways to scale up networks in ways that aim at utilizing the added computation as efficiently as possible by suitably factorized convolutions and aggressive regularization. We benchmark our methods on the ILSVRC 2012 classification challenge validation set demonstrate substantial gains over the state of the art: 21:2% top-1 and 5:6% top-5 error for single frame evaluation using a network with a computational cost of 5 billion multiply-adds per inference and with using less than 25 million parameters. With an ensemble of 4 models and multi-crop evaluation, we report 3:5% top-5 error and 17:3% top-1 error on the validation set and 3:6% top-5 error on the official test set.", "year": 2015, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Christian Szegedy", "Vincent Vanhoucke", "Sergey Ioffe", "Jonathon Shlens", "Z. Wojna" ], "externalIds": { "MAG": "2183341477", "DBLP": "conf/cvpr/SzegedyVISW16", "ArXiv": "1512.00567", "DOI": "10.1109/CVPR.2016.308", "CorpusId": 206593880 }, "url": "https://www.semanticscholar.org/paper/23ffaa0fe06eae05817f527a47ac3291077f9e58", "referenceCount": 24, "citationCount": 25057, "influentialCitationCount": 3043, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multimodal Neuroimaging Feature Learning for Multiclass Diagnosis of Alzheimer's Disease", "abstract": "The accurate diagnosis of Alzheimer's disease (AD) is essential for patient care and will be increasingly important as disease modifying agents become available, early in the course of the disease. Although studies have applied machine learning methods for the computer-aided diagnosis of AD, a bottleneck in the diagnostic performance was shown in previous methods, due to the lacking of efficient strategies for representing neuroimaging biomarkers. 
In this study, we designed a novel diagnostic framework with deep learning architecture to aid the diagnosis of AD. This framework uses a zero-masking strategy for data fusion to extract complementary information from multiple data modalities. Compared to the previous state-of-the-art workflows, our method is capable of fusing multimodal neuroimaging features in one setting and has the potential to require less labeled data. A performance gain was achieved in both binary classification and multiclass classification of AD. The advantages and limitations of the proposed framework are discussed.", "year": 2015, "venue": "IEEE Transactions on Biomedical Engineering", "authors": [ "Siqi Liu", "Sidong Liu", "Weidong (Tom) Cai", "Hangyu Che", "Sonia Pujol", "R. Kikinis", "D. Feng", "M. Fulham" ], "externalIds": { "DBLP": "journals/tbe/LiuLCCPKFFA15", "MAG": "2130371234", "DOI": "10.1109/TBME.2014.2372011", "CorpusId": 7183410, "PubMed": "25423647" }, "url": "https://www.semanticscholar.org/paper/101aad03db53cac05e89e5d474fe68948afae09d", "referenceCount": 66, "citationCount": 437, "influentialCitationCount": 29, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Psychology", "Medicine" ] }, { "title": "Magnetic resonance imaging methodology", "abstract": null, "year": 2009, "venue": "European Journal of Nuclear Medicine and Molecular Imaging", "authors": [ "E. Moser", "A. Stadlbauer", "C. Windischberger", "H. Quick", "M. Ladd" ], "externalIds": { "MAG": "2107496735", "DOI": "10.1007/s00259-008-0938-3", "CorpusId": 2803993, "PubMed": "19104805" }, "url": "https://www.semanticscholar.org/paper/98fa1ada5b13ea0557e3083c2a20c5712384a72b", "referenceCount": 32, "citationCount": 52, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "AlzheimerNet: An Effective Deep Learning Based Proposition for Alzheimer’s Disease Stages Classification From Functional Brain Changes in Magnetic Resonance Images", "abstract": "Alzheimer’s disease is largely the underlying cause of dementia due to its progressive neurodegenerative nature among the elderly. The disease can be divided into five stages: Subjective Memory Concern (SMC), Mild Cognitive Impairment (MCI), Early MCI (EMCI), Late MCI (LMCI), and Alzheimer’s Disease (AD). Alzheimer’s disease is conventionally diagnosed using an MRI scan of the brain. In this research, we propose a fine-tuned convolutional neural network (CNN) classifier called AlzheimerNet, which can identify all five stages of Alzheimer’s disease and the Normal Control (NC) class. The ADNI database’s MRI scan dataset is obtained for use in training and testing the proposed model. To prepare the raw data for analysis, we applied the CLAHE image enhancement method. Data augmentation was used to remedy the unbalanced nature of the dataset and the resultant dataset consisted of 60000 image data on the 6 classes. Initially, five existing models including VGG16, MobileNetV2, AlexNet, ResNet50 and InceptionV3 were trained and tested to achieve test accuracies of 78.84%, 86.85%, 78.87%, 80.98% and 96.31% respectively. Since InceptionV3 provides the highest accuracy, this model is later modified to design the AlzheimerNet using RMSprop optimizer and learning rate 0.00001 to achieve the highest test accuracy of 98.67%. 
The five pre-trained models and the proposed fine-tuned model were compared in terms of various performance matrices to demonstrate whether the AlzheimerNet model is in fact performing better in classifying and detecting the six classes. An ablation study shows the hyperparameters used in the experiment. The suggested model outperforms the traditional methods for classifying Alzheimer’s disease stages from brain MRI, as measured by a two-tailed Wilcoxon signed-rank test, with a significance of <0.05.", "year": 2023, "venue": "IEEE Access", "authors": [ "F M Javed Mehedi Shamrat", "Shamima Akter", "S. Azam", "Asif Karim", "Pronab Ghosh", "Zarrin Tasnim", "Khan Md Hasib", "F. De Boer", "Kawsar Ahmed" ], "externalIds": { "DBLP": "journals/access/ShamratAAKGTHBA23", "DOI": "10.1109/ACCESS.2023.3244952", "CorpusId": 256888721 }, "url": "https://www.semanticscholar.org/paper/81831456b12b2f7a8861199484720720e9282f8d", "referenceCount": 60, "citationCount": 50, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DeepCurvMRI: Deep Convolutional Curvelet Transform-Based MRI Approach for Early Detection of Alzheimer’s Disease", "abstract": "Alzheimer’s Disease (AD) is the most common form of dementia. It usually manifests through progressive loss of cognitive function and memory, subsequently impairing the person’s ability to live without assistance and causing a tremendous impact on the affected individuals and society. Currently, AD diagnosis relies on cognitive tests, blood tests, behavior assessments, brain imaging, and medical history analysis. However, these procedures are subjective and inconsistent, making an accurate prediction for the early stages of AD difficult. This paper introduces a curvelet transform (CT) based-convolutional neural network (CNN) (DeepCurvMRI) model for improving the accuracy of early-stage AD disease detection using from Magnetic resonance imaging (MRI) images. The MRI images were first pre-processed using CT, and then a CNN model was trained using the new image representation. In this study, we used Alzheimer’s MRI images dataset hosted on the Kaggle platform to train DeepCurvMRI for multi and binary classification tasks. DeepCurvMRI achieved an accuracy, sensitivity, specificity, and F1 score of $98.62\\% \\pm 0.10\\%$ , $99.05\\% \\pm 0.10\\%$ , $98.50\\% \\pm 0.03\\%$ , and 99.21± 0.08, respectively, using the leave-one-group-out (LOGO) cross-validation approach in multi-classification task. The highest accuracy obtained in binary classification is $98.71\\% \\pm 0.05\\%$ . In addition to LOGO, DeepCurvMRI was tested using randomly stratified 10-fold and 5-fold cross validation. These encouraging results are superior to the ones reported in related methods, showcasing the potentiality of DeepCurvMRI in capturing the key anatomical changes in MRI images that can be differentiated between various staged of Alzheimer’s disease classes.", "year": 2023, "venue": "IEEE Access", "authors": [ "Chahd Chabib", "L. 
Hadjileontiadis", "Aamna Al Shehhi" ], "externalIds": { "DBLP": "journals/access/ChabibHS23", "DOI": "10.1109/ACCESS.2023.3272482", "CorpusId": 258472753 }, "url": "https://www.semanticscholar.org/paper/62001a266d0b01dd070bad030a8b4ac56ddb5a94", "referenceCount": 69, "citationCount": 10, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "COVID-19 Diagnosis: ULBPFP-Net Approach on Lung Ultrasound Data.", "abstract": null, "year": 2023, "venue": "Iranian Journal of Electrical & Electronic Engineering", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Diagnosis of Covid-19 Disease by Combining Hand-crafted and Deep-learning Methods on Ultrasound Data.", "abstract": null, "year": 2022, "venue": "Journal of Machine Vision and Image Processing", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Volumetric Feature-Based Alzheimer’s Disease Diagnosis From sMRI Data Using a Convolutional Neural Network and a Deep Neural Network", "abstract": "Alzheimer’s disease (AD) is a progressive neurodegenerative disorder that is mostly prevalent in people older than 65 years. The hippocampus is a widely studied region of interest (ROI) for a number of reasons, such as memory function analysis, stress development observation and neurological disorder investigation. Moreover, hippocampal volume atrophy is known to be linked with Alzheimer’s disease. On the other hand, several biomarkers, such as amyloid beta ( $\\text{a}\\beta _{42}$ ) protein, tau, phosphorylated tau and hippocampal volume atrophy, are being used to diagnose AD. In this research work, we have proposed a method to diagnose AD based on slice-wise volumetric features extracted from the left and right hippocampi of structural magnetic resonance imaging (sMRI) data. The proposed method is an aggregation of a convolutional neural network (CNN) model with a deep neural network (DNN) model. The left and right hippocampi have been localized automatically using a two-stage ensemble Hough-CNN. The localized hippocampal positions are used to extract ( $80\\times 80\\times $ 80 voxels) 3-D patches. The 2-D slices are then separated from the 3-D patches along axial, sagittal, and coronal views. The pre-processed 2-D patches are used to extract volumetric features from each slice by using a discrete volume estimation convolutional neural network (DVE-CNN) model. The extracted volumetric features have been used to train and test the classification network. The proposed approach has achieved average weighted classification accuracies of 94.82% and 94.02% based on the extracted volumetric features attributed to the left and right hippocampi, respectively. In addition, it has achieved area under the curve (AUC) values of 92.54% and 90.62% for the left and right hippocampi, respectively. Our method has outperformed the other methods by a certain margin in the same dataset.", "year": 2021, "venue": "IEEE Access", "authors": [ "Abol Basher", "Byeong C. Kim", "K. Lee", "H. 
Jung" ], "externalIds": { "DBLP": "journals/access/BasherKLJ21", "DOI": "10.1109/ACCESS.2021.3059658", "CorpusId": 232062480 }, "url": "https://www.semanticscholar.org/paper/5f477c895769db7f1bd097661622ec9801b49536", "referenceCount": 66, "citationCount": 45, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DEMNET: A Deep Learning Model for Early Diagnosis of Alzheimer Diseases and Dementia From MR Images", "abstract": "Alzheimer’s Disease (AD) is the most common cause of dementia globally. It steadily worsens from mild to severe, impairing one’s ability to complete any work without assistance. It begins to outstrip due to the population ages and diagnosis timeline. For classifying cases, existing approaches incorporate medical history, neuropsychological testing, and Magnetic Resonance Imaging (MRI), but efficient procedures remain inconsistent due to lack of sensitivity and precision. The Convolutional Neural Network (CNN) is utilized to create a framework that can be used to detect specific Alzheimer’s disease characteristics from MRI images. By considering four stages of dementia and conducting a particular diagnosis, the proposed model generates high-resolution disease probability maps from the local brain structure to a multilayer perceptron and provides accurate, intuitive visualizations of individual Alzheimer’s disease risk. To avoid the problem of class imbalance, the samples should be evenly distributed among the classes. The obtained MRI image dataset from Kaggle has a major class imbalance problem. A DEMentia NETwork (DEMNET) is proposed to detect the dementia stages from MRI. The DEMNET achieves an accuracy of 95.23%, Area Under Curve (AUC) of 97% and Cohen’s Kappa value of 0.93 from the Kaggle dataset, which is superior to existing methods. We also used the Alzheimer’s Disease Neuroimaging Initiative (ADNI) dataset to predict AD classes in order to assess the efficacy of the proposed model.", "year": 2021, "venue": "IEEE Access", "authors": [ "S. Murugan", "C. Venkatesan", "M. Sumithra", "Xiao-zhi Gao", "B. Elakkiya", "M. Akila", "Dr. MANOHARAN SUBRAMANIAN" ], "externalIds": { "DBLP": "journals/access/MuruganVSGEAM21", "DOI": "10.1109/ACCESS.2021.3090474", "CorpusId": 235748626 }, "url": "https://www.semanticscholar.org/paper/ec60d2d33db3ee4bb8a20f7693d26e5ad09b58bf", "referenceCount": 0, "citationCount": 131, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An Analytical Study of Covid-19 Pandemic: Fatality Rate and Influential Factors", "abstract": null, "year": 2021, "venue": "Design Engineering", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "A Data Augmentation-Based Framework to Handle Class Imbalance Problem for Alzheimer’s Stage Detection", "abstract": "Alzheimer’s Disease (AD) is the most common form of dementia. It gradually increases from mild stage to severe, affecting the ability to perform common daily tasks without assistance. It is a neurodegenerative illness, presently having no specified cure. Computer-Aided Diagnostic Systems have played an important role to help physicians to identify AD. However, the diagnosis of AD into its four stages; No Dementia, Very Mild Dementia, Mild Dementia, and Moderate Dementia remains an open research area. 
Deep learning assisted computer-aided solutions are proved to be more useful because of their high accuracy. However, the most common problem with deep learning architecture is that large training data is required. Furthermore, the samples should be evenly distributed among the classes to avoid the class imbalance problem. The publicly available dataset (OASIS) has serious class imbalance problem. In this research, we employed a transfer learning-based technique using data augmentation for 3D Magnetic Resonance Imaging (MRI) views from OASIS dataset. The accuracy of the proposed model utilizing a single view of the brain MRI is 98.41% while using 3D-views is 95.11%. The proposed system outperformed the existing techniques for Alzheimer disease stages.", "year": 2019, "venue": "IEEE Access", "authors": [ "Sitara Afzal", "Muazzam Maqsood", "Faria Nazir", "Umair Khan", "Farhan Aadil", "K. Awan", "I. Mehmood", "Oh-Young Song" ], "externalIds": { "MAG": "2965102627", "DBLP": "journals/access/AfzalMNKAAMS19", "DOI": "10.1109/ACCESS.2019.2932786", "CorpusId": 201137828 }, "url": "https://www.semanticscholar.org/paper/ffe0ed985400ff3825b5e2fe95bfa796f7c11c99", "referenceCount": 49, "citationCount": 75, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multimodal Neuroimaging Feature Learning With Multimodal Stacked Deep Polynomial Networks for Diagnosis of Alzheimer's Disease", "abstract": "The accurate diagnosis of Alzheimer's disease (AD) and its early stage, i.e., mild cognitive impairment, is essential for timely treatment and possible delay of AD. Fusion of multimodal neuroimaging data, such as magnetic resonance imaging (MRI) and positron emission tomography (PET), has shown its effectiveness for AD diagnosis. The deep polynomial networks (DPN) is a recently proposed deep learning algorithm, which performs well on both large-scale and small-size datasets. In this study, a multimodal stacked DPN (MM-SDPN) algorithm, which MM-SDPN consists of two-stage SDPNs, is proposed to fuse and learn feature representation from multimodal neuroimaging data for AD diagnosis. Specifically speaking, two SDPNs are first used to learn high-level features of MRI and PET, respectively, which are then fed to another SDPN to fuse multimodal neuroimaging information. The proposed MM-SDPN algorithm is applied to the ADNI dataset to conduct both binary classification and multiclass classification tasks. 
Experimental results indicate that MM-SDPN is superior over the state-of-the-art multimodal feature-learning-based algorithms for AD diagnosis.", "year": 2018, "venue": "IEEE journal of biomedical and health informatics", "authors": [ "Jun Shi", "Xiao Zheng", "Yan Li", "Qi Zhang", "Shihui Ying" ], "externalIds": { "DBLP": "journals/titb/ShiZLZY18", "MAG": "2574038793", "DOI": "10.1109/JBHI.2017.2655720", "CorpusId": 13791763, "PubMed": "28113353" }, "url": "https://www.semanticscholar.org/paper/4f61779a850eff1fe544613a1769f02f0b145441", "referenceCount": 58, "citationCount": 318, "influentialCitationCount": 13, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "World Alzheimer report 2018 the state of the art of dementia research: New frontiers", "abstract": null, "year": 2018, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Disease classification.", "abstract": null, "year": 1968, "venue": "Canadian Medical Association journal", "authors": [ "F. Harris" ], "externalIds": { "CorpusId": 13063401, "PubMed": "20329191" }, "url": "https://www.semanticscholar.org/paper/ca50710cb54410e73265998ea14f2c2765b25c81", "referenceCount": 0, "citationCount": 20, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] } ] }, "Revolutionizing Payload Inspection: A Self-Supervised Journey to Precision with Few Shots": { "paper_title": "Revolutionizing Payload Inspection: A Self-Supervised Journey to Precision with Few Shots", "arxiv_id": "2409.18219v1", "keyword": "deep learning", "authors": [ "Kyle Stein", "Arash Mahyari", "Guillermo Francia III", "Eman El-Sheikh" ], "references": [ { "title": "A Transformer-Based Framework for Payload Malware Detection and Classification", "abstract": "As malicious cyber threats become more sophisticated in breaching computer networks, the need for effective intrusion detection systems (IDSs) becomes crucial. Techniques such as Deep Packet Inspection (DPI) have been introduced to allow IDSs analyze the content of network packets, providing more context for identifying potential threats. IDSs traditionally rely on using anomaly-based and signature-based detection techniques to detect unrecognized and suspicious activity. Deep learning techniques have shown great potential in DPI for IDSs due to their efficiency in learning intricate patterns from the packet content being transmitted through the network. In this paper, we propose an accurate DPI algorithm based on transformers adapted for the purpose of detecting malicious traffic with a classifier head. Transformers learn the complex content of sequence data and generalize them well to similar scenarios thanks to their self-attention mechanism. 
Our proposed method uses the raw payload bytes that represent the packet contents and is deployed as man-in-the-middle. The payload bytes are used to detect malicious packets and classify their types. Experimental results on the UNSW-NB15 and CIC-IOT23 datasets demonstrate that our transformer-based model is effective in distinguishing malicious from benign traffic in the test dataset, attaining an average accuracy of 79% using binary classification and 72% on the multi-classification experiment, both using solely payload bytes.", "year": 2024, "venue": "2024 IEEE World AI IoT Congress (AIIoT)", "authors": [ "Kyle Stein", "A. Mahyari", "Guillermo Francia", "Eman El-Sheikh" ], "externalIds": { "DBLP": "conf/aiiot/SteinMFE24", "ArXiv": "2403.18223", "DOI": "10.1109/AIIoT61789.2024.10579000", "CorpusId": 268724227 }, "url": "https://www.semanticscholar.org/paper/1d1174945da923954d01fa83632f4d49cdb2bfae", "referenceCount": 22, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "CICIoT2023: A Real-Time Dataset and Benchmark for Large-Scale Attacks in IoT Environment", "abstract": "Nowadays, the Internet of Things (IoT) concept plays a pivotal role in society and brings new capabilities to different industries. The number of IoT solutions in areas such as transportation and healthcare is increasing and new services are under development. In the last decade, society has experienced a drastic increase in IoT connections. In fact, IoT connections will increase in the next few years across different areas. Conversely, several challenges still need to be faced to enable efficient and secure operations (e.g., interoperability, security, and standards). Furthermore, although efforts have been made to produce datasets composed of attacks against IoT devices, several possible attacks are not considered. Most existing efforts do not consider an extensive network topology with real IoT devices. The main goal of this research is to propose a novel and extensive IoT attack dataset to foster the development of security analytics applications in real IoT operations. To accomplish this, 33 attacks are executed in an IoT topology composed of 105 devices. These attacks are classified into seven categories, namely DDoS, DoS, Recon, Web-based, brute force, spoofing, and Mirai. Finally, all attacks are executed by malicious IoT devices targeting other IoT devices. The dataset is available on the CIC Dataset website.", "year": 2023, "venue": "Italian National Conference on Sensors", "authors": [ "E. P. Neto", "Sajjad Dadkhah", "Raphael Ferreira", "Alireza Zohourian", "Rongxing Lu", "A. Ghorbani" ], "externalIds": { "DBLP": "journals/sensors/NetoDFZLG23", "PubMedCentral": "10346235", "DOI": "10.3390/s23135941", "CorpusId": 259694870, "PubMed": "37447792" }, "url": "https://www.semanticscholar.org/paper/4618d69b16ad46ecb59a1c693bb42fa7d3502a95", "referenceCount": 105, "citationCount": 119, "influentialCitationCount": 20, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "A Survey on Self-supervised Learning: Algorithms, Applications, and Future Trends.", "abstract": "Deep supervised learning algorithms typically require a large volume of labeled data to achieve satisfactory performance. However, the process of collecting and labeling such data can be expensive and time-consuming. 
Self-supervised learning (SSL), a subset of unsupervised learning, aims to learn discriminative features from unlabeled data without relying on human-annotated labels. SSL has garnered significant attention recently, leading to the development of numerous related algorithms. However, there is a dearth of comprehensive studies that elucidate the connections and evolution of different SSL variants. This paper presents a review of diverse SSL methods, encompassing algorithmic aspects, application domains, three key trends, and open research questions. Firstly, we provide a detailed introduction to the motivations behind most SSL algorithms and compare their commonalities and differences. Secondly, we explore representative applications of SSL in domains such as image processing, computer vision, and natural language processing. Lastly, we discuss the three primary trends observed in SSL research and highlight the open questions that remain. A curated collection of valuable resources can be accessed at https://github.com/guijiejie/SSL.", "year": 2023, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Jie Gui", "Tuo Chen", "Jing Zhang", "Qiong Cao", "Zhe Sun", "Haoran Luo", "Dacheng Tao" ], "externalIds": { "ArXiv": "2301.05712", "DOI": "10.1109/tpami.2024.3415112", "CorpusId": 261046875, "PubMed": "38885108" }, "url": "https://www.semanticscholar.org/paper/0b2134e5ae6f62d66686d5ca9bbbaadc1ddce61e", "referenceCount": 304, "citationCount": 26, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Transport and Application Layer DDoS Attacks Detection to IoT Devices by Using Machine Learning and Deep Learning Models", "abstract": "From smart homes to industrial environments, the IoT is an ally to easing daily activities, where some of them are critical. More and more devices are connected to and through the Internet, which, given the large amount of different manufacturers, may lead to a lack of security standards. Denial of service attacks (DDoS, DoS) represent the most common and critical attack against and from these networks, and in the third quarter of 2021, there was an increase of 31% (compared to the same period of 2020) in the total number of advanced DDoS targeted attacks. This work uses the Bot-IoT dataset, addressing its class imbalance problem, to build a novel Intrusion Detection System based on Machine Learning and Deep Learning models. In order to evaluate how the records timestamps affect the predictions, we used three different feature sets for binary and multiclass classifications; this helped us avoid feature dependencies, as produced by the Argus flow data generator, whilst achieving an average accuracy >99%. Then, we conducted comprehensive experimentation, including time performance evaluation, matching and exceeding the results of the current state-of-the-art for identifying denial of service attacks, where the Decision Tree and Multi-layer Perceptron models were the best performing methods to identify DDoS and DoS attacks over IoT networks.", "year": 2022, "venue": "Italian National Conference on Sensors", "authors": [ "Josue Genaro Almaraz-Rivera", "J. A. P. Díaz", "J. A. 
Cantoral-Ceballos" ], "externalIds": { "PubMedCentral": "9103313", "DBLP": "journals/sensors/Almaraz-RiveraD22", "DOI": "10.3390/s22093367", "CorpusId": 248463449, "PubMed": "35591056" }, "url": "https://www.semanticscholar.org/paper/e84895c16ce97f5eec10f80938d0a0997098dbe2", "referenceCount": 34, "citationCount": 58, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Behaviour Analysis of Open-Source Firewalls Under Security Crisis", "abstract": "Nowadays, in this COVID era, work from home is quietly more preferred than work from the office. Due to this, the need for a firewall has been increased day by day. Every organization uses the firewall to secure their network and create VPN servers to allow their employees to work from home. Due to this, the security of the firewall plays a crucial role. In this paper, we have compared the two most popular open-source firewalls named pfSense and OPNSense. We have examined the security they provide by default without any other attachment. To do this, we performed four different attacks on the firewalls and compared the results. As a result, we have observed that both provide the same security still pfSense has a slight edge when an attacker tries to perform a Brute force attack over OPNSense.", "year": 2022, "venue": "2022 International Conference on Wireless Communications Signal Processing and Networking (WiSPNET)", "authors": [ "Harsh J. Kiratsata", "Deep P. Raval", "Payal K. Viras", "Punit Lalwani", "Himanshu Patel", "Panchal S. D." ], "externalIds": { "DOI": "10.1109/wispnet54241.2022.9767176", "CorpusId": 248687795 }, "url": "https://www.semanticscholar.org/paper/188fe6ad6200a70ff4b95f6ccd31a9474dcb045d", "referenceCount": 0, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Adversarial Masking for Self-Supervised Learning", "abstract": "We propose ADIOS, a masked image model (MIM) framework for self-supervised learning, which simultaneously learns a masking function and an image encoder using an adversarial objective. The image encoder is trained to minimise the distance between representations of the original and that of a masked image. The masking function, conversely, aims at maximising this distance. ADIOS consistently improves on state-of-the-art self-supervised learning (SSL) methods on a variety of tasks and datasets -- including classification on ImageNet100 and STL10, transfer learning on CIFAR10/100, Flowers102 and iNaturalist, as well as robustness evaluated on the backgrounds challenge (Xiao et al., 2021) -- while generating semantically meaningful masks. Unlike modern MIM models such as MAE, BEiT and iBOT, ADIOS does not rely on the image-patch tokenisation construction of Vision Transformers, and can be implemented with convolutional backbones. We further demonstrate that the masks learned by ADIOS are more effective in improving representation learning of SSL methods than masking schemes used in popular MIM models. Code is available at https://github.com/YugeTen/adios.", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "Yuge Shi", "N. Siddharth", "Philip H. S. Torr", "Adam R. 
Kosiorek" ], "externalIds": { "DBLP": "journals/corr/abs-2201-13100", "ArXiv": "2201.13100", "CorpusId": 246430501 }, "url": "https://www.semanticscholar.org/paper/76d3b9d8685b88866abd19615ac0868061ced7e6", "referenceCount": 43, "citationCount": 70, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Diverse Distributions of Self-Supervised Tasks for Meta-Learning in NLP", "abstract": "Meta-learning considers the problem of learning an efficient learning process that can leverage its past experience to accurately solve new tasks. However, the efficacy of meta-learning crucially depends on the distribution of tasks available for training, and this is often assumed to be known a priori or constructed from limited supervised datasets. In this work, we aim to provide task distributions for meta-learning by considering self-supervised tasks automatically proposed from unlabeled text, to enable large-scale meta-learning in NLP. We design multiple distributions of self-supervised tasks by considering important aspects of task diversity, difficulty, type, domain, and curriculum, and investigate how they affect meta-learning performance. Our analysis shows that all these factors meaningfully alter the task distribution, some inducing significant improvements in downstream few-shot accuracy of the meta-learned models. Empirically, results on 20 downstream tasks show significant improvements in few-shot learning – adding up to +4.2% absolute accuracy (on average) to the previous unsupervised meta-learning method, and perform comparably to supervised methods on the FewRel 2.0 benchmark.", "year": 2021, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Trapit Bansal", "K. Gunasekaran", "Tong Wang", "Tsendsuren Munkhdalai", "A. McCallum" ], "externalIds": { "ACL": "2021.emnlp-main.469", "ArXiv": "2111.01322", "DBLP": "conf/emnlp/BansalGWMM21", "DOI": "10.18653/v1/2021.emnlp-main.469", "CorpusId": 240419611 }, "url": "https://www.semanticscholar.org/paper/43b0f0d2abcafabb31222a6b5b44a085019057b5", "referenceCount": 53, "citationCount": 17, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "w2v-BERT: Combining Contrastive Learning and Masked Language Modeling for Self-Supervised Speech Pre-Training", "abstract": "Motivated by the success of masked language modeling (MLM) in pre-training natural language processing models, we propose w2v-BERT that explores MLM for self-supervised speech representation learning. w2v-BERT is a framework that combines contrastive learning and MLM, where the former trains the model to discretize input continuous speech signals into a finite set of discriminative speech tokens, and the latter trains the model to learn contextualized speech representations via solving a masked prediction task consuming the discretized tokens. In contrast to existing MLM-based speech pre-training frameworks such as HuBERT, which relies on an iterative re-clustering and re-training process, or vq-wav2vec, which concatenates two separately trained modules, w2v-BERT can be optimized in an end-to-end fashion by solving the two self-supervised tasks (the contrastive task and MLM) simultaneously. Our experiments show that w2v-BERT achieves competitive results compared to current state-of-the-art pre-trained models on the LibriSpeech benchmarks when using the Libri-Light 60k corpus as the unsupervised data. 
In particular, when compared to published models such as conformer-based wav2vec 2.0 and HuBERT, our model shows 5% to 10% relative WER reduction on the test-clean and test-other subsets. When applied to the Google's Voice Search traffic dataset, w2v-BERT outperforms our internal conformer-based wav2vec 2.0 by more than 30% relatively.", "year": 2021, "venue": "Automatic Speech Recognition & Understanding", "authors": [ "Yu-An Chung", "Yu Zhang", "Wei Han", "Chung-Cheng Chiu", "James Qin", "Ruoming Pang", "Yonghui Wu" ], "externalIds": { "DBLP": "conf/asru/ChungZHCQPW21", "ArXiv": "2108.06209", "DOI": "10.1109/ASRU51503.2021.9688253", "CorpusId": 237048255 }, "url": "https://www.semanticscholar.org/paper/ebe259796870ebccf26577044d0087884209b884", "referenceCount": 42, "citationCount": 330, "influentialCitationCount": 32, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "MST: Masked Self-Supervised Transformer for Visual Representation", "abstract": "Transformer has been widely used for self-supervised pre-training in Natural Language Processing (NLP) and achieved great success. However, it has not been fully explored in visual self-supervised learning. Meanwhile, previous methods only consider the high-level feature and learning representation from a global perspective, which may fail to transfer to the downstream dense prediction tasks focusing on local features. In this paper, we present a novel Masked Self-supervised Transformer approach named MST, which can explicitly capture the local context of an image while preserving the global semantic information. Specifically, inspired by the Masked Language Modeling (MLM) in NLP, we propose a masked token strategy based on the multi-head self-attention map, which dynamically masks some tokens of local patches without damaging the crucial structure for self-supervised learning. More importantly, the masked tokens together with the remaining tokens are further recovered by a global image decoder, which preserves the spatial information of the image and is more friendly to the downstream dense prediction tasks. The experiments on multiple datasets demonstrate the effectiveness and generality of the proposed method. For instance, MST achieves Top-1 accuracy of 76.9% with DeiT-S only using 300-epoch pre-training by linear evaluation, which outperforms supervised methods with the same epoch by 0.4% and its comparable variant DINO by 1.0\\%. For dense prediction tasks, MST also achieves 42.7% mAP on MS COCO object detection and 74.04% mIoU on Cityscapes segmentation only with 100-epoch pre-training.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Zhaowen Li", "Zhiyang Chen", "F. Yang", "Wei Li", "Yousong Zhu", "Chaoyang Zhao", "Rui Deng", "Liwei Wu", "Rui Zhao", "Ming Tang", "Jinqiao Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2106-05656", "ArXiv": "2106.05656", "CorpusId": 235390630 }, "url": "https://www.semanticscholar.org/paper/36b9d0f8610a82fd25854889d9327a04da4ff8fd", "referenceCount": 38, "citationCount": 138, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Comprehensive Study on Malware Detection and Prevention Techniques used by Anti-Virus", "abstract": "This paper aims to explain and discuss advanced technology used by anti-virus. In this era of the digital world, technology is developing rapidly day by day. 
Along with the development of technology, cyber risk is also increasing; thousands of cyber attacks are taking place every day. Malware (Viruses, worms, Trojans, rootkits, ransomware, Adware, Spyware) is one of the most common cyber-attack. An operating system that has been infected with malware (malicious software) can experience damage. As the name implies, malicious software is a computer program that can infect applications or documents stored in storage media and systems and manipulate applications and data on a computer. In 2020 there are 700 million new malware emerged and attacked billions of electronic devices. To prevent malware attacks, we need anti-virus/Antimalware Software. In this paper, we discussed various methods of how anti-virus work? What are the advanced techniques used by anti-virus software in this digital era? Comparison between various antivirus and their methods of detecting malware", "year": 2021, "venue": "2021 2nd International Conference on Intelligent Engineering and Management (ICIEM)", "authors": [ "Cheerala Rohith", "Gagandeep Kaur" ], "externalIds": { "DOI": "10.1109/ICIEM51511.2021.9445322", "CorpusId": 235340409 }, "url": "https://www.semanticscholar.org/paper/46d3066b20a54411f1a0223a06343cbe74ac324a", "referenceCount": 22, "citationCount": 7, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Machine learning raw network traffic detection", "abstract": "Increasingly cyber-attacks are sophisticated and occur rapidly, necessitating the use of machine learning techniques for detection at machine speed. However, the use of machine learning techniques in cyber security requires the extraction of features from the raw network traffic. Thus, subject matter expertise is essential to analyze the network traffic and extract optimum features to detect a cyber-attack. Consequently, we propose a novel machine learning algorithm for malicious network traffic detection using only the bytes of the raw network traffic. The feature vector in our machine learning method is a structure containing the headers and a variable number of payload bytes. We propose a 1D-Convolutional Neural Network (1D-CNN) and Feed Forward Network for detection of malicious packets using raw network bytes.", "year": 2021, "venue": "Defense + Commercial Sensing", "authors": [ "Michael J. de Lucia", "P. Maxwell", "Nathaniel D. Bastian", "A. Swami", "Brian Jalaian", "Nandi O. Leslie" ], "externalIds": { "MAG": "3155868884", "DOI": "10.1117/12.2586114", "CorpusId": 234889246 }, "url": "https://www.semanticscholar.org/paper/a7117057f65e61352d2596334323829758bb7ccd", "referenceCount": 28, "citationCount": 28, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "IP Traffic Classification of 4G Network using Machine Learning Techniques", "abstract": "In today's world, the number of internet services and users is increasing rapidly. This leads to a significant rise in the internet traffic. Thus, the task of classifying IP traffic is essential for internet service providers or ISP, as well as various government and private organizations in order to have better network management and security. IP traffic classification involves identification of user activity using network traffic flowing through the system. This will also help in enhancing the performance of the network.
The use of traditional IP traffic classification mechanisms which are based on inspection of packet payload and port numbers has decreased drastically because there are many internet applications nowadays which use port numbers which are dynamic in nature rather than well-known port numbers. Also, there are several encryption techniques nowadays due to which the inspection of packet payload is hindered. Presently, various machine learning techniques are generally used for classifying IP traffic. However, not much research has been conducted for the classification of IP traffic for a 4G network. During this research, we developed a new dataset by capturing packets of real-time internet traffic data of a 4G network using a tool named Wireshark. After that, we extracted the inferred features of the captured packets by using a python script. Then we applied five machine learning models, i.e., Decision Tree, Support Vector Machines, K Nearest Neighbours, Random Forest, and Naive Bayes for classifying IP traffic. It was observed that Random Forest gave the best accuracy of approximately 87%.", "year": 2021, "venue": "International Conference Computing Methodologies and Communication", "authors": [ "Rahul", "Amit Gupta", "A. Raj", "Mayank Arora" ], "externalIds": { "DOI": "10.1109/ICCMC51019.2021.9418397", "CorpusId": 233991335 }, "url": "https://www.semanticscholar.org/paper/f9c43e7bb6671247d7e378bf1885194e58ae1743", "referenceCount": 26, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "On Episodes, Prototypical Networks, and Few-shot Learning", "abstract": "Episodic learning is a popular practice among researchers and practitioners interested in few-shot learning. It consists of organising training in a series of learning problems, each relying on small \"support\" and \"query\" sets to mimic the few-shot circumstances encountered during evaluation. In this paper, we investigate the usefulness of episodic learning in Prototypical Networks and Matching Networks, two of the most popular algorithms making use of this practice. Surprisingly, in our experiments we found that, for Prototypical and Matching Networks, it is detrimental to use the episodic learning strategy of separating training samples between support and query set, as it is a data-inefficient way to exploit training batches. These \"non-episodic\" variants, which are closely related to the classic Neighbourhood Component Analysis, reliably improve over their episodic counterparts in multiple datasets, achieving an accuracy that (in the case of Prototypical Networks) is competitive with the state-of-the-art, despite being extremely simple.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Steinar Laenen", "Luca Bertinetto" ], "externalIds": { "DBLP": "journals/corr/abs-2012-09831", "ArXiv": "2012.09831", "MAG": "3110874594", "CorpusId": 229165454 }, "url": "https://www.semanticscholar.org/paper/17e742d2d48f0103664a9468454807076b41bd44", "referenceCount": 66, "citationCount": 85, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Laplacian Regularized Few-Shot Learning", "abstract": "We propose a transductive Laplacian-regularized inference for few-shot tasks. 
Given any feature embedding learned from the base classes, we minimize a quadratic binary-assignment function containing two terms: (1) a unary term assigning query samples to the nearest class prototype, and (2) a pairwise Laplacian term encouraging nearby query samples to have consistent label assignments. Our transductive inference does not re-train the base model, and can be viewed as a graph clustering of the query set, subject to supervision constraints from the support set. We derive a computationally efficient bound optimizer of a relaxation of our function, which computes independent (parallel) updates for each query sample, while guaranteeing convergence. Following a simple cross-entropy training on the base classes, and without complex meta-learning strategies, we conducted comprehensive experiments over five few-shot learning benchmarks. Our LaplacianShot consistently outperforms state-of-the-art methods by significant margins across different models, settings, and data sets. Furthermore, our transductive inference is very fast, with computational times that are close to inductive inference, and can be used for large-scale few-shot tasks.", "year": 2020, "venue": "International Conference on Machine Learning", "authors": [ "Imtiaz Masud Ziko", "J. Dolz", "Eric Granger", "Ismail Ben Ayed" ], "externalIds": { "ArXiv": "2006.15486", "DBLP": "conf/icml/ZikoDGA20", "MAG": "3037570585", "CorpusId": 220250262 }, "url": "https://www.semanticscholar.org/paper/5242a0e057364817c4adc4c02029588e315c7b50", "referenceCount": 52, "citationCount": 149, "influentialCitationCount": 19, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Self-Supervised Learning: Generative or Contrastive", "abstract": "Deep supervised learning has achieved great success in the last decade. However, its defects of heavy dependence on manual labels and vulnerability to attacks have driven people to find other paradigms. As an alternative, self-supervised learning (SSL) attracts many researchers for its soaring performance on representation learning in the last several years. Self-supervised representation learning leverages input data itself as supervision and benefits almost all types of downstream tasks. In this survey, we take a look into new self-supervised learning methods for representation in computer vision, natural language processing, and graph learning. We comprehensively review the existing empirical methods and summarize them into three main categories according to their objectives: generative, contrastive, and generative-contrastive (adversarial). We further collect related theoretical analysis on self-supervised learning to provide deeper thoughts on why self-supervised learning works. Finally, we briefly discuss open problems and future directions for self-supervised learning. 
An outline slide for the survey is provided.", "year": 2020, "venue": "IEEE Transactions on Knowledge and Data Engineering", "authors": [ "Xiao Liu", "Fanjin Zhang", "Zhenyu Hou", "Zhaoyu Wang", "Li Mian", "Jing Zhang", "Jie Tang" ], "externalIds": { "MAG": "3035725276", "DBLP": "journals/tkde/LiuZHMWZT23", "ArXiv": "2006.08218", "DOI": "10.1109/TKDE.2021.3090866", "CorpusId": 219687051 }, "url": "https://www.semanticscholar.org/paper/706f756b71f0bf51fc78d98f52c358b1a3aeef8e", "referenceCount": 184, "citationCount": 1283, "influentialCitationCount": 69, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Deep Packet Inspection in Industrial Automation Control System to Mitigate Attacks Exploiting Modbus/TCP Vulnerabilities", "abstract": "Modbus TCP/IP protocol is a commonly used protocol in industrial automation control systems, systems responsible for sensitive operations such as gas turbine operation and refinery control. The protocol was designed decades ago with no security features in mind. Denial of service attack and malicious parameter command injection are examples of attacks that can exploit vulnerabilities in industrial control systems that use Modbus/TCP protocol. This paper discusses and explores the use of intrusion detection and prevention systems (IDPS) with deep packet inspection (DPI) capabilities and DPI industrial firewalls that have capability to detect and stop highly specialized attacks hidden deep in the communication flow. The paper has the following objectives: (i) to develop signatures for IDPS for common attacks on Modbus/TCP based network architectures; (ii) to evaluate performance of three IDPS - Snort, Suricata and Bro – in detecting and preventing common attacks on Modbus/TCP based control systems; and (iii) to illustrate and emphasize that the IDPS and industrial firewalls with DPI capabilities are not preventing but only mitigating likelihood of exploitation of Modbus/TCP vulnerabilities in the industrial and automation control systems. The results presented in the paper illustrate that it might be challenging task to achieve requirements on real-time communication in some industrial and automation control systems in case the DPI is implemented because of the latency and jitter introduced by these IDPS and DPI industrial firewall.", "year": 2020, "venue": "2020 IEEE 6th Intl Conference on Big Data Security on Cloud (BigDataSecurity), IEEE Intl Conference on High Performance and Smart Computing, (HPSC) and IEEE Intl Conference on Intelligent Data and Security (IDS)", "authors": [ "Osborn N. Nyasore", "P. Zavarsky", "Bobby Swar", "Raphael Naiyeju", "Shubham Dabra" ], "externalIds": { "MAG": "3036994727", "DBLP": "conf/bigdatasec/NyasoreZSND20", "DOI": "10.1109/BigDataSecurity-HPSC-IDS49724.2020.00051", "CorpusId": 220071644 }, "url": "https://www.semanticscholar.org/paper/56bcf5c91d0b403245aefd9d1bbdd0a17cba74e8", "referenceCount": 20, "citationCount": 25, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "What the [MASK]? Making Sense of Language-Specific BERT Models", "abstract": "Recently, Natural Language Processing (NLP) has witnessed an impressive progress in many areas, due to the advent of novel, pretrained contextual representation models. In particular, Devlin et al.
(2019) proposed a model, called BERT (Bidirectional Encoder Representations from Transformers), which enables researchers to obtain state-of-the art performance on numerous NLP tasks by fine-tuning the representations on their data set and task, without the need for developing and training highly-specific architectures. The authors also released multilingual BERT (mBERT), a model trained on a corpus of 104 languages, which can serve as a universal language model. This model obtained impressive results on a zero-shot cross-lingual natural inference task. Driven by the potential of BERT models, the NLP community has started to investigate and generate an abundant number of BERT models that are trained on a particular language, and tested on a specific data domain and task. This allows us to evaluate the true potential of mBERT as a universal language model, by comparing it to the performance of these more specific models. This paper presents the current state of the art in language-specific BERT models, providing an overall picture with respect to different dimensions (i.e. architectures, data domains, and tasks). Our aim is to provide an immediate and straightforward overview of the commonalities and differences between Language-Specific (language-specific) BERT models and mBERT. We also provide an interactive and constantly updated website that can be used to explore the information we have collected, at this https URL.", "year": 2020, "venue": "arXiv.org", "authors": [ "Debora Nozza", "Federico Bianchi", "Dirk Hovy" ], "externalIds": { "MAG": "3009095382", "ArXiv": "2003.02912", "DBLP": "journals/corr/abs-2003-02912", "CorpusId": 212628707 }, "url": "https://www.semanticscholar.org/paper/6551f742b825561d26242ca8a646ba0e33fb109f", "referenceCount": 33, "citationCount": 95, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Traffic Classification Method Based on Packet Transport Layer Payload by Ensemble Learning", "abstract": "Network traffic classification is an important research topic for computer network, such as QoS detection and admission monitoring. Traditional classification methods, such as port-based and DPI(deep packet inspect)-based, are out-of-date due to the computational expensiveness and inaccuracy. In this paper, we propose a novel traffic classification approach based on packet transport layer payload by ensemble learning. We use three kinds of base neural networks to form a strong classifier. Each model is trained separately and the final prediction result is decided by weight voting. The raw traffic data are reshaped into the format of sequence and matrix as the input, which avoids the TCP stream feature selection and extraction process. Our approach is applicable to both TCP and UDP, which means that it doesn’t require a distinction between transport layer protocols. The experiment results show that our approach can reach the high accuracy of 96.38%, and is better than the state-of-the-art methods based on the same dataset. 
Besides, our proposed model can select packet samples randomly avoiding tracing the whole stream and the model works well even there’s packet loss and disorder.", "year": 2019, "venue": "International Symposium on Computers and Communications", "authors": [ "Luyang Xu", "Xu Zhou", "Yongmao Ren", "Yifang Qin" ], "externalIds": { "MAG": "3004414291", "DBLP": "conf/iscc/XuZRQ19", "DOI": "10.1109/ISCC47284.2019.8969702", "CorpusId": 210971675 }, "url": "https://www.semanticscholar.org/paper/f52d1cdab3a4a2f04feb0b572c34ff942e1f0562", "referenceCount": 15, "citationCount": 18, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Encrypted Traffic Classification Based ML for Identifying Different Social Media Applications", "abstract": "increasing the deployment of encryption in network protocols and applications poses a challenge for traditional traffic classification approaches. Social media applications such as Skype, WhatsApp, Facebook, YouTube etc. as popular representatives of encrypted traffics have attracted big attention to communication and entertainment. Therefore, the accurate identification of them within encrypted traffic has become a big issue and a hot topic to explore them in detail. In this context, Machine Learning (ML) approaches have shown promise in this area especially for detecting and classifying the encrypted traffic data. Therefore, this work is concentrated on the challenges and has explored the ability to use ML algorithms for social media classification from traffic traces and provides a developed solution, which is able to identify the social media sub-class.", "year": 2019, "venue": "Canadian Conference on Electrical and Computer Engineering", "authors": [ "Furat Al-Obaidy", "Shadi Momtahen", "Md. Foysal Hossain", "F. Mohammadi" ], "externalIds": { "MAG": "2979687053", "DBLP": "conf/ccece/Al-ObaidyMHM19", "DOI": "10.1109/CCECE.2019.8861934", "CorpusId": 204231643 }, "url": "https://www.semanticscholar.org/paper/3cd72116b10fb52b9203a0a390e8a77580a33fe2", "referenceCount": 13, "citationCount": 24, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Cryptography", "abstract": "As there is no manual of cryptography in English, this book, which is translated from the French, will be welcomed by all who wish to make a serious study of the subject, either for practical purposes or as an intellectual exercise. The author deals with his subject under three heads. Under the first he. gives a brief history of the methods of conveying information secretly, beginning with the Greeks, Egyptians, and Romans; under the second he gives examples of cryptographical writings of which he himself has found the solution, for the most part, during the War; and under the third he gives lists and tables of frequency of single letters, bigrams, and other combinations in English and other languages. This section will naturally be one of the most frequently consulted in the book, as a knowledge of the relative frequency of occurrence of the different letters and combinations is essential in all decipherment. The translator adds a supplementary chapter dealing with methods of conveying information secretly, such as the use of sympathetic inks, tramps' signs, the marking of cards by cardsharpers, and the like, and describes the Playfair cipher, a substitution system extensively used for military purposes, Commander W. W. 
Smith, United States Navy, adding a note on its solution. Cryptography. By André Langie. Translated from the French by J. C. H. Macbeth. Pp. viii + 192. (London, Bombay and Sydney: Constable and Co., Ltd., 1922.) 9s. net.", "year": 2019, "venue": "Nature", "authors": [ "Jonathan Katz" ], "externalIds": { "DBLP": "reference/wiley/Katz08", "DOI": "10.1002/9780470050118.ecse089", "CorpusId": 2765155 }, "url": "https://www.semanticscholar.org/paper/d939180e72e74fcbd978ad4ffd9fab98363b9ae4", "referenceCount": 26, "citationCount": 907, "influentialCitationCount": 21, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Mobile Encrypted Traffic Classification Using Deep Learning: Experimental Evaluation, Lessons Learned, and Challenges", "abstract": "The massive adoption of hand-held devices has led to the explosion of mobile traffic volumes traversing home and enterprise networks, as well as the Internet. Traffic classification (TC), i.e., the set of procedures for inferring (mobile) applications generating such traffic, has become nowadays the enabler for highly valuable profiling information (with certain privacy downsides), other than being the workhorse for service differentiation/blocking. Nonetheless, the design of accurate classifiers is exacerbated by the raising adoption of encrypted protocols (such as TLS), hindering the suitability of (effective) deep packet inspection approaches. Also, the fast-expanding set of apps and the moving-target nature of mobile traffic makes design solutions with usual machine learning, based on manually and expert-originated features, outdated and unable to keep the pace. For these reasons deep learning (DL) is here proposed, for the first time, as a viable strategy to design practical mobile traffic classifiers based on automatically extracted features, able to cope with encrypted traffic, and reflecting their complex traffic patterns. To this end, different state-of-the-art DL techniques from (standard) TC are here reproduced, dissected (highlighting critical choices), and set into a systematic framework for comparison, including also a performance evaluation workbench. The latter outcome, although declined in the mobile context, has the applicability appeal to the wider umbrella of encrypted TC tasks. Finally, the performance of these DL classifiers is critically investigated based on an exhaustive experimental validation (based on three mobile datasets of real human users’ activity), highlighting the related pitfalls, design guidelines, and challenges.", "year": 2019, "venue": "IEEE Transactions on Network and Service Management", "authors": [ "Giuseppe Aceto", "D. Ciuonzo", "Antonio Montieri", "A. 
Pescapé" ], "externalIds": { "MAG": "2912386632", "DBLP": "journals/tnsm/AcetoCMP19", "DOI": "10.1109/TNSM.2019.2899085", "CorpusId": 86425807 }, "url": "https://www.semanticscholar.org/paper/49e7383d7704fbf4294562f6441d84add0af806e", "referenceCount": 48, "citationCount": 361, "influentialCitationCount": 17, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Handbook of Applied Cryptography", "abstract": "From the Publisher: \nA valuable reference for the novice as well as for the expert who needs a wider scope of coverage within the area of cryptography, this book provides easy and rapid access of information and includes more than 200 algorithms and protocols; more than 200 tables and figures; more than 1,000 numbered definitions, facts, examples, notes, and remarks; and over 1,250 significant references, including brief comments on each paper.", "year": 2018, "venue": "", "authors": [ "A. Menezes", "P. V. Oorschot", "S. Vanstone" ], "externalIds": { "DBLP": "books/crc/MenezesOV96", "MAG": "1660562555", "DOI": "10.1201/9781439821916", "CorpusId": 13601326 }, "url": "https://www.semanticscholar.org/paper/83721103a6fd5535e943b1b575cf70862c2322a8", "referenceCount": 676, "citationCount": 12453, "influentialCitationCount": 1033, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Machine Learning DDoS Detection for Consumer Internet of Things Devices", "abstract": "An increasing number of Internet of Things (IoT) devices are connecting to the Internet, yet many of these devices are fundamentally insecure, exposing the Internet to a variety of attacks. Botnets such as Mirai have used insecure consumer IoT devices to conduct distributed denial of service (DDoS) attacks on critical Internet infrastructure. This motivates the development of new techniques to automatically detect consumer IoT attack traffic. In this paper, we demonstrate that using IoT-specific network behaviors (e.g., limited number of endpoints and regular time intervals between packets) to inform feature selection can result in high accuracy DDoS detection in IoT network traffic with a variety of machine learning algorithms, including neural networks. These results indicate that home gateway routers or other network middleboxes could automatically detect local IoT device sources of DDoS attacks using low-cost machine learning algorithms and traffic data that is flow-based and protocol-agnostic.", "year": 2018, "venue": "2018 IEEE Security and Privacy Workshops (SPW)", "authors": [ "Rohan Doshi", "Noah J. Apthorpe", "N. Feamster" ], "externalIds": { "MAG": "3105750153", "DBLP": "journals/corr/abs-1804-04159", "ArXiv": "1804.04159", "DOI": "10.1109/SPW.2018.00013", "CorpusId": 206581423 }, "url": "https://www.semanticscholar.org/paper/400081ba358663ca24365044e5e818638c439e1d", "referenceCount": 25, "citationCount": 548, "influentialCitationCount": 46, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A survey on deep packet inspection", "abstract": "Deep Packet Inspection (DPI) considered as one of the most important parts in content-aware network applications such as copyright enforcement, Intrusion detection system (IDS) and other applications will be discussed later. DPI rely on comparing to parts payload and signature (IP header). IT compares them with known signatures to decide if the packet is harmful (similar to any of attacks database signatures) and delete it or pass it through the network flow. 
it deals with the content below the 4th layer of the IP packet that includes source and destination ports, source and destination addresses and type of protocol. It classifies type of the application depending on its port number. For signature comparison, many algorithms are applied such as regular expressions (most popular) and others discussed later. Nowadays many applications rely on DPI for inspecting packets in network stream. This survey gives a brief idea about challenges in DPI and some of the design objectives. Then explaining in short words different matching algorithms with their limitations. At the end, some of the most popular techniques using DPI.", "year": 2017, "venue": "International Conference on Communication and Electronics Systems", "authors": [ "Reham Taher El-Maghraby", "Nada Mostafa Abd Elazim", "Ayman M. Bahaa-Eldin" ], "externalIds": { "MAG": "2785474195", "DOI": "10.1109/ICCES.2017.8275301", "CorpusId": 12299378 }, "url": "https://www.semanticscholar.org/paper/d708b75cab0d5e27c68e9246e5e99dd41a9b5623", "referenceCount": 0, "citationCount": 45, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Decoupled Weight Decay Regularization", "abstract": "L$_2$ regularization and weight decay regularization are equivalent for standard stochastic gradient descent (when rescaled by the learning rate), but as we demonstrate this is \\emph{not} the case for adaptive gradient algorithms, such as Adam. While common implementations of these algorithms employ L$_2$ regularization (often calling it \"weight decay\" in what may be misleading due to the inequivalence we expose), we propose a simple modification to recover the original formulation of weight decay regularization by \\emph{decoupling} the weight decay from the optimization steps taken w.r.t. the loss function. We provide empirical evidence that our proposed modification (i) decouples the optimal choice of weight decay factor from the setting of the learning rate for both standard SGD and Adam and (ii) substantially improves Adam's generalization performance, allowing it to compete with SGD with momentum on image classification datasets (on which it was previously typically outperformed by the latter). Our proposed decoupled weight decay has already been adopted by many researchers, and the community has implemented it in TensorFlow and PyTorch; the complete source code for our experiments is available at this https URL", "year": 2017, "venue": "International Conference on Learning Representations", "authors": [ "I. Loshchilov", "F. Hutter" ], "externalIds": { "MAG": "2950541952", "DBLP": "conf/iclr/LoshchilovH19", "CorpusId": 53592270 }, "url": "https://www.semanticscholar.org/paper/d07284a6811f1b2745d91bdb06b040b57f226882", "referenceCount": 35, "citationCount": 17312, "influentialCitationCount": 3078, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Detecting Android Malware Leveraging Text Semantics of Network Flows", "abstract": "The emergence of malicious apps poses a serious threat to the Android platform. Most types of mobile malware rely on network interface to coordinate operations, steal users’ private information, and launch attack activities. In this paper, we propose an effective and automatic malware detection method using the text semantics of network traffic. 
In particular, we consider each HTTP flow generated by mobile apps as a text document, which can be processed by natural language processing to extract text-level features. Then, we use the text semantic features of network traffic to develop an effective malware detection model. In an evaluation using 31 706 benign flows and 5258 malicious flows, our method outperforms the existing approaches, and gets an accuracy of 99.15%. We also conduct experiments to verify that the method is effective in detecting newly discovered malware, and requires only a few samples to achieve a good detection result. When the detection model is applied to the real environment to detect unknown applications in the wild, the experimental results show that our method performs significantly better than other popular anti-virus scanners with a detection rate of 54.81%. Our method also reveals certain malware types that can avoid the detection of anti-virus scanners. In addition, we design a detection system on encrypted traffic for bring-your-own-device enterprise network, home network, and 3G/4G mobile network. The detection model is integrated into the system to discover suspicious network behaviors.", "year": 2017, "venue": "IEEE Transactions on Information Forensics and Security", "authors": [ "Shanshan Wang", "Qiben Yan", "Zhenxiang Chen", "Bo Yang", "Chuan Zhao", "M. Conti" ], "externalIds": { "MAG": "2768073432", "DBLP": "journals/tifs/WangYCYZC18", "DOI": "10.1109/TIFS.2017.2771228", "CorpusId": 38054745 }, "url": "https://www.semanticscholar.org/paper/c279ab612db467f6d934a707c58c98b610288ae8", "referenceCount": 35, "citationCount": 120, "influentialCitationCount": 12, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep packet: a novel approach for encrypted traffic classification using deep learning", "abstract": null, "year": 2017, "venue": "Soft Computing - A Fusion of Foundations, Methodologies and Applications", "authors": [ "M. Lotfollahi", "Mahdi Jafari Siavoshani", "Ramin Shirali Hossein Zade", "Mohammdsadegh Saberian" ], "externalIds": { "DBLP": "journals/soco/LotfollahiSZS20", "MAG": "2963516518", "ArXiv": "1709.02656", "DOI": "10.1007/s00500-019-04030-2", "CorpusId": 35187639 }, "url": "https://www.semanticscholar.org/paper/36f13179cdfc13017df535fdee582d58067301f3", "referenceCount": 76, "citationCount": 711, "influentialCitationCount": 90, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Attention is All you Need", "abstract": "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. 
We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Ashish Vaswani", "Noam M. Shazeer", "Niki Parmar", "Jakob Uszkoreit", "Llion Jones", "Aidan N. Gomez", "Lukasz Kaiser", "Illia Polosukhin" ], "externalIds": { "MAG": "2963403868", "DBLP": "conf/nips/VaswaniSPUJGKP17", "ArXiv": "1706.03762", "CorpusId": 13756489 }, "url": "https://www.semanticscholar.org/paper/204e3073870fae3d05bcbc2f6a8e263d9b72e776", "referenceCount": 41, "citationCount": 104988, "influentialCitationCount": 15363, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Prototypical Networks for Few-shot Learning", "abstract": "We propose Prototypical Networks for the problem of few-shot classification, where a classifier must generalize to new classes not seen in the training set, given only a small number of examples of each new class. Prototypical Networks learn a metric space in which classification can be performed by computing distances to prototype representations of each class. Compared to recent approaches for few-shot learning, they reflect a simpler inductive bias that is beneficial in this limited-data regime, and achieve excellent results. We provide an analysis showing that some simple design decisions can yield substantial improvements over recent approaches involving complicated architectural choices and meta-learning. We further extend Prototypical Networks to zero-shot learning and achieve state-of-the-art results on the CU-Birds dataset.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Jake Snell", "Kevin Swersky", "R. Zemel" ], "externalIds": { "DBLP": "conf/nips/SnellSZ17", "ArXiv": "1703.05175", "MAG": "2950537964", "CorpusId": 309759 }, "url": "https://www.semanticscholar.org/paper/c269858a7bb34e8350f2442ccf37797856ae9bca", "referenceCount": 38, "citationCount": 7165, "influentialCitationCount": 1578, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Matching Networks for One Shot Learning", "abstract": "Learning from a few examples remains a key challenge in machine learning. Despite recent advances in important domains such as vision and language, the standard supervised deep learning paradigm does not offer a satisfactory solution for learning new concepts rapidly from little data. In this work, we employ ideas from metric learning based on deep neural features and from recent advances that augment neural networks with external memories. Our framework learns a network that maps a small labelled support set and an unlabelled example to its label, obviating the need for fine-tuning to adapt to new class types. We then define one-shot learning problems on vision (using Omniglot, ImageNet) and language tasks. Our algorithm improves one-shot accuracy on ImageNet from 87.6% to 93.2% and from 88.0% to 93.8% on Omniglot compared to competing approaches. We also demonstrate the usefulness of the same model on language modeling by introducing a one-shot task on the Penn Treebank.", "year": 2016, "venue": "Neural Information Processing Systems", "authors": [ "O. Vinyals", "C. Blundell", "T. Lillicrap", "K. 
Kavukcuoglu", "Daan Wierstra" ], "externalIds": { "MAG": "2432717477", "DBLP": "journals/corr/VinyalsBLKW16", "ArXiv": "1606.04080", "CorpusId": 8909022 }, "url": "https://www.semanticscholar.org/paper/be1bb4e4aa1fcf70281b4bd24d8cd31c04864bb6", "referenceCount": 30, "citationCount": 6670, "influentialCitationCount": 1348, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Deep Learning Approach for Network Intrusion Detection System", "abstract": "A Network Intrusion Detection System (NIDS) helps system administrators to detect network security breaches in \n \ntheir organizations. However, many challenges arise while \n \ndeveloping a flexible and efficient NIDS for unforeseen and unpredictable attacks. We propose a deep learning based approach for developing such an efficient and flexible NIDS. \n \nWe use Self-taught Learning (STL), a deep learning based technique, on NSL-KDD - a benchmark dataset for network \n \nintrusion. We present the performance of our approach and compare it with a few previous work. Compared metrics include accuracy, precision, recall, and f-measure values.", "year": 2016, "venue": "EAI Endorsed Trans. Security Safety", "authors": [ "A. Javaid", "Quamar Niyaz", "Weiqing Sun", "Mansoor Alam" ], "externalIds": { "MAG": "2584770732", "DBLP": "journals/sesa/JavaidNSA16", "DOI": "10.4108/eai.3-12-2015.2262516", "CorpusId": 31835902 }, "url": "https://www.semanticscholar.org/paper/25deaf0adeb9ea455a2f2d211ee86890eb64b69b", "referenceCount": 19, "citationCount": 942, "influentialCitationCount": 50, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "UNSW-NB15: a comprehensive data set for network intrusion detection systems (UNSW-NB15 network data set)", "abstract": "One of the major research challenges in this field is the unavailability of a comprehensive network based data set which can reflect modern network traffic scenarios, vast varieties of low footprint intrusions and depth structured information about the network traffic. Evaluating network intrusion detection systems research efforts, KDD98, KDDCUP99 and NSLKDD benchmark data sets were generated a decade ago. However, numerous current studies showed that for the current network threat environment, these data sets do not inclusively reflect network traffic and modern low footprint attacks. Countering the unavailability of network benchmark data set challenges, this paper examines a UNSW-NB15 data set creation. This data set has a hybrid of the real modern normal and the contemporary synthesized attack activities of the network traffic. Existing and novel methods are utilised to generate the features of the UNSWNB15 data set. This data set is available for research purposes and can be accessed from the link.", "year": 2015, "venue": "Military Communications and Information Systems Conference", "authors": [ "Nour Moustafa", "J. 
Slay" ], "externalIds": { "MAG": "2296509296", "DBLP": "conf/milcis/MoustafaS15", "DOI": "10.1109/MilCIS.2015.7348942", "CorpusId": 18965349 }, "url": "https://www.semanticscholar.org/paper/0e7af8e91b8cb2cea1164be5ac5d280b0d12c153", "referenceCount": 11, "citationCount": 2108, "influentialCitationCount": 252, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A detailed analysis of the KDD CUP 99 data set", "abstract": "During the last decade, anomaly detection has attracted the attention of many researchers to overcome the weakness of signature-based IDSs in detecting novel attacks, and KDDCUP'99 is the mostly widely used data set for the evaluation of these systems. Having conducted a statistical analysis on this data set, we found two important issues which highly affects the performance of evaluated systems, and results in a very poor evaluation of anomaly detection approaches. To solve these issues, we have proposed a new data set, NSL-KDD, which consists of selected records of the complete KDD data set and does not suffer from any of mentioned shortcomings.", "year": 2009, "venue": "IEEE Symposium on Computational Intelligence for Security and Defense Applications", "authors": [ "Mahbod Tavallaee", "E. Bagheri", "Wei Lu", "A. Ghorbani" ], "externalIds": { "DBLP": "conf/cisda/TavallaeeBLG09", "MAG": "2099940443", "DOI": "10.1109/CISDA.2009.5356528", "CorpusId": 4528546 }, "url": "https://www.semanticscholar.org/paper/fc3eb090e39d71295c362458b8a0c48d2c5d8377", "referenceCount": 25, "citationCount": 3806, "influentialCitationCount": 432, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "WF-Transformer: Learning Temporal Features for Accurate Anonymous Traffic Identification by Using Transformer Networks", "abstract": "Website Fingerprinting (WF) is a network traffic mining technique for anonymous traffic identification, which enables a local adversary to identify the target website that an anonymous network user is browsing. WF attacks based on deep convolutional neural networks (CNN) get the state-of-the-art anonymous traffic classification performance. However, due to the locality restriction of CNN architecture for feature extraction on sequence data, these methods ignore the temporal feature extraction in the anonymous traffic analysis. In this paper, we present Website Fingerprinting Transformer (WF-Transformer), a novel anonymous network traffic analysis method that leverages Transformer networks for temporal feature extraction of traffic traces and improves the classification performance of Tor encrypted traffic. The architecture of WF-Transformer is specially designed for traffic trace processing and can classify anonymous traffic effectively. Furthermore, we evaluate the performance of WF-Transformer in both closed-world and open-world scenarios. In the closed-world scenario, WF-Transformer attains 99.1% accuracy on Tor traffic without defenses, better than state-or-the-art attacks, and archives 92.1% accuracy on the traces defended by WTF-PAD method. In the open-world scenario, WF-Transformer has better precision and recall on both defended and non-defended traces. Furthermore, WF-Transformer with a short input length (2000 cells) outperforms the DF method with a long input length (5000 cells).", "year": 2024, "venue": "IEEE Transactions on Information Forensics and Security", "authors": [ "Qiang Zhou", "Liangmin Wang", "Huijuan Zhu", "Tong Lu", "Victor S. 
Sheng" ], "externalIds": { "DBLP": "journals/tifs/ZhouWZLS24", "DOI": "10.1109/TIFS.2023.3318966", "CorpusId": 263094796 }, "url": "https://www.semanticscholar.org/paper/ce448a554800334134c7a28b35e92c8f8d46a996", "referenceCount": 47, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Overcoming Data Limitations: A Few-Shot Specific Emitter Identification Method Using Self-Supervised Learning and Adversarial Augmentation", "abstract": "Specific emitter identification (SEI) based on radio frequency fingerprinting (RFF) is a physical layer authentication method in the field of wireless network security. RFFs are unique features embedded in the electromagnetic waves, which come from the hard imperfections in the wireless devices. Deep learning has been applied to many SEI tasks due to its powerful feature extraction capabilities. However, the success of most methods hinges on massive and labeled samples, and few methods focus on a realistic scenario, where few samples are available and labeled. In this paper, to overcome data limitations, we propose a few-shot SEI (FS-SEI) method based on self-supervised learning and adversarial augmentation (SA2SEI). Specifically, to overcome the limitation of label dependence for auxiliary dataset, a novelty adversarial augmentation (Adv-Aug)-powered self-supervised learning is designed to pre-train a RFF extractor using unlabeled auxiliary dataset. Subsequently, to overcome the limitation of sample dependence, knowledge transfer is introduced to fine-tune the extractor and a classifier with target dataset including few samples (5-30 samples per emitter in this paper) and corresponding labels. In addition, auxiliary dataset and target dataset are come from different emitters. An open-source large-scale real-world automatic-dependent surveillance-broadcast (ADS-B) dataset and a Wi-Fi dataset are used to evaluate the proposed SA2SEI method. The simulation results show that the proposed method can extract more discriminative RFF features and obtain higher identification performance in the FS-SEI. Specifically, when there are only 5 samples per Wi-Fi device, it can achieve $83.40\\%$ identification accuracy, in which $38.63\\%$ identification accuracy improvement comes from the Adv-Aug of pre-training process. The codes are available at https://github.com/LIUC-000/SA2SEI.", "year": 2024, "venue": "IEEE Transactions on Information Forensics and Security", "authors": [ "Chao Liu", "Xue Fu", "Yu Wang", "Lantu Guo", "Yuchao Liu", "Yun Lin", "Haitao Zhao", "Guan Gui" ], "externalIds": { "DBLP": "journals/tifs/LiuFWGLLZG24", "DOI": "10.1109/TIFS.2023.3324394", "CorpusId": 264109640 }, "url": "https://www.semanticscholar.org/paper/cae767bba11993fa32428017e42a889580dbb8ac", "referenceCount": 57, "citationCount": 28, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ResNeXt+: Attention Mechanisms Based on ResNeXt for Malware Detection and Classification", "abstract": "Malware detection and classification are crucial for protecting digital devices and information systems. Accurate identification of malware enables researchers and incident responders to take prompt measures against malware and mitigate its damage. With the development of attention mechanisms in the field of computer vision, attention mechanism-based malware detection techniques are also rapidly evolving. 
The essence of the attention mechanism is to focus on the information of interest and suppress the useless information. In this paper, we develop different plug-and-play attention mechanisms based on the ResNeXt tagging model, where the designed model is trained to focus on the malware features by capturing the malware image channel perception field of view and is also able to provide more helpful and flexible information than other methods. We have named this designed neural network ResNeXt+, and its core modules are built with different plug-and-play attention mechanisms. Extensive experimental results show that ResNeXt+ is effective and efficient in malware detection and classification with high classification accuracy. The proposed methods outperform the state-of-the-art techniques with seven benchmark datasets. Cross-dataset experiments conducted on the Windows and Android datasets, with an accuracy of 90.64% on cross-dataset detection of the android. Ablation experiments are also conducted on seven datasets, which demonstrate that attention mechanisms can improve malware detection and classification accuracy.", "year": 2024, "venue": "IEEE Transactions on Information Forensics and Security", "authors": [ "Yuewang He", "Xiangui Kang", "Qiben Yan", "Enping Li" ], "externalIds": { "DBLP": "journals/tifs/HeKYL24", "DOI": "10.1109/TIFS.2023.3328431", "CorpusId": 264801501 }, "url": "https://www.semanticscholar.org/paper/8375c29a7a48efb459825d667420c19c53390470", "referenceCount": 65, "citationCount": 7, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Vehicle Controller Area Network Inspection Using Recurrent Neural Networks", "abstract": null, "year": 2023, "venue": "ACR", "authors": [ "Kyle Stein", "A. Mahyari", "E. El-Sheikh" ], "externalIds": { "DBLP": "conf/acr/SteinME23", "DOI": "10.1007/978-3-031-33743-7_40", "CorpusId": 260971691 }, "url": "https://www.semanticscholar.org/paper/16941f66a2248f80aa15ad7bbceb3885477748c6", "referenceCount": 0, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PyCryptodome", "abstract": null, "year": 2021, "venue": "pycryptodome.readthedocs.io/en/latest/src/cipher/aes.html", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", "abstract": "We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models (Peters et al., 2018a; Radford et al., 2018), BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. 
It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5 (7.7 point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).", "year": 2019, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Jacob Devlin", "Ming-Wei Chang", "Kenton Lee", "Kristina Toutanova" ], "externalIds": { "MAG": "2951055169", "ACL": "N19-1423", "DBLP": "journals/corr/abs-1810-04805", "ArXiv": "1810.04805", "DOI": "10.18653/v1/N19-1423", "CorpusId": 52967399 }, "url": "https://www.semanticscholar.org/paper/df2b0e26d0599ce3e70df8a9da02e51594e0e992", "referenceCount": 63, "citationCount": 81678, "influentialCitationCount": 19056, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "KDD Cup 1999 Data", "abstract": null, "year": 1999, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Main classes - optimizer and schedules", "abstract": null, "year": null, "venue": "Transformers Documentation", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "TCP VS UDP: What’s the main difference?", "abstract": null, "year": null, "venue": "NordVPN", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Masked Language Modeling", "abstract": null, "year": null, "venue": "Transformers Documentation", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Malware Statistics", "abstract": null, "year": null, "venue": "DataProt.", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Visual Data Diagnosis and Debiasing with Concept Graphs": { "paper_title": "Visual Data Diagnosis and Debiasing with Concept Graphs", "arxiv_id": "2409.18055v1", "keyword": "deep learning", "authors": [ "Rwiddhi Chakraborty", "Yinong Wang", "Jialu Gao", "Runkai Zheng", "Cheng Zhang", "Fernando De la Torre" ], "references": [ { "title": "Unsupervised Concept Discovery Mitigates Spurious Correlations", "abstract": "Models prone to spurious correlations in training data often produce brittle predictions and introduce unintended biases. Addressing this challenge typically involves methods relying on prior knowledge and group annotation to remove spurious correlations, which may not be readily available in many applications. In this paper, we establish a novel connection between unsupervised object-centric learning and mitigation of spurious correlations. Instead of directly inferring subgroups with varying correlations with labels, our approach focuses on discovering concepts: discrete ideas that are shared across input samples. 
Leveraging existing object-centric representation learning, we introduce CoBalT: a concept balancing technique that effectively mitigates spurious correlations without requiring human labeling of subgroups. Evaluation across the benchmark datasets for sub-population shifts demonstrate superior or competitive performance compared state-of-the-art baselines, without the need for group annotation. Code is available at https://github.com/rarefin/CoBalT.", "year": 2024, "venue": "International Conference on Machine Learning", "authors": [ "Md Rifat Arefin", "Yan Zhang", "A. Baratin", "Francesco Locatello", "Irina Rish", "Dianbo Liu", "Kenji Kawaguchi" ], "externalIds": { "ArXiv": "2402.13368", "DBLP": "journals/corr/abs-2402-13368", "DOI": "10.48550/arXiv.2402.13368", "CorpusId": 267770437 }, "url": "https://www.semanticscholar.org/paper/2a870dec6c17331b2c6eadd6a4687b035ecdf796", "referenceCount": 55, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Shape-biased CNNs are Not Always Superior in Out-of-Distribution Robustness", "abstract": "In recent years, Out-of-Distribution (o.o.d) Robustness has garnered increasing attention in Deep Learning, and shape-biased Convolutional Neural Networks (CNNs) are believed to exhibit higher robustness, attributed to the inherent shape-based decision rule of human cognition. In this work, we delve deeper into the intricate relationship between shape/texture information and o.o.d robustness by leveraging a carefully curated \"Category-Balanced ImageNet\" dataset. We find that shape information is not always superior in distinguishing distinct categories and shape-biased model is not always superior across various o.o.d scenarios. Motivated by these insightful findings, we design a novel method named Shape-Texture Adaptive Recombination (STAR) to achieve higher o.o.d robustness. A category-balanced dataset is firstly used to pretrain a debiased backbone and three specialized heads, each adept at robustly extracting shape, texture, and debiased features. Subsequently, an instance-adaptive recombination head is trained to adaptively adjust the contributions of these distinctive features for each given instance. Through comprehensive experiments, our proposed method achieves state-of-the-art o.o.d robustness across various scenarios such as image corruptions, adversarial attacks, style shifts, and dataset shifts, demonstrating its effectiveness.", "year": 2024, "venue": "IEEE Workshop/Winter Conference on Applications of Computer Vision", "authors": [ "Xinkuan Qiu", "Meina Kan", "Yongbin Zhou", "Yanchao Bi", "Shiguang Shan" ], "externalIds": { "DBLP": "conf/wacv/QiuKZBS24", "DOI": "10.1109/WACV57701.2024.00232", "CorpusId": 269035253 }, "url": "https://www.semanticscholar.org/paper/10c52a986a231ffdafd3d1ab5a7f3e02ca54e638", "referenceCount": 53, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FACTS: First Amplify Correlations and Then Slice to Discover Bias", "abstract": "Computer vision datasets frequently contain spurious correlations between task-relevant labels and (easy to learn) latent task-irrelevant attributes (e.g. context). Models trained on such datasets learn \"shortcuts\" and underperform on bias-conflicting slices of data where the correlation does not hold. In this work, we study the problem of identifying such slices to inform downstream bias mitigation strategies. 
We propose First Amplify Correlations and Then Slice (FACTS), wherein we first amplify correlations to fit a simple bias-aligned hypothesis via strongly regularized empirical risk minimization. Next, we perform correlation-aware slicing via mixture modeling in bias-aligned feature space to discover underperforming data slices that capture distinct correlations. Despite its simplicity, our method considerably improves over prior work (by as much as 35% precision@10) in correlation bias identification across a range of diverse evaluation settings. Code: https://github.com/yvsriram/FACTS.", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Sriram Yenamandra", "Pratik Ramesh", "Viraj Prabhu", "Judy Hoffman" ], "externalIds": { "ArXiv": "2309.17430", "DBLP": "journals/corr/abs-2309-17430", "DOI": "10.1109/ICCV51070.2023.00442", "CorpusId": 263310991 }, "url": "https://www.semanticscholar.org/paper/363d760d0b7f727e83bb9df4086fc4be4712abee", "referenceCount": 43, "citationCount": 11, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FACET: Fairness in Computer Vision Evaluation Benchmark", "abstract": "Computer vision models have known performance disparities across attributes such as gender and skin tone. This means during tasks such as classification and detection, model performance differs for certain classes based on the demographics of the people in the image. These disparities have been shown to exist, but until now there has not been a unified approach to measure these differences for common use-cases of computer vision models. We present a new benchmark named FACET (FAirness in Computer Vision EvaluaTion), a large, publicly available evaluation set of 32k images for some of the most common vision tasks - image classification, object detection and segmentation. For every image in FACET, we hired expert reviewers to manually annotate person-related attributes such as perceived skin tone and hair type, manually draw bounding boxes and label fine-grained person-related classes such as disk jockey or guitarist. In addition, we use FACET to benchmark state-of-the-art vision models and present a deeper understanding of potential performance disparities and challenges across sensitive demographic attributes. With the exhaustive annotations collected, we probe models using single demographics attributes as well as multiple attributes using an intersectional approach (e.g. hair color and perceived skin tone). Our results show that classification, detection, segmentation, and visual grounding models exhibit performance disparities across demographic attributes and intersections of attributes. These harms suggest that not all people represented in datasets receive fair and equitable treatment in these vision tasks. We hope current and future results using our benchmark will contribute to fairer, more robust vision models. FACET is available publicly at https://facet.metademolab.com.", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Laura Gustafson", "Chloe Rolland", "Nikhila Ravi", "Quentin Duval", "Aaron B. 
Adcock", "Cheng-Yang Fu", "Melissa Hall", "Candace Ross" ], "externalIds": { "ArXiv": "2309.00035", "DBLP": "journals/corr/abs-2309-00035", "DOI": "10.1109/ICCV51070.2023.01863", "CorpusId": 261494264 }, "url": "https://www.semanticscholar.org/paper/7143623c7f1886ba67cc71692cc963741306ae4f", "referenceCount": 103, "citationCount": 18, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LANCE: Stress-testing Visual Models by Generating Language-guided Counterfactual Images", "abstract": "We propose an automated algorithm to stress-test a trained visual model by generating language-guided counterfactual test images (LANCE). Our method leverages recent progress in large language modeling and text-based image editing to augment an IID test set with a suite of diverse, realistic, and challenging test images without altering model weights. We benchmark the performance of a diverse set of pre-trained models on our generated data and observe significant and consistent performance drops. We further analyze model sensitivity across different types of edits, and demonstrate its applicability at surfacing previously unknown class-level model biases in ImageNet. Code is available at https://github.com/virajprabhu/lance.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Viraj Prabhu", "Sriram Yenamandra", "Prithvijit Chattopadhyay", "Judy Hoffman" ], "externalIds": { "DBLP": "conf/nips/PrabhuYCH23", "ArXiv": "2305.19164", "DOI": "10.48550/arXiv.2305.19164", "CorpusId": 258967791 }, "url": "https://www.semanticscholar.org/paper/5d36d8817fb0ab69ee396250aed204dea70da9f8", "referenceCount": 74, "citationCount": 23, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Diversify Your Vision Datasets with Automatic Diffusion-Based Augmentation", "abstract": "Many fine-grained classification tasks, like rare animal identification, have limited training data and consequently classifiers trained on these datasets often fail to generalize to variations in the domain like changes in weather or location. As such, we explore how natural language descriptions of the domains seen in training data can be used with large vision models trained on diverse pretraining datasets to generate useful variations of the training data. We introduce ALIA (Automated Language-guided Image Augmentation), a method which utilizes large vision and language models to automatically generate natural language descriptions of a dataset's domains and augment the training data via language-guided image editing. To maintain data integrity, a model trained on the original dataset filters out minimal image edits and those which corrupt class-relevant information. The resulting dataset is visually consistent with the original training data and offers significantly enhanced diversity. We show that ALIA is able to surpasses traditional data augmentation and text-to-image generated data on fine-grained classification tasks, including cases of domain generalization and contextual bias. Code is available at https://github.com/lisadunlap/ALIA.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Lisa Dunlap", "Alyssa Umino", "Han Zhang", "Jiezhi Yang", "Joseph E. 
Gonzalez", "Trevor Darrell" ], "externalIds": { "DBLP": "conf/nips/DunlapUZYGD23", "ArXiv": "2305.16289", "DOI": "10.48550/arXiv.2305.16289", "CorpusId": 258887976 }, "url": "https://www.semanticscholar.org/paper/c346bfb9b2e2730ee3aa392fd17956416191e38d", "referenceCount": 44, "citationCount": 42, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Bottleneck Concepts in Image Classification", "abstract": "Interpreting and explaining the behavior of deep neural networks is critical for many tasks. Explainable AI provides a way to address this challenge, mostly by providing per-pixel relevance to the decision. Yet, interpreting such explanations may require expert knowledge. Some recent attempts toward interpretability adopt a concept-based framework, giving a higher-level relationship between some concepts and model decisions. This paper proposes Bottleneck Concept Learner (BotCL), which represents an image solely by the presence/absence of concepts learned through training over the target task without explicit supervision over the concepts. It uses self-supervision and tailored regularizers so that learned concepts can be human-understandable. Using some image classification tasks as our testbed, we demonstrate BotCL's potential to rebuild neural networks for better interpretability11Code is avaliable at https://github.com/wbw520/BotCL and a simple demo is available at https://botcl.liangzhili.com/.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Bowen Wang", "Liangzhi Li", "Yuta Nakashima", "Hajime Nagahara" ], "externalIds": { "DBLP": "journals/corr/abs-2304-10131", "ArXiv": "2304.10131", "DOI": "10.48550/arXiv.2304.10131", "CorpusId": 258236219 }, "url": "https://www.semanticscholar.org/paper/4833b15d617ee2a44bfe326bb397e7424a0a8e21", "referenceCount": 51, "citationCount": 26, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Visual Instruction Tuning", "abstract": "Instruction tuning large language models (LLMs) using machine-generated instruction-following data has improved zero-shot capabilities on new tasks, but the idea is less explored in the multimodal field. In this paper, we present the first attempt to use language-only GPT-4 to generate multimodal language-image instruction-following data. By instruction tuning on such generated data, we introduce LLaVA: Large Language and Vision Assistant, an end-to-end trained large multimodal model that connects a vision encoder and LLM for general-purpose visual and language understanding.Our early experiments show that LLaVA demonstrates impressive multimodel chat abilities, sometimes exhibiting the behaviors of multimodal GPT-4 on unseen images/instructions, and yields a 85.1% relative score compared with GPT-4 on a synthetic multimodal instruction-following dataset. When fine-tuned on Science QA, the synergy of LLaVA and GPT-4 achieves a new state-of-the-art accuracy of 92.53%. 
We make GPT-4 generated visual instruction tuning data, our model and code base publicly available.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Haotian Liu", "Chunyuan Li", "Qingyang Wu", "Yong Jae Lee" ], "externalIds": { "DBLP": "journals/corr/abs-2304-08485", "ArXiv": "2304.08485", "DOI": "10.48550/arXiv.2304.08485", "CorpusId": 258179774 }, "url": "https://www.semanticscholar.org/paper/a5036f31f0e629dc661f120b8c3b1f374d479ab8", "referenceCount": 63, "citationCount": 2100, "influentialCitationCount": 564, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Uncurated Image-Text Datasets: Shedding Light on Demographic Bias", "abstract": "The increasing tendency to collect large and uncurated datasets to train vision-and-language models has raised concerns about fair representations. It is known that even small but manually annotated datasets, such as MSCOCO, are affected by societal bias. This problem, far from being solved, may be getting worse with data crawled from the Internet without much control. In addition, the lack of tools to analyze societal bias in big collections of images makes addressing the problem extremely challenging. Our first contribution is to annotate part of the Google Conceptual Captions dataset, widely used for training vision-and-language models, with four demographic and two contextual attributes. Our second contribution is to conduct a comprehensive analysis of the annotations, focusing on how different demographic groups are represented. Our last contribution lies in evaluating three prevailing vision-and-language tasks: image captioning, text-image CLIP embeddings, and text-to-image generation, showing that societal bias is a persistent problem in all of them. https://github.com/noagarcia/phase", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Noa García", "Yusuke Hirota", "Yankun Wu", "Yuta Nakashima" ], "externalIds": { "DBLP": "conf/cvpr/GarciaHWN23", "ArXiv": "2304.02828", "DOI": "10.1109/CVPR52729.2023.00672", "CorpusId": 257985091 }, "url": "https://www.semanticscholar.org/paper/4aedabc733e033b23eee068311f64366403c98db", "referenceCount": 62, "citationCount": 40, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Detecting Everything in the Open World: Towards Universal Object Detection", "abstract": "In this paper, we formally address universal object detection, which aims to detect every scene and predict every category. The dependence on human annotations, the limited visual information, and the novel categories in the open world severely restrict the universality of traditional detectors. We propose UniDetector, a universal object detector that has the ability to recognize enormous categories in the open world. The critical points for the universality of UniDetector are: 1) it leverages images of multiple sources and heterogeneous label spaces for training through the alignment of image and text spaces, which guarantees sufficient information for universal representations. 2) it generalizes to the open world easily while keeping the balance between seen and unseen classes, thanks to abundant information from both vision and language modalities. 3) it further promotes the generalization ability to novel categories through our proposed decoupling training manner and probability calibration. 
These contributions allow UniDetector to detect over 7k categories, the largest measurable category size so far, with only about 500 classes participating in training. Our UniDetector behaves the strong zero-shot generalization ability on largevocabulary datasets - it surpasses the traditional supervised baselines by more than 4% on average without seeing any corresponding images. On 13 public detection datasets with various scenes, UniDetector also achieves state-of-the-art performance with only a 3% amount of training data. 11Codes are available at https://github.com/zhenyuw16/UniDetector.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Zhenyu Wang", "Yali Li", "Xi Chen", "S. Lim", "A. Torralba", "Hengshuang Zhao", "Shengjin Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2303-11749", "ArXiv": "2303.11749", "DOI": "10.1109/CVPR52729.2023.01100", "CorpusId": 257636989 }, "url": "https://www.semanticscholar.org/paper/af6db7ae134ebad3fc12c34ff3a3c2139aa97bd8", "referenceCount": 73, "citationCount": 53, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Overwriting Pretrained Bias with Finetuning Data", "abstract": "Transfer learning is beneficial by allowing the expressive features of models pretrained on large-scale datasets to be finetuned for the target task of smaller, more domain-specific datasets. However, there is a concern that these pretrained models may come with their own biases which would propagate into the finetuned model. In this work, we investigate bias when conceptualized as both spurious correlations between the target task and a sensitive attribute as well as underrepresentation of a particular group in the dataset. Under both notions of bias, we find that (1) models finetuned on top of pretrained models can indeed inherit their biases, but (2) this bias can be corrected for through relatively minor interventions to the finetuning dataset, and often with a negligible impact to performance. Our findings imply that careful curation of the finetuning dataset is important for reducing biases on a downstream task, and doing so can even compensate for bias in the pretrained model.", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Angelina Wang", "Olga Russakovsky" ], "externalIds": { "DBLP": "conf/iccv/WangR23", "ArXiv": "2303.06167", "DOI": "10.1109/ICCV51070.2023.00366", "CorpusId": 257496198 }, "url": "https://www.semanticscholar.org/paper/17133d143f15b5ee549952c0b880d0f76024c4d8", "referenceCount": 68, "citationCount": 20, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Grounding DINO: Marrying DINO with Grounded Pre-Training for Open-Set Object Detection", "abstract": "In this paper, we present an open-set object detector, called Grounding DINO, by marrying Transformer-based detector DINO with grounded pre-training, which can detect arbitrary objects with human inputs such as category names or referring expressions. The key solution of open-set object detection is introducing language to a closed-set detector for open-set concept generalization. To effectively fuse language and vision modalities, we conceptually divide a closed-set detector into three phases and propose a tight fusion solution, which includes a feature enhancer, a language-guided query selection, and a cross-modality decoder for cross-modality fusion. 
While previous works mainly evaluate open-set object detection on novel categories, we propose to also perform evaluations on referring expression comprehension for objects specified with attributes. Grounding DINO performs remarkably well on all three settings, including benchmarks on COCO, LVIS, ODinW, and RefCOCO/+/g. Grounding DINO achieves a $52.5$ AP on the COCO detection zero-shot transfer benchmark, i.e., without any training data from COCO. It sets a new record on the ODinW zero-shot benchmark with a mean $26.1$ AP. Code will be available at \\url{https://github.com/IDEA-Research/GroundingDINO}.", "year": 2023, "venue": "arXiv.org", "authors": [ "Shilong Liu", "Zhaoyang Zeng", "Tianhe Ren", "Feng Li", "Hao Zhang", "Jie Yang", "Chun-yue Li", "Jianwei Yang", "Hang Su", "Jun-Juan Zhu", "Lei Zhang" ], "externalIds": { "DBLP": "journals/corr/abs-2303-05499", "ArXiv": "2303.05499", "DOI": "10.48550/arXiv.2303.05499", "CorpusId": 257427307 }, "url": "https://www.semanticscholar.org/paper/c3e5a20b844c042d2174263d2fd5b30d8cc8f0b0", "referenceCount": 68, "citationCount": 988, "influentialCitationCount": 147, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Spawrious: A Benchmark for Fine Control of Spurious Correlation Biases", "abstract": "The problem of spurious correlations (SCs) arises when a classifier relies on non-predictive features that happen to be correlated with the labels in the training data. For example, a classifier may misclassify dog breeds based on the background of dog images. This happens when the backgrounds are correlated with other breeds in the training data, leading to misclassifications during test time. Previous SC benchmark datasets suffer from varying issues, e.g., over-saturation or only containing one-to-one (O2O) SCs, but no many-to-many (M2M) SCs arising between groups of spurious attributes and classes. In this paper, we present \\benchmark-\\{O2O, M2M\\}-\\{Easy, Medium, Hard\\}, an image classification benchmark suite containing spurious correlations between classes and backgrounds. To create this dataset, we employ a text-to-image model to generate photo-realistic images and an image captioning model to filter out unsuitable ones. The resulting dataset is of high quality and contains approximately 152k images. Our experimental results demonstrate that state-of-the-art group robustness methods struggle with \\benchmark, most notably on the Hard-splits with none of them getting over $70\\%$ accuracy on the hardest split using a ResNet50 pretrained on ImageNet. By examining model misclassifications, we detect reliances on spurious backgrounds, demonstrating that our dataset provides a significant challenge.", "year": 2023, "venue": "arXiv.org", "authors": [ "Aengus Lynch", "G. Dovonon", "Jean Kaddour", "Ricardo M. A. Silva" ], "externalIds": { "DBLP": "journals/corr/abs-2303-05470", "ArXiv": "2303.05470", "DOI": "10.48550/arXiv.2303.05470", "CorpusId": 257427315 }, "url": "https://www.semanticscholar.org/paper/eb6399becbc470e3f15cc92ce6ea364f815ad1cd", "referenceCount": 88, "citationCount": 16, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Whac-A-Mole Dilemma: Shortcuts Come in Multiples Where Mitigating One Amplifies Others", "abstract": "Machine learning models have been found to learn shortcuts—unintended decision rules that are unable to generalize—undermining models' reliability. 
Previous works address this problem under the tenuous assumption that only a single shortcut exists in the training data. Real-world images are rife with multiple visual cues from background to texture. Key to advancing the reliability of vision systems is understanding whether existing methods can overcome multiple shortcuts or struggle in a Whac-A-Mole game, i.e., where mitigating one shortcut amplifies reliance on others. To address this shortcoming, we propose two benchmarks: 1) UrbanCars, a dataset with precisely controlled spurious cues, and 2) ImageNet-W, an evaluation set based on ImageNet for watermark, a shortcut we discovered affects nearly every modern vision model. Along with texture and background, ImageNet-W allows us to study multiple shortcuts emerging from training on natural images. We find computer vision models, including large foundation models—regardless of training set, architecture, and supervision—struggle when multiple shortcuts are present. Even methods explicitly designed to combat shortcuts struggle in a Whac-A-Mole dilemma. To tackle this challenge, we propose Last Layer Ensemble, a simple-yet-effective method to mitigate multiple shortcuts without Whac-A-Mole behavior. Our results surface multi-shortcut mitigation as an overlooked challenge critical to advancing the reliability of vision systems. The datasets and code are released: https://github.com/facebookresearch/Whac-A-Mole.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Zhiheng Li", "I. Evtimov", "Albert Gordo", "C. Hazirbas", "Tal Hassner", "Cristian Cantón Ferrer", "Chenliang Xu", "Mark Ibrahim" ], "externalIds": { "DBLP": "conf/cvpr/LiEGHHCXI23", "ArXiv": "2212.04825", "DOI": "10.1109/CVPR52729.2023.01922", "CorpusId": 254536007 }, "url": "https://www.semanticscholar.org/paper/290ad057e7c36901201c537e863bef5ff1d80d5c", "referenceCount": 105, "citationCount": 51, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Adaptive Testing of Computer Vision Models", "abstract": "Vision models often fail systematically on groups of data that share common semantic characteristics (e.g., rare objects or unusual scenes), but identifying these failure modes is a challenge. We introduce AdaVision, an interactive process for testing vision models which helps users identify and fix coherent failure modes. Given a natural language description of a coherent group, AdaVision retrieves relevant images from LAION-5B with CLIP. The user then labels a small amount of data for model correctness, which is used in successive retrieval rounds to hill-climb towards high-error regions, refining the group definition. Once a group is saturated, AdaVision uses GPT-3 to suggest new group descriptions for the user to explore. We demonstrate the usefulness and generality of AdaVision in user studies, where users find major bugs in state-of-the-art classification, object detection, and image captioning models. These user-discovered groups have failure rates 2-3x higher than those surfaced by automatic error clustering methods. Finally, finetuning on examples found with AdaVision fixes the discovered bugs when evaluated on unseen examples, without degrading in-distribution accuracy, and while also improving performance on out-of-distribution datasets.", "year": 2022, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Irena Gao", "Gabriel Ilharco", "Scott M. 
Lundberg", "Marco Tulio Ribeiro" ], "externalIds": { "ArXiv": "2212.02774", "DBLP": "journals/corr/abs-2212-02774", "DOI": "10.1109/ICCV51070.2023.00370", "CorpusId": 254274920 }, "url": "https://www.semanticscholar.org/paper/841f5c091ed8491d9fd50cf124de7c67d500bdb8", "referenceCount": 69, "citationCount": 27, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Visual Programming: Compositional visual reasoning without training", "abstract": "We present Visprog, a neuro-symbolic approach to solving complex and compositional visual tasks given natural language instructions. Visprog avoids the need for any task-specific training. Instead, it uses the incontext learning ability of large language models to generate python-like modular programs, which are then executed to get both the solution and a comprehensive and interpretable rationale. Each line of the generated program may invoke one of several off-the-shelf computer vision models, image processing subroutines, or python functions to produce intermediate outputs that may be consumed by subsequent parts of the program. We demonstrate the flexibility of VIsPROG on 4 diverse tasks - compositional visual question answering, zero-shot reasoning on image pairs, factual knowledge object tagging, and language-guided image editing. We believe neuro-symbolic approaches like Visprog are an exciting avenue to easily and effectively expand the scope of AI systems to serve the long tail of complex tasks that people may wish to perform.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Tanmay Gupta", "Aniruddha Kembhavi" ], "externalIds": { "DBLP": "conf/cvpr/GuptaK23", "ArXiv": "2211.11559", "DOI": "10.1109/CVPR52729.2023.01436", "CorpusId": 253734854 }, "url": "https://www.semanticscholar.org/paper/af1c871282ec122869d03f5420ef5d9143358a91", "referenceCount": 41, "citationCount": 275, "influentialCitationCount": 39, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "InstructPix2Pix: Learning to Follow Image Editing Instructions", "abstract": "We propose a method for editing images from human instructions: given an input image and a written instruction that tells the model what to do, our model follows these instructions to edit the image. To obtain training data for this problem, we combine the knowledge of two large pretrained models—a language model (GPT-3) and a text-to-image model (Stable Diffusion)—to generate a large dataset of image editing examples. Our conditional diffusion model, InstructPix2Pix, is trained on our generated data, and generalizes to real images and user-written instructions at inference time. Since it performs edits in the forward pass and does not require per-example fine-tuning or inversion, our model edits images quickly, in a matter of seconds. We show compelling editing results for a diverse collection of input images and written instructions.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Tim Brooks", "Aleksander Holynski", "Alexei A. 
Efros" ], "externalIds": { "DBLP": "conf/cvpr/BrooksHE23", "ArXiv": "2211.09800", "DOI": "10.1109/CVPR52729.2023.01764", "CorpusId": 253581213 }, "url": "https://www.semanticscholar.org/paper/a2d2bbe4c542173662a444b33b76c66992697830", "referenceCount": 74, "citationCount": 1074, "influentialCitationCount": 204, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Is synthetic data from generative models ready for image recognition?", "abstract": "Recent text-to-image generation models have shown promising results in generating high-fidelity photo-realistic images. Though the results are astonishing to human eyes, how applicable these generated images are for recognition tasks remains under-explored. In this work, we extensively study whether and how synthetic images generated from state-of-the-art text-to-image generation models can be used for image recognition tasks, and focus on two perspectives: synthetic data for improving classification models in data-scarce settings (i.e. zero-shot and few-shot), and synthetic data for large-scale model pre-training for transfer learning. We showcase the powerfulness and shortcomings of synthetic data from existing generative models, and propose strategies for better applying synthetic data for recognition tasks. Code: https://github.com/CVMI-Lab/SyntheticData.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Ruifei He", "Shuyang Sun", "Xin Yu", "Chuhui Xue", "Wenqing Zhang", "Philip H. S. Torr", "Song Bai", "Xiaojuan Qi" ], "externalIds": { "DBLP": "journals/corr/abs-2210-07574", "ArXiv": "2210.07574", "DOI": "10.48550/arXiv.2210.07574", "CorpusId": 252907242 }, "url": "https://www.semanticscholar.org/paper/2a29e1bcbed17c588ffbae1fea2af3baaab924b8", "referenceCount": 77, "citationCount": 209, "influentialCitationCount": 30, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Discovering Bugs in Vision Models using Off-the-shelf Image Generation and Captioning", "abstract": "Automatically discovering failures in vision models under real-world settings remains an open challenge. This work demonstrates how off-the-shelf, large-scale, image-to-text and text-to-image models, trained on vast amounts of data, can be leveraged to automatically find such failures. In essence, a conditional text-to-image generative model is used to generate large amounts of synthetic, yet realistic, inputs given a ground-truth label. Misclassified inputs are clustered and a captioning model is used to describe each cluster. Each cluster's description is used in turn to generate more inputs and assess whether specific clusters induce more failures than expected. We use this pipeline to demonstrate that we can effectively interrogate classifiers trained on ImageNet to find specific failure cases and discover spurious correlations. We also show that we can scale the approach to generate adversarial datasets targeting specific classifier architectures. This work serves as a proof-of-concept demonstrating the utility of large-scale generative models to automatically discover bugs in vision models in an open-ended manner. 
We also describe a number of limitations and pitfalls related to this approach.", "year": 2022, "venue": "arXiv.org", "authors": [ "Olivia Wiles", "Isabela Albuquerque", "Sven Gowal" ], "externalIds": { "ArXiv": "2208.08831", "DBLP": "journals/corr/abs-2208-08831", "DOI": "10.48550/arXiv.2208.08831", "CorpusId": 251643511 }, "url": "https://www.semanticscholar.org/paper/0ed568b25c1431a7a22a9c64ef2dcff638e3cb04", "referenceCount": 73, "citationCount": 35, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "DataPerf: Benchmarks for Data-Centric AI Development", "abstract": "Machine learning research has long focused on models rather than datasets, and prominent datasets are used for common ML tasks without regard to the breadth, difficulty, and faithfulness of the underlying problems. Neglecting the fundamental importance of data has given rise to inaccuracy, bias, and fragility in real-world applications, and research is hindered by saturation across existing dataset benchmarks. In response, we present DataPerf, a community-led benchmark suite for evaluating ML datasets and data-centric algorithms. We aim to foster innovation in data-centric AI through competition, comparability, and reproducibility. We enable the ML community to iterate on datasets, instead of just architectures, and we provide an open, online platform with multiple rounds of challenges to support this iterative development. The first iteration of DataPerf contains five benchmarks covering a wide spectrum of data-centric techniques, tasks, and modalities in vision, speech, acquisition, debugging, and diffusion prompting, and we support hosting new contributed benchmarks from the community. The benchmarks, online evaluation platform, and baseline implementations are open source, and the MLCommons Association will maintain DataPerf to ensure long-term benefits to academia and industry.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Mark Mazumder", "Colby R. Banbury", "Xiaozhe Yao", "Bojan Karlavs", "W. G. Rojas", "Sudnya Diamos", "G. Diamos", "Lynn He", "Douwe Kiela", "David Jurado", "David Kanter", "Rafael Mosquera", "Juan Ciro", "Lora Aroyo", "Bilge Acun", "Sabri Eyuboglu", "Amirata Ghorbani", "E. Goodman", "Tariq Kane", "Christine R. Kirkpatrick", "Tzu-Sheng Kuo", "Jonas W. Mueller", "Tristan Thrush", "J. Vanschoren", "Margaret J. Warren", "Adina Williams", "Serena Yeung", "Newsha Ardalani", "Praveen K. Paritosh", "Ce Zhang", "James Y. Zou", "Carole-Jean Wu", "Cody Coleman", "Andrew Y. Ng", "Peter Mattson", "V. Reddi" ], "externalIds": { "DBLP": "conf/nips/MazumderBYKRDDH23", "ArXiv": "2207.10062", "DOI": "10.48550/arXiv.2207.10062", "CorpusId": 250699092 }, "url": "https://www.semanticscholar.org/paper/78040774044769c21e1dd7494898f629a62524cc", "referenceCount": 92, "citationCount": 75, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Improving Robustness to Texture Bias via Shape-focused Augmentation", "abstract": "Despite significant progress of deep neural networks in image classification, it has been reported that CNNs trained on ImageNet have heavily focused on local texture information, rather than capturing complex visual concepts of the objects. To delve into this phenomenon, recent studies proposed to generate images with modified texture information for training the model. 
However, these methods largely sacrifice the classification accuracy on the in-domain dataset while achieving improved performance on the out-of-distribution dataset. Motivated by the fact that human tends to focus on shape information, we aim to resolve this issue by proposing a shape-focused augmentation where the texture in the object’s foreground and background are separately changed. Key idea is that by applying different modifications to the inside and outside of an object, not only the bias toward texture is reduced but also the model is induced to focus on shape. Experiments show that the proposed method successfully reduces texture bias and also improves the classification performance on the original dataset.", "year": 2022, "venue": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "authors": [ "Sangjun Lee", "Inwoo Hwang", "Gi-Cheon Kang", "Byoung-Tak Zhang", "Ai" ], "externalIds": { "DBLP": "conf/cvpr/LeeHKZ22", "DOI": "10.1109/CVPRW56347.2022.00478", "CorpusId": 251034466 }, "url": "https://www.semanticscholar.org/paper/7fee1380b3a3e883ff59d5828232c514474c62eb", "referenceCount": 20, "citationCount": 10, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "High-Resolution Image Synthesis with Latent Diffusion Models", "abstract": "By decomposing the image formation process into a sequential application of denoising autoencoders, diffusion models (DMs) achieve state-of-the-art synthesis results on image data and beyond. Additionally, their formulation allows for a guiding mechanism to control the image generation process without retraining. However, since these models typically operate directly in pixel space, optimization of powerful DMs often consumes hundreds of GPU days and inference is expensive due to sequential evaluations. To enable DM training on limited computational resources while retaining their quality and flexibility, we apply them in the latent space of powerful pretrained autoencoders. In contrast to previous work, training diffusion models on such a representation allows for the first time to reach a near-optimal point between complexity reduction and detail preservation, greatly boosting visual fidelity. By introducing cross-attention layers into the model architecture, we turn diffusion models into powerful and flexible generators for general conditioning inputs such as text or bounding boxes and high-resolution synthesis becomes possible in a convolutional manner. Our latent diffusion models (LDMs) achieve new state of the art scores for image inpainting and class-conditional image synthesis and highly competitive performance on various tasks, including unconditional image generation, text-to-image synthesis, and super-resolution, while significantly reducing computational requirements compared to pixel-based DMs.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Robin Rombach", "A. Blattmann", "Dominik Lorenz", "Patrick Esser", "B. 
Ommer" ], "externalIds": { "ArXiv": "2112.10752", "DBLP": "journals/corr/abs-2112-10752", "DOI": "10.1109/CVPR52688.2022.01042", "CorpusId": 245335280 }, "url": "https://www.semanticscholar.org/paper/c10075b3746a9f3dd5811970e93c8ca3ad39b39d", "referenceCount": 110, "citationCount": 9847, "influentialCitationCount": 2744, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Object-aware Contrastive Learning for Debiased Scene Representation", "abstract": "Contrastive self-supervised learning has shown impressive results in learning visual representations from unlabeled images by enforcing invariance against different data augmentations. However, the learned representations are often contextually biased to the spurious scene correlations of different objects or object and background, which may harm their generalization on the downstream tasks. To tackle the issue, we develop a novel object-aware contrastive learning framework that first (a) localizes objects in a self-supervised manner and then (b) debias scene correlations via appropriate data augmentations considering the inferred object locations. For (a), we propose the contrastive class activation map (ContraCAM), which finds the most discriminative regions (e.g., objects) in the image compared to the other images using the contrastively trained models. We further improve the ContraCAM to detect multiple objects and entire shapes via an iterative refinement procedure. For (b), we introduce two data augmentations based on ContraCAM, object-aware random crop and background mixup, which reduce contextual and background biases during contrastive self-supervised learning, respectively. Our experiments demonstrate the effectiveness of our representation learning framework, particularly when trained under multi-object images or evaluated under the background (and distribution) shifted images.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Sangwoo Mo", "H. Kang", "Kihyuk Sohn", "Chun-Liang Li", "Jinwoo Shin" ], "externalIds": { "DBLP": "journals/corr/abs-2108-00049", "ArXiv": "2108.00049", "CorpusId": 236772573 }, "url": "https://www.semanticscholar.org/paper/37de04f1055d97acdec3d3710a8db219ba8e0273", "referenceCount": 86, "citationCount": 42, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Just Train Twice: Improving Group Robustness without Training Group Information", "abstract": "Standard training via empirical risk minimization (ERM) can produce models that achieve high accuracy on average but low accuracy on certain groups, especially in the presence of spurious correlations between the input and label. Prior approaches that achieve high worst-group accuracy, like group distributionally robust optimization (group DRO) require expensive group annotations for each training point, whereas approaches that do not use such group annotations typically achieve unsatisfactory worst-group accuracy. In this paper, we propose a simple two-stage approach, JTT, that first trains a standard ERM model for several epochs, and then trains a second model that upweights the training examples that the first model misclassified. Intuitively, this upweights examples from groups on which standard ERM models perform poorly, leading to improved worst-group performance. 
Averaged over four image classification and natural language processing tasks with spurious correlations, JTT closes 75% of the gap in worst-group accuracy between standard ERM and group DRO, while only requiring group annotations on a small validation set in order to tune hyperparameters.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "E. Liu", "Behzad Haghgoo", "Annie S. Chen", "Aditi Raghunathan", "Pang Wei Koh", "Shiori Sagawa", "Percy Liang", "Chelsea Finn" ], "externalIds": { "ArXiv": "2107.09044", "DBLP": "journals/corr/abs-2107-09044", "CorpusId": 235825419 }, "url": "https://www.semanticscholar.org/paper/216d093cb2ad81bf55c21dbce2217f2b9032e67b", "referenceCount": 63, "citationCount": 408, "influentialCitationCount": 120, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Survey on Bias in Visual Datasets", "abstract": null, "year": 2021, "venue": "Computer Vision and Image Understanding", "authors": [ "Simone Fabbrizzi", "S. Papadopoulos", "Eirini Ntoutsi", "Y. Kompatsiaris" ], "externalIds": { "ArXiv": "2107.07919", "DBLP": "journals/cviu/FabbrizziPNK22", "DOI": "10.1016/j.cviu.2022.103552", "CorpusId": 236033931 }, "url": "https://www.semanticscholar.org/paper/d4699efa686c7e046dfaf5c7261e6a48de523127", "referenceCount": 114, "citationCount": 87, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Finding and Fixing Spurious Patterns with Explanations", "abstract": "Image classifiers often use spurious patterns, such as\"relying on the presence of a person to detect a tennis racket, which do not generalize. In this work, we present an end-to-end pipeline for identifying and mitigating spurious patterns for such models, under the assumption that we have access to pixel-wise object-annotations. We start by identifying patterns such as\"the model's prediction for tennis racket changes 63% of the time if we hide the people.\"Then, if a pattern is spurious, we mitigate it via a novel form of data augmentation. We demonstrate that our method identifies a diverse set of spurious patterns and that it mitigates them by producing a model that is both more accurate on a distribution where the spurious pattern is not helpful and more robust to distribution shift.", "year": 2021, "venue": "Trans. Mach. Learn. Res.", "authors": [ "Gregory Plumb", "Marco Tulio Ribeiro", "Ameet Talwalkar" ], "externalIds": { "DBLP": "journals/tmlr/PlumbRT22", "ArXiv": "2106.02112", "CorpusId": 235352578 }, "url": "https://www.semanticscholar.org/paper/1e58f1e94a03ef6434ce5e3360781d546f8a2f5b", "referenceCount": 47, "citationCount": 37, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Does enhanced shape bias improve neural network robustness to common corruptions?", "abstract": "Convolutional neural networks (CNNs) learn to extract representations of complex features, such as object shapes and textures to solve image recognition tasks. Recent work indicates that CNNs trained on ImageNet are biased towards features that encode textures and that these alone are sufficient to generalize to unseen test data from the same distribution as the training data but often fail to generalize to out-of-distribution data. 
It has been shown that augmenting the training data with different image styles decreases this texture bias in favor of increased shape bias while at the same time improving robustness to common corruptions, such as noise and blur. Commonly, this is interpreted as shape bias increasing corruption robustness. However, this relationship is only hypothesized. We perform a systematic study of different ways of composing inputs based on natural images, explicit edge information, and stylization. While stylization is essential for achieving high corruption robustness, we do not find a clear correlation between shape bias and robustness. We conclude that the data augmentation caused by style-variation accounts for the improved corruption robustness and increased shape bias is only a byproduct.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Chaithanya Kumar Mummadi", "Ranjitha Subramaniam", "Robin Hutmacher", "Julien Vitay", "Volker Fischer", "J. H. Metzen" ], "externalIds": { "ArXiv": "2104.09789", "DBLP": "journals/corr/abs-2104-09789", "CorpusId": 233306976 }, "url": "https://www.semanticscholar.org/paper/3797437ea7990e99cbba6e94d402650a842ba738", "referenceCount": 39, "citationCount": 33, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "When Pigs Fly: Contextual Reasoning in Synthetic and Natural Scenes", "abstract": "Context is of fundamental importance to both human and machine vision; e.g., an object in the air is more likely to be an airplane than a pig. The rich notion of context incorporates several aspects including physics rules, statistical co-occurrences, and relative object sizes, among others. While previous work has focused on crowd-sourced out-of-context photographs from the web to study scene context, controlling the nature and extent of contextual violations has been a daunting task. Here we introduce a diverse, synthetic Out-of-Context Dataset (OCD) with fine-grained control over scene context. By leveraging a 3D simulation engine, we systematically control the gravity, object co-occurrences and relative sizes across 36 object categories in a virtual household environment. We conducted a series of experiments to gain insights into the impact of contextual cues on both human and machine vision using OCD. We conducted psychophysics experiments to establish a human benchmark for out-of-context recognition, and then compared it with state-of-the-art computer vision models to quantify the gap between the two. We propose a context-aware recognition transformer model, fusing object and contextual information via multi-head attention. Our model captures useful information for contextual reasoning, enabling human-level performance and better robustness in out-of-context conditions compared to baseline models across OCD and other out-of-context datasets. All source code and data are publicly available at https://github.com/kreimanlab/WhenPigsFlyContext", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Philipp Bomatter", "Mengmi Zhang", "D. 
Karev", "Spandan Madan", "Claire Tseng", "Gabriel Kreiman" ], "externalIds": { "DBLP": "journals/corr/abs-2104-02215", "ArXiv": "2104.02215", "DOI": "10.1109/ICCV48922.2021.00032", "CorpusId": 233033825, "PubMed": "36051852" }, "url": "https://www.semanticscholar.org/paper/ec23ec36e724f87683f162b6f86e655973063787", "referenceCount": 43, "citationCount": 23, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Learning Transferable Visual Models From Natural Language Supervision", "abstract": "State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at https://github.com/OpenAI/CLIP.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Alec Radford", "Jong Wook Kim", "Chris Hallacy", "A. Ramesh", "Gabriel Goh", "Sandhini Agarwal", "Girish Sastry", "Amanda Askell", "Pamela Mishkin", "Jack Clark", "Gretchen Krueger", "I. Sutskever" ], "externalIds": { "ArXiv": "2103.00020", "DBLP": "conf/icml/RadfordKHRGASAM21", "CorpusId": 231591445 }, "url": "https://www.semanticscholar.org/paper/6f870f7f02a8c59c3e23f407f3ef00dd1dcf8fc4", "referenceCount": 220, "citationCount": 18886, "influentialCitationCount": 5013, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "No Subclass Left Behind: Fine-Grained Robustness in Coarse-Grained Classification Problems", "abstract": "In real-world classification tasks, each class often comprises multiple finer-grained \"subclasses.\" As the subclass labels are frequently unavailable, models trained using only the coarser-grained class labels often exhibit highly variable performance across different subclasses. This phenomenon, known as hidden stratification, has important consequences for models deployed in safety-critical applications such as medicine. We propose GEORGE, a method to both measure and mitigate hidden stratification even when subclass labels are unknown. We first observe that unlabeled subclasses are often separable in the feature space of deep models, and exploit this fact to estimate subclass labels for the training data via clustering techniques. 
We then use these approximate subclass labels as a form of noisy supervision in a distributionally robust optimization objective. We theoretically characterize the performance of GEORGE in terms of the worst-case generalization error across any subclass. We empirically validate GEORGE on a mix of real-world and benchmark image classification datasets, and show that our approach boosts worst-case subclass accuracy by up to 22 percentage points compared to standard training techniques, without requiring any information about the subclasses.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "N. Sohoni", "Jared A. Dunnmon", "Geoffrey Angus", "Albert Gu", "C. Ré" ], "externalIds": { "ArXiv": "2011.12945", "MAG": "3100350094", "DBLP": "conf/nips/SohoniDAGR20", "CorpusId": 222007674 }, "url": "https://www.semanticscholar.org/paper/8c96b865bbe1f597cf2c644e20ae46eab8e7caad", "referenceCount": 60, "citationCount": 196, "influentialCitationCount": 31, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale", "abstract": "While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Alexey Dosovitskiy", "Lucas Beyer", "Alexander Kolesnikov", "Dirk Weissenborn", "Xiaohua Zhai", "Thomas Unterthiner", "Mostafa Dehghani", "Matthias Minderer", "G. Heigold", "S. Gelly", "Jakob Uszkoreit", "N. Houlsby" ], "externalIds": { "MAG": "3094502228", "ArXiv": "2010.11929", "DBLP": "conf/iclr/DosovitskiyB0WZ21", "CorpusId": 225039882 }, "url": "https://www.semanticscholar.org/paper/268d347e8a55b5eb82fb5e7d2f800e33c75ab18a", "referenceCount": 65, "citationCount": 28233, "influentialCitationCount": 4121, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Mitigating Gender Bias in Captioning Systems", "abstract": "Image captioning has made substantial progress with huge supporting image collections sourced from the web. However, recent studies have pointed out that captioning datasets, such as COCO, contain gender bias found in web corpora. As a result, learning models could heavily rely on the learned priors and image context for gender identification, leading to incorrect or even offensive errors. To encourage models to learn correct gender features, we reorganize the COCO dataset and present two new splits COCO-GB V1 and V2 datasets where the train and test sets have different gender-context joint distribution. Models relying on contextual cues will suffer from huge gender prediction errors on the anti-stereotypical test data. 
Benchmarking experiments reveal that most captioning models learn gender bias, leading to high gender prediction errors, especially for women. To alleviate the unwanted bias, we propose a new Guided Attention Image Captioning model (GAIC) which provides self-guidance on visual attention to encourage the model to capture correct gender visual evidence. Experimental results validate that GAIC can significantly reduce gender prediction errors with a competitive caption quality. Our codes and the designed benchmark datasets are available at https://github.com/datamllab/Mitigating_Gender_Bias_In_Captioning_System.", "year": 2020, "venue": "The Web Conference", "authors": [ "Ruixiang Tang", "Mengnan Du", "Yuening Li", "Zirui Liu", "Xia Hu" ], "externalIds": { "DBLP": "conf/www/TangDLLZH21", "ArXiv": "2006.08315", "MAG": "3034651947", "DOI": "10.1145/3442381.3449950", "CorpusId": 219687817 }, "url": "https://www.semanticscholar.org/paper/922ca07d02384c9b8e807912f3b64e48c36a2357", "referenceCount": 50, "citationCount": 54, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Language Models are Few-Shot Learners", "abstract": "Recent work has demonstrated substantial gains on many NLP tasks and benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples. By contrast, humans can generally perform a new language task from only a few examples or from simple instructions - something which current NLP systems still largely struggle to do. Here we show that scaling up language models greatly improves task-agnostic, few-shot performance, sometimes even reaching competitiveness with prior state-of-the-art fine-tuning approaches. Specifically, we train GPT-3, an autoregressive language model with 175 billion parameters, 10x more than any previous non-sparse language model, and test its performance in the few-shot setting. For all tasks, GPT-3 is applied without any gradient updates or fine-tuning, with tasks and few-shot demonstrations specified purely via text interaction with the model. GPT-3 achieves strong performance on many NLP datasets, including translation, question-answering, and cloze tasks, as well as several tasks that require on-the-fly reasoning or domain adaptation, such as unscrambling words, using a novel word in a sentence, or performing 3-digit arithmetic. At the same time, we also identify some datasets where GPT-3's few-shot learning still struggles, as well as some datasets where GPT-3 faces methodological issues related to training on large web corpora. Finally, we find that GPT-3 can generate samples of news articles which human evaluators have difficulty distinguishing from articles written by humans. We discuss broader societal impacts of this finding and of GPT-3 in general.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Tom B. Brown", "Benjamin Mann", "Nick Ryder", "Melanie Subbiah", "J. Kaplan", "Prafulla Dhariwal", "Arvind Neelakantan", "Pranav Shyam", "Girish Sastry", "Amanda Askell", "Sandhini Agarwal", "Ariel Herbert-Voss", "Gretchen Krueger", "T. Henighan", "R. Child", "A. Ramesh", "Daniel M. Ziegler", "Jeff Wu", "Clemens Winter", "Christopher Hesse", "Mark Chen", "Eric Sigler", "Ma-teusz Litwin", "Scott Gray", "B. Chess", "Jack Clark", "Christopher Berner", "Sam McCandlish", "Alec Radford", "I. 
Sutskever", "Dario Amodei" ], "externalIds": { "ArXiv": "2005.14165", "DBLP": "conf/nips/BrownMRSKDNSSAA20", "MAG": "3030163527", "CorpusId": 218971783 }, "url": "https://www.semanticscholar.org/paper/90abbc2cf38462b954ae1b772fac9532e2ccd8b0", "referenceCount": 146, "citationCount": 30854, "influentialCitationCount": 3529, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Distributionally Robust Neural Networks", "abstract": "Overparameterized neural networks trained to minimize average loss can be highly accurate on average on an i.i.d. test set, yet consistently fail on atypical groups of the data (e.g., by learning spurious correlations that do not hold at test time). Distributionally robust optimization (DRO) provides an approach for learning models that instead minimize worst-case training loss over a set of pre-defined groups. We find, however, that naively applying DRO to overparameterized neural networks fails: these models can perfectly fit the training data, and any model with vanishing average training loss will also already have vanishing worst-case training loss. Instead, the poor worst-case performance of these models arises from poor generalization on some groups. As a solution, we show that increased regularization---e.g., stronger-than-typical weight decay or early stopping---allows DRO models to achieve substantially higher worst-group accuracies, with 10% to 40% improvements over standard models on a natural language inference task and two image tasks, while maintaining high average accuracies. Our results suggest that regularization is critical for worst-group performance in the overparameterized regime, even if it is not needed for average performance. Finally, we introduce and provide convergence guarantees for a stochastic optimizer for this group DRO setting, underpinning the empirical study above.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Shiori Sagawa", "Pang Wei Koh", "Tatsunori B. Hashimoto", "Percy Liang" ], "externalIds": { "MAG": "2995793065", "DBLP": "conf/iclr/SagawaKHL20", "CorpusId": 213662188 }, "url": "https://www.semanticscholar.org/paper/5d0e2635a1ebe2c9347529975bc876d4286c9ab7", "referenceCount": 56, "citationCount": 455, "influentialCitationCount": 111, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "REVISE: A Tool for Measuring and Mitigating Bias in Visual Datasets", "abstract": null, "year": 2020, "venue": "International Journal of Computer Vision", "authors": [ "Angelina Wang", "Alexander Liu", "Ryan Zhang", "Anat Kleiman", "Leslie Kim", "Dora Zhao", "Iroha Shirai", "Arvind Narayanan", "Olga Russakovsky" ], "externalIds": { "DBLP": "journals/ijcv/WangLZKKZSNR22", "ArXiv": "2004.07999", "MAG": "3048427567", "DOI": "10.1007/s11263-022-01625-5", "CorpusId": 221112230 }, "url": "https://www.semanticscholar.org/paper/fa2b8b28f0b00a4cb31a51a3b47f13ba55a1623a", "referenceCount": 120, "citationCount": 156, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Shortcut learning in deep neural networks", "abstract": null, "year": 2020, "venue": "Nature Machine Intelligence", "authors": [ "Robert Geirhos", "J. Jacobsen", "Claudio Michaelis", "R. Zemel", "Wieland Brendel", "M. 
Bethge", "Felix Wichmann" ], "externalIds": { "DBLP": "journals/natmi/GeirhosJMZBBW20", "ArXiv": "2004.07780", "MAG": "3016970897", "DOI": "10.1038/s42256-020-00257-z", "CorpusId": 215786368 }, "url": "https://www.semanticscholar.org/paper/1b04936c2599e59b120f743fbb30df2eed3fd782", "referenceCount": 167, "citationCount": 1664, "influentialCitationCount": 90, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Biology" ] }, { "title": "Don’t Judge an Object by Its Context: Learning to Overcome Contextual Bias", "abstract": "Existing models often leverage co-occurrences between objects and their context to improve recognition accuracy. However, strongly relying on context risks a model's generalizability, especially when typical co-occurrence patterns are absent. This work focuses on addressing such contextual biases to improve the robustness of the learnt feature representations. Our goal is to accurately recognize a category in the absence of its context, without compromising on performance when it co-occurs with context. Our key idea is to decorrelate feature representations of a category from its co-occurring context. We achieve this by learning a feature subspace that explicitly represents categories occurring in the absence of context along side a joint feature subspace that represents both categories and context. Our very simple yet effective method is extensible to two multi-label tasks -- object and attribute classification. On 4 challenging datasets, we demonstrate the effectiveness of our method in reducing contextual bias.", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Krishna Kumar Singh", "D. Mahajan", "K. Grauman", "Yong Jae Lee", "Matt Feiszli", "Deepti Ghadiyaram" ], "externalIds": { "MAG": "3034410824", "DBLP": "journals/corr/abs-2001-03152", "ArXiv": "2001.03152", "DOI": "10.1109/cvpr42600.2020.01108", "CorpusId": 210116489 }, "url": "https://www.semanticscholar.org/paper/20b0462acebf2bf859d14a53a020b21448cfbebf", "referenceCount": 39, "citationCount": 100, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Towards fairer datasets: filtering and balancing the distribution of the people subtree in the ImageNet hierarchy", "abstract": "Computer vision technology is being used by many but remains representative of only a few. People have reported misbehavior of computer vision models, including offensive prediction results and lower performance for underrepresented groups. Current computer vision models are typically developed using datasets consisting of manually annotated images or videos; the data and label distributions in these datasets are critical to the models' behavior. In this paper, we examine ImageNet, a large-scale ontology of images that has spurred the development of many modern computer vision methods. We consider three key factors within the person subtree of ImageNet that may lead to problematic behavior in downstream computer vision technology: (1) the stagnant concept vocabulary of WordNet, (2) the attempt at exhaustive illustration of all categories with images, and (3) the inequality of representation in the images within concepts. 
We seek to illuminate the root causes of these concerns and take the first steps to mitigate them constructively.", "year": 2019, "venue": "FAT*", "authors": [ "Kaiyu Yang", "Klint Qinami", "Li Fei-Fei", "Jia Deng", "Olga Russakovsky" ], "externalIds": { "MAG": "2996535895", "DBLP": "journals/corr/abs-1912-07726", "ArXiv": "1912.07726", "DOI": "10.1145/3351095.3375709", "CorpusId": 209386709 }, "url": "https://www.semanticscholar.org/paper/d2e9e6991e526a5ec130ea07efab354398faec70", "referenceCount": 86, "citationCount": 283, "influentialCitationCount": 22, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PyTorch: An Imperative Style, High-Performance Deep Learning Library", "abstract": "Deep learning frameworks have often focused on either usability or speed, but not both. PyTorch is a machine learning library that shows that these two goals are in fact compatible: it was designed from first principles to support an imperative and Pythonic programming style that supports code as a model, makes debugging easy and is consistent with other popular scientific computing libraries, while remaining efficient and supporting hardware accelerators such as GPUs. In this paper, we detail the principles that drove the implementation of PyTorch and how they are reflected in its architecture. We emphasize that every aspect of PyTorch is a regular Python program under the full control of its user. We also explain how the careful and pragmatic implementation of the key components of its runtime enables them to work together to achieve compelling performance. We demonstrate the efficiency of individual subsystems, as well as the overall speed of PyTorch on several commonly used benchmarks.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Adam Paszke", "Sam Gross", "Francisco Massa", "Adam Lerer", "James Bradbury", "Gregory Chanan", "Trevor Killeen", "Zeming Lin", "N. Gimelshein", "L. Antiga", "Alban Desmaison", "Andreas Köpf", "E. Yang", "Zach DeVito", "Martin Raison", "Alykhan Tejani", "Sasank Chilamkurthy", "Benoit Steiner", "Lu Fang", "Junjie Bai", "Soumith Chintala" ], "externalIds": { "MAG": "2970971581", "DBLP": "journals/corr/abs-1912-01703", "ArXiv": "1912.01703", "CorpusId": 202786778 }, "url": "https://www.semanticscholar.org/paper/3c8a456509e6c0805354bd40a35e3f2dbf8069b1", "referenceCount": 39, "citationCount": 36158, "influentialCitationCount": 3694, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Confident Learning: Estimating Uncertainty in Dataset Labels", "abstract": "Learning exists in the context of data, yet notions of confidence typically focus on model predictions, not label quality. Confident learning (CL) is an alternative approach which focuses instead on label quality by characterizing and identifying label errors in datasets, based on the principles of pruning noisy data, counting with probabilistic thresholds to estimate noise, and ranking examples to train with confidence. Whereas numerous studies have developed these principles independently, here, we combine them, building on the assumption of a class-conditional noise process to directly estimate the joint distribution between noisy (given) labels and uncorrupted (unknown) labels. This results in a generalized CL which is provably consistent and experimentally performant. 
We present sufficient conditions where CL exactly finds label errors, and show CL performance exceeding seven recent competitive approaches for learning with noisy labels on the CIFAR dataset. Uniquely, the CL framework is not coupled to a specific data modality or model (e.g., we use CL to find several label errors in the presumed error-free MNIST dataset and improve sentiment classification on text data in Amazon Reviews). We also employ CL on ImageNet to quantify ontological class overlap (e.g., estimating 645 missile images are mislabeled as their parent class projectile), and moderately increase model accuracy (e.g., for ResNet) by cleaning data prior to training. These results are replicable using the open-source cleanlab release.", "year": 2019, "venue": "Journal of Artificial Intelligence Research", "authors": [ "Curtis G. Northcutt", "Lu Jiang", "Isaac L. Chuang" ], "externalIds": { "DBLP": "journals/corr/abs-1911-00068", "MAG": "2988175183", "ArXiv": "1911.00068", "DOI": "10.1613/jair.1.12125", "CorpusId": 207870256 }, "url": "https://www.semanticscholar.org/paper/cbaaa1154c491f9da2f050d3c22970e15bb7b52b", "referenceCount": 78, "citationCount": 568, "influentialCitationCount": 85, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Randaugment: Practical automated data augmentation with a reduced search space", "abstract": "Recent work on automated augmentation strategies has led to state-of-the-art results in image classification and object detection. An obstacle to a large-scale adoption of these methods is that they require a separate and expensive search phase. A common way to overcome the expense of the search phase was to use a smaller proxy task. However, it was not clear if the optimized hyperparameters found on the proxy task are also optimal for the actual task. In this work, we rethink the process of designing automated augmentation strategies. We find that while previous work required a search for both magnitude and probability of each operation independently, it is sufficient to only search for a single distortion magnitude that jointly controls all operations. We hence propose a simplified search space that vastly reduces the computational expense of automated augmentation, and permits the removal of a separate proxy task.Despite the simplifications, our method achieves equal or better performance over previous automated augmentation strategies on on CIFAR-10/100, SVHN, ImageNet and COCO datasets. EfficientNet-B7, we achieve 85.0% accuracy, a 1.0% increase over baseline augmentation, a 0.6% improvement over AutoAugment on the ImageNet dataset. With EfficientNet-B8, we achieve 85.4% accuracy on ImageNet, which matches a previous result that used 3.5B extra images. On object detection, the same method as classification leads to 1.0-1.3% improvement over baseline augmentation. Code will be made available online.", "year": 2019, "venue": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "authors": [ "E. D. Cubuk", "Barret Zoph", "Jonathon Shlens", "Quoc V. 
Le" ], "externalIds": { "MAG": "3102631365", "ArXiv": "1909.13719", "DBLP": "conf/cvpr/CubukZSL20", "DOI": "10.1109/CVPRW50498.2020.00359", "CorpusId": 208006202 }, "url": "https://www.semanticscholar.org/paper/87f6a7c014ce206ac5b57299c07e10667d194b39", "referenceCount": 61, "citationCount": 2997, "influentialCitationCount": 452, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Survey on Bias and Fairness in Machine Learning", "abstract": "With the widespread use of artificial intelligence (AI) systems and applications in our everyday lives, accounting for fairness has gained significant importance in designing and engineering of such systems. AI systems can be used in many sensitive environments to make important and life-changing decisions; thus, it is crucial to ensure that these decisions do not reflect discriminatory behavior toward certain groups or populations. More recently some work has been developed in traditional machine learning and deep learning that address such challenges in different subdomains. With the commercialization of these systems, researchers are becoming more aware of the biases that these applications can contain and are attempting to address them. In this survey, we investigated different real-world applications that have shown biases in various ways, and we listed different sources of biases that can affect AI applications. We then created a taxonomy for fairness definitions that machine learning researchers have defined to avoid the existing bias in AI systems. In addition to that, we examined different domains and subdomains in AI showing what researchers have observed with regard to unfair outcomes in the state-of-the-art methods and ways they have tried to address them. There are still many future directions and solutions that can be taken to mitigate the problem of bias in AI systems. We are hoping that this survey will motivate researchers to tackle these issues in the near future by observing existing work in their respective fields.", "year": 2019, "venue": "ACM Computing Surveys", "authors": [ "Ninareh Mehrabi", "Fred Morstatter", "N. Saxena", "Kristina Lerman", "A. Galstyan" ], "externalIds": { "ArXiv": "1908.09635", "MAG": "2969896603", "DBLP": "journals/csur/MehrabiMSLG21", "DOI": "10.1145/3457607", "CorpusId": 201666566 }, "url": "https://www.semanticscholar.org/paper/0090023afc66cd2741568599057f4e82b566137c", "referenceCount": 188, "citationCount": 3354, "influentialCitationCount": 221, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "CutMix: Regularization Strategy to Train Strong Classifiers With Localizable Features", "abstract": "Regional dropout strategies have been proposed to enhance performance of convolutional neural network classifiers. They have proved to be effective for guiding the model to attend on less discriminative parts of objects (e.g. leg as opposed to head of a person), thereby letting the network generalize better and have better object localization capabilities. On the other hand, current methods for regional dropout removes informative pixels on training images by overlaying a patch of either black pixels or random noise. Such removal is not desirable because it suffers from information loss causing inefficiency in training. We therefore propose the CutMix augmentation strategy: patches are cut and pasted among training images where the ground truth labels are also mixed proportionally to the area of the patches. 
By making efficient use of training pixels and retaining the regularization effect of regional dropout, CutMix consistently outperforms state-of-the-art augmentation strategies on CIFAR and ImageNet classification tasks, as well as on ImageNet weakly-supervised localization task. Moreover, unlike previous augmentation methods, our CutMix-trained ImageNet classifier, when used as a pretrained model, results in consistent performance gain in Pascal detection and MS-COCO image captioning benchmarks. We also show that CutMix can improve the model robustness against input corruptions and its out-of distribution detection performance.", "year": 2019, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Sangdoo Yun", "Dongyoon Han", "Seong Joon Oh", "Sanghyuk Chun", "Junsuk Choe", "Y. Yoo" ], "externalIds": { "DBLP": "journals/corr/abs-1905-04899", "ArXiv": "1905.04899", "MAG": "2992308087", "DOI": "10.1109/ICCV.2019.00612", "CorpusId": 152282661 }, "url": "https://www.semanticscholar.org/paper/ed17929e66da7f8fbc3666bf5eb613d302ddde0c", "referenceCount": 54, "citationCount": 4097, "influentialCitationCount": 695, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ImageNet-trained CNNs are biased towards texture; increasing shape bias improves accuracy and robustness", "abstract": "Convolutional Neural Networks (CNNs) are commonly thought to recognise objects by learning increasingly complex representations of object shapes. Some recent studies suggest a more important role of image textures. We here put these conflicting hypotheses to a quantitative test by evaluating CNNs and human observers on images with a texture-shape cue conflict. We show that ImageNet-trained CNNs are strongly biased towards recognising textures rather than shapes, which is in stark contrast to human behavioural evidence and reveals fundamentally different classification strategies. We then demonstrate that the same standard architecture (ResNet-50) that learns a texture-based representation on ImageNet is able to learn a shape-based representation instead when trained on \"Stylized-ImageNet\", a stylized version of ImageNet. This provides a much better fit for human behavioural performance in our well-controlled psychophysical lab setting (nine experiments totalling 48,560 psychophysical trials across 97 observers) and comes with a number of unexpected emergent benefits such as improved object detection performance and previously unseen robustness towards a wide range of image distortions, highlighting advantages of a shape-based representation.", "year": 2018, "venue": "International Conference on Learning Representations", "authors": [ "Robert Geirhos", "Patricia Rubisch", "Claudio Michaelis", "M. Bethge", "Felix Wichmann", "Wieland Brendel" ], "externalIds": { "ArXiv": "1811.12231", "DBLP": "journals/corr/abs-1811-12231", "MAG": "2902617128", "CorpusId": 54101493 }, "url": "https://www.semanticscholar.org/paper/0f50b7483f1b200ebf88c4dd7698de986399a0f3", "referenceCount": 44, "citationCount": 2422, "influentialCitationCount": 291, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Biology", "Mathematics" ] }, { "title": "Learning to Reweight Examples for Robust Deep Learning", "abstract": "Deep neural networks have been shown to be very powerful modeling tools for many supervised learning tasks involving complex input patterns. However, they can also easily overfit to training set biases and label noises. 
In addition to various regularizers, example reweighting algorithms are popular solutions to these problems, but they require careful tuning of additional hyperparameters, such as example mining schedules and regularization hyperparameters. In contrast to past reweighting methods, which typically consist of functions of the cost value of each example, in this work we propose a novel meta-learning algorithm that learns to assign weights to training examples based on their gradient directions. To determine the example weights, our method performs a meta gradient descent step on the current mini-batch example weights (which are initialized from zero) to minimize the loss on a clean unbiased validation set. Our proposed method can be easily implemented on any type of deep network, does not require any additional hyperparameter tuning, and achieves impressive performance on class imbalance and corrupted label problems where only a small amount of clean validation data is available.", "year": 2018, "venue": "International Conference on Machine Learning", "authors": [ "Mengye Ren", "Wenyuan Zeng", "Binh Yang", "R. Urtasun" ], "externalIds": { "MAG": "2795282075", "ArXiv": "1803.09050", "DBLP": "conf/icml/RenZYU18", "CorpusId": 4321928 }, "url": "https://www.semanticscholar.org/paper/c5420ef59d7508d82e53671b0d623027eb58e6ed", "referenceCount": 46, "citationCount": 1319, "influentialCitationCount": 165, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "YOLO9000: Better, Faster, Stronger", "abstract": "We introduce YOLO9000, a state-of-the-art, real-time object detection system that can detect over 9000 object categories. First we propose various improvements to the YOLO detection method, both novel and drawn from prior work. The improved model, YOLOv2, is state-of-the-art on standard detection tasks like PASCAL VOC and COCO. Using a novel, multi-scale training method the same YOLOv2 model can run at varying sizes, offering an easy tradeoff between speed and accuracy. At 67 FPS, YOLOv2 gets 76.8 mAP on VOC 2007. At 40 FPS, YOLOv2 gets 78.6 mAP, outperforming state-of-the-art methods like Faster RCNN with ResNet and SSD while still running significantly faster. Finally we propose a method to jointly train on object detection and classification. Using this method we train YOLO9000 simultaneously on the COCO detection dataset and the ImageNet classification dataset. Our joint training allows YOLO9000 to predict detections for object classes that dont have labelled detection data. We validate our approach on the ImageNet detection task. YOLO9000 gets 19.7 mAP on the ImageNet detection validation set despite only having detection data for 44 of the 200 classes. On the 156 classes not in COCO, YOLO9000 gets 16.0 mAP. 
YOLO9000 predicts detections for more than 9000 different object categories, all in real-time.", "year": 2016, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Joseph Redmon", "Ali Farhadi" ], "externalIds": { "MAG": "2951433694", "ArXiv": "1612.08242", "DBLP": "conf/cvpr/RedmonF17", "DOI": "10.1109/CVPR.2017.690", "CorpusId": 786357 }, "url": "https://www.semanticscholar.org/paper/7d39d69b23424446f0400ef603b2e3e22d0309d6", "referenceCount": 20, "citationCount": 14209, "influentialCitationCount": 1400, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Grad-CAM: Visual Explanations from Deep Networks via Gradient-Based Localization", "abstract": null, "year": 2016, "venue": "International Journal of Computer Vision", "authors": [ "Ramprasaath R. Selvaraju", "Abhishek Das", "Ramakrishna Vedantam", "Michael Cogswell", "Devi Parikh", "Dhruv Batra" ], "externalIds": { "MAG": "2962858109", "DBLP": "conf/iccv/SelvarajuCDVPB17", "ArXiv": "1610.02391", "DOI": "10.1007/s11263-019-01228-7", "CorpusId": 15019293 }, "url": "https://www.semanticscholar.org/paper/5582bebed97947a41e3ddd9bd1f284b73f1648c2", "referenceCount": 72, "citationCount": 16622, "influentialCitationCount": 1836, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Residual Learning for Image Recognition", "abstract": "Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers - 8× deeper than VGG nets [40] but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions1, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation.", "year": 2015, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Kaiming He", "X. Zhang", "Shaoqing Ren", "Jian Sun" ], "externalIds": { "DBLP": "conf/cvpr/HeZRS16", "MAG": "2949650786", "ArXiv": "1512.03385", "DOI": "10.1109/cvpr.2016.90", "CorpusId": 206594692 }, "url": "https://www.semanticscholar.org/paper/2c03df8b48bf3fa39054345bafabfeff15bfd11d", "referenceCount": 53, "citationCount": 172713, "influentialCitationCount": 28229, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Learning Face Attributes in the Wild", "abstract": "Predicting face attributes in the wild is challenging due to complex face variations. We propose a novel deep learning framework for attribute prediction in the wild. It cascades two CNNs, LNet and ANet, which are fine-tuned jointly with attribute tags, but pre-trained differently. 
LNet is pre-trained by massive general object categories for face localization, while ANet is pre-trained by massive face identities for attribute prediction. This framework not only outperforms the state-of-the-art with a large margin, but also reveals valuable facts on learning face representation. (1) It shows how the performances of face localization (LNet) and attribute prediction (ANet) can be improved by different pre-training strategies. (2) It reveals that although the filters of LNet are fine-tuned only with image-level attribute tags, their response maps over entire images have strong indication of face locations. This fact enables training LNet for face localization with only image-level annotations, but without face bounding boxes or landmarks, which are required by all attribute recognition works. (3) It also demonstrates that the high-level hidden neurons of ANet automatically discover semantic concepts after pre-training with massive face identities, and such concepts are significantly enriched after fine-tuning with attribute tags. Each attribute can be well explained with a sparse linear combination of these concepts.", "year": 2014, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Ziwei Liu", "Ping Luo", "Xiaogang Wang", "Xiaoou Tang" ], "externalIds": { "MAG": "1834627138", "ArXiv": "1411.7766", "DBLP": "journals/corr/LiuLWT14", "DOI": "10.1109/ICCV.2015.425", "CorpusId": 459456 }, "url": "https://www.semanticscholar.org/paper/6424b69f3ff4d35249c0bb7ef912fbc2c86f4ff4", "referenceCount": 44, "citationCount": 7560, "influentialCitationCount": 1648, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Very Deep Convolutional Networks for Large-Scale Image Recognition", "abstract": "In this work we investigate the effect of the convolutional network depth on its accuracy in the large-scale image recognition setting. Our main contribution is a thorough evaluation of networks of increasing depth using an architecture with very small (3x3) convolution filters, which shows that a significant improvement on the prior-art configurations can be achieved by pushing the depth to 16-19 weight layers. These findings were the basis of our ImageNet Challenge 2014 submission, where our team secured the first and the second places in the localisation and classification tracks respectively. We also show that our representations generalise well to other datasets, where they achieve state-of-the-art results. We have made our two best-performing ConvNet models publicly available to facilitate further research on the use of deep visual representations in computer vision.", "year": 2014, "venue": "International Conference on Learning Representations", "authors": [ "K. Simonyan", "Andrew Zisserman" ], "externalIds": { "MAG": "2949429431", "ArXiv": "1409.1556", "DBLP": "journals/corr/SimonyanZ14a", "CorpusId": 14124313 }, "url": "https://www.semanticscholar.org/paper/eb42cf88027de515750f230b23b1a057dc782108", "referenceCount": 43, "citationCount": 93036, "influentialCitationCount": 13588, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Microsoft COCO: Common Objects in Context", "abstract": null, "year": 2014, "venue": "European Conference on Computer Vision", "authors": [ "Tsung-Yi Lin", "M. Maire", "Serge J. Belongie", "James Hays", "P. Perona", "Deva Ramanan", "Piotr Dollár", "C. L. 
Zitnick" ], "externalIds": { "ArXiv": "1405.0312", "DBLP": "conf/eccv/LinMBHPRDZ14", "MAG": "2952122856", "DOI": "10.1007/978-3-319-10602-1_48", "CorpusId": 14113767 }, "url": "https://www.semanticscholar.org/paper/71b7178df5d2b112d07e45038cb5637208659ff7", "referenceCount": 52, "citationCount": 38114, "influentialCitationCount": 6016, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Unbiased look at dataset bias", "abstract": "Datasets are an integral part of contemporary object recognition research. They have been the chief reason for the considerable progress in the field, not just as source of large amounts of training data, but also as means of measuring and comparing performance of competing algorithms. At the same time, datasets have often been blamed for narrowing the focus of object recognition research, reducing it to a single benchmark performance number. Indeed, some datasets, that started out as data capture efforts aimed at representing the visual world, have become closed worlds unto themselves (e.g. the Corel world, the Caltech-101 world, the PASCAL VOC world). With the focus on beating the latest benchmark numbers on the latest dataset, have we perhaps lost sight of the original purpose? The goal of this paper is to take stock of the current state of recognition datasets. We present a comparison study using a set of popular datasets, evaluated based on a number of criteria including: relative data bias, cross-dataset generalization, effects of closed-world assumption, and sample value. The experimental results, some rather surprising, suggest directions that can improve dataset collection as well as algorithm evaluation protocols. But more broadly, the hope is to stimulate discussion in the community regarding this very important, but largely neglected issue.", "year": 2011, "venue": "Computer Vision and Pattern Recognition", "authors": [ "A. Torralba", "Alexei A. Efros" ], "externalIds": { "DBLP": "conf/cvpr/TorralbaE11", "MAG": "2031342017", "DOI": "10.1109/CVPR.2011.5995347", "CorpusId": 2777306 }, "url": "https://www.semanticscholar.org/paper/0302bb2d5476540cfb21467473f5eca843caf90b", "referenceCount": 21, "citationCount": 2372, "influentialCitationCount": 144, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ImageNet: A large-scale hierarchical image database", "abstract": "The explosion of image data on the Internet has the potential to foster more sophisticated and robust models and algorithms to index, retrieve, organize and interact with images and multimedia data. But exactly how such data can be harnessed and organized remains a critical problem. We introduce here a new database called “ImageNet”, a large-scale ontology of images built upon the backbone of the WordNet structure. ImageNet aims to populate the majority of the 80,000 synsets of WordNet with an average of 500-1000 clean and full resolution images. This will result in tens of millions of annotated images organized by the semantic hierarchy of WordNet. This paper offers a detailed analysis of ImageNet in its current state: 12 subtrees with 5247 synsets and 3.2 million images in total. We show that ImageNet is much larger in scale and diversity and much more accurate than the current image datasets. Constructing such a large-scale database is a challenging task. We describe the data collection scheme with Amazon Mechanical Turk. 
Lastly, we illustrate the usefulness of ImageNet through three simple applications in object recognition, image classification and automatic object clustering. We hope that the scale, accuracy, diversity and hierarchical structure of ImageNet can offer unparalleled opportunities to researchers in the computer vision community and beyond.", "year": 2009, "venue": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "authors": [ "Jia Deng", "Wei Dong", "R. Socher", "Li-Jia Li", "K. Li", "Li Fei-Fei" ], "externalIds": { "DBLP": "conf/cvpr/DengDSLL009", "MAG": "2108598243", "DOI": "10.1109/CVPR.2009.5206848", "CorpusId": 57246310 }, "url": "https://www.semanticscholar.org/paper/d2c733e34d48784a37d717fe43d9e93277a8c53e", "referenceCount": 27, "citationCount": 56678, "influentialCitationCount": 8947, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Object categorization using co-occurrence, location and appearance", "abstract": "In this work we introduce a novel approach to object categorization that incorporates two types of context-co-occurrence and relative location - with local appearance-based features. Our approach, named CoLA (for co-occurrence, location and appearance), uses a conditional random field (CRF) to maximize object label agreement according to both semantic and spatial relevance. We model relative location between objects using simple pairwise features. By vector quantizing this feature space, we learn a small set of prototypical spatial relationships directly from the data. We evaluate our results on two challenging datasets: PASCAL 2007 and MSRC. The results show that combining co-occurrence and spatial context improves accuracy in as many as half of the categories compared to using co-occurrence alone.", "year": 2008, "venue": "2008 IEEE Conference on Computer Vision and Pattern Recognition", "authors": [ "C. Galleguillos", "Andrew Rabinovich", "Serge J. Belongie" ], "externalIds": { "DBLP": "conf/cvpr/GalleguillosRB08", "MAG": "2160254296", "DOI": "10.1109/CVPR.2008.4587799", "CorpusId": 6060721 }, "url": "https://www.semanticscholar.org/paper/c7f4f5f81ec856891ace4a5bea16b1f082390fbb", "referenceCount": 30, "citationCount": 523, "influentialCitationCount": 23, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Causal Interventional Training for Image Recognition", "abstract": "Deep learning models often fit undesired dataset bias in training. In this paper, we formulate the bias using causal inference, which helps us uncover the ever-elusive causalities among the key factors in training, and thus pursue the desired causal effect without the bias. We start from revisiting the process of building a visual recognition system, and then propose a structural causal model (SCM) for the key variables involved in dataset collection and recognition model: object, common sense, bias, context, and label prediction. Based on the SCM, one can observe that there are “good” and “bad” biases. Intuitively, in the image where a car is driving on a high way in a desert, the “good” bias denoting the common-sense context is the highway, and the “bad” bias accounting for the noisy context factor is the desert. We tackle this problem with a novel causal interventional training (CIT) approach, where we control the observed context in each object class. 
We offer theoretical justifications for CIT and validate it with extensive classification experiments on CIFAR-10, CIFAR-100 and ImageNet, e.g., surpassing the standard deep neural networks ResNet-34 and ResNet-50, respectively, by 0.95% and 0.70% accuracies on the ImageNet. Our code is open-sourced on the GitHub https://github.com/qinwei-hfut/CIT.", "year": 2023, "venue": "IEEE transactions on multimedia", "authors": [ "Wei Qin", "Hanwang Zhang", "Richang Hong", "E. Lim", "Qianru Sun" ], "externalIds": { "DBLP": "journals/tmm/QinZHLS23", "DOI": "10.1109/TMM.2021.3136717", "CorpusId": 245358470 }, "url": "https://www.semanticscholar.org/paper/23c1273512df9eb5d60e7110a99f6798b09366e7", "referenceCount": 71, "citationCount": 20, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The role of context in object recognition", "abstract": null, "year": 2007, "venue": "", "authors": [], "externalIds": { "DOI": "10.1007/1-4020-0613-6_15664", "CorpusId": 268375669 }, "url": "https://www.semanticscholar.org/paper/75dd15a6df37e0aa51d43bcda439cbb5c554811f", "referenceCount": 0, "citationCount": 37, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Bias-to-text: Debiasing unknown visual biases through language interpretation", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "CRoP: Context-wise Robust Static Human-Sensing Personalization": { "paper_title": "CRoP: Context-wise Robust Static Human-Sensing Personalization", "arxiv_id": "2409.17994v2", "keyword": "deep learning", "authors": [ "Sawinder Kaur", "Avery Gump", "Jingyu Xin", "Yi Xiao", "Harshit Sharma", "Nina R Benway", "Jonathan L Preston", "Asif Salekin" ], "references": [ { "title": "Enhancing Continuous Domain Adaptation with Multi-Path Transfer Curriculum", "abstract": "Addressing the large distribution gap between training and testing data has long been a challenge in machine learning, giving rise to fields such as transfer learning and domain adaptation. Recently, Continuous Domain Adaptation (CDA) has emerged as an effective technique, closing this gap by utilizing a series of intermediate domains. This paper contributes a novel CDA method, W-MPOT, which rigorously addresses the domain ordering and error accumulation problems overlooked by previous studies. Specifically, we construct a transfer curriculum over the source and intermediate domains based on Wasserstein distance, motivated by theoretical analysis of CDA. Then we transfer the source model to the target domain through multiple valid paths in the curriculum using a modified version of continuous optimal transport. A bidirectional path consistency constraint is introduced to mitigate the impact of accumulated mapping errors during continuous transfer.
We extensively evaluate W-MPOT on multiple datasets, achieving up to 54.1\\% accuracy improvement on multi-session Alzheimer MR image classification and 94.7\\% MSE reduction on battery capacity estimation.", "year": 2024, "venue": "Pacific-Asia Conference on Knowledge Discovery and Data Mining", "authors": [ "Hanbing Liu", "Junchang Wang", "Xuan Zhang", "Ye Guo", "Yang Li" ], "externalIds": { "ArXiv": "2402.16681", "DBLP": "journals/corr/abs-2402-16681", "DOI": "10.48550/arXiv.2402.16681", "CorpusId": 268032548 }, "url": "https://www.semanticscholar.org/paper/eb966400d0e1dddf08e5781b604a816199aa8149", "referenceCount": 19, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Balancing Continual Learning and Fine-tuning for Human Activity Recognition", "abstract": "Wearable-based Human Activity Recognition (HAR) is a key task in human-centric machine learning due to its fundamental understanding of human behaviours. Due to the dynamic nature of human behaviours, continual learning promises HAR systems that are tailored to users' needs. However, because of the difficulty in collecting labelled data with wearable sensors, existing approaches that focus on supervised continual learning have limited applicability, while unsupervised continual learning methods only handle representation learning while delaying classifier training to a later stage. This work explores the adoption and adaptation of CaSSLe, a continual self-supervised learning model, and Kaizen, a semi-supervised continual learning model that balances representation learning and down-stream classification, for the task of wearable-based HAR. These schemes re-purpose contrastive learning for knowledge retention and, Kaizen combines that with self-training in a unified scheme that can leverage unlabelled and labelled data for continual learning. In addition to comparing state-of-the-art self-supervised continual learning schemes, we further investigated the importance of different loss terms and explored the trade-off between knowledge retention and learning from new tasks. In particular, our extensive evaluation demonstrated that the use of a weighting factor that reflects the ratio between learned and new classes achieves the best overall trade-off in continual learning.", "year": 2024, "venue": "arXiv.org", "authors": [ "Chi Ian Tang", "Lorena Qendro", "Dimitris Spathis", "F. Kawsar", "Akhil Mathur", "Cecilia Mascolo" ], "externalIds": { "DBLP": "journals/corr/abs-2401-02255", "ArXiv": "2401.02255", "DOI": "10.48550/arXiv.2401.02255", "CorpusId": 266755926 }, "url": "https://www.semanticscholar.org/paper/ba87e9c2533ccae70519d72a7bac53ea4f7d729b", "referenceCount": 35, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Dissecting the heterogeneity of “in the wild” stress from multimodal sensor data", "abstract": null, "year": 2023, "venue": "npj Digital Medicine", "authors": [ "Sujay Nagaraj", "S. 
Goodday", "Tom Hartvigsen", "Adrien Boch", "Kopal Garg", "Sindhu Gowda", "Luca Foschini", "Marzyeh Ghassemi", "Stephen Friend", "Anna Goldenberg" ], "externalIds": { "PubMedCentral": "10733336", "DOI": "10.1038/s41746-023-00975-9", "CorpusId": 266428923, "PubMed": "38123810" }, "url": "https://www.semanticscholar.org/paper/c3b3b1311ffeb5d40b0c42cd7d9d66732ca57d53", "referenceCount": 13, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Test-Time Domain Adaptation by Learning Domain-Aware Batch Normalization", "abstract": "Test-time domain adaptation aims to adapt the model trained on source domains to unseen target domains using a few unlabeled images. Emerging research has shown that the label and domain information is separately embedded in the weight matrix and batch normalization (BN) layer. Previous works normally update the whole network naively without explicitly decoupling the knowledge between label and domain. As a result, it leads to knowledge interference and defective distribution adaptation. In this work, we propose to reduce such learning interference and elevate the domain knowledge learning by only manipulating the BN layer. However, the normalization step in BN is intrinsically unstable when the statistics are re-estimated from a few samples. We find that ambiguities can be greatly reduced when only updating the two affine parameters in BN while keeping the source domain statistics. To further enhance the domain knowledge extraction from unlabeled data, we construct an auxiliary branch with label-independent self-supervised learning (SSL) to provide supervision. Moreover, we propose a bi-level optimization based on meta-learning to enforce the alignment of two learning objectives of auxiliary and main branches. The goal is to use the auxiliary branch to adapt the domain and benefit main task for subsequent inference. Our method keeps the same computational cost at inference as the auxiliary branch can be thoroughly discarded after adaptation. Extensive experiments show that our method outperforms the prior works on five WILDS real-world domain shift datasets. Our method can also be integrated with methods with label-dependent optimization to further push the performance boundary. Our code is available at https://github.com/ynanwu/MABN.", "year": 2023, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Yanan Wu", "Zhixiang Chi", "Yang Wang", "K. Plataniotis", "Songhe Feng" ], "externalIds": { "DBLP": "journals/corr/abs-2312-10165", "ArXiv": "2312.10165", "DOI": "10.48550/arXiv.2312.10165", "CorpusId": 266348676 }, "url": "https://www.semanticscholar.org/paper/ea1dc43aefc69d4a80323eb9e0bbfe96d0857023", "referenceCount": 54, "citationCount": 7, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Efficient Model Adaptation for Continual Learning at the Edge", "abstract": "Most machine learning (ML) systems assume stationary and matching data distributions during training and deployment. This is often a false assumption. When ML models are deployed on real devices, data distributions often shift over time due to changes in environmental factors, sensor characteristics, and task-of-interest. While it is possible to have a human-in-the-loop to monitor for distribution shifts and engineer new architectures in response to these shifts, such a setup is not cost-effective. Instead, non-stationary automated ML (AutoML) models are needed. 
This paper presents the Encoder-Adaptor-Reconfigurator (EAR) framework for efficient continual learning under domain shifts. The EAR framework uses a fixed deep neural network (DNN) feature encoder and trains shallow networks on top of the encoder to handle novel data. The EAR framework is capable of 1) detecting when new data is out-of-distribution (OOD) by combining DNNs with hyperdimensional computing (HDC), 2) identifying low-parameter neural adaptors to adapt the model to the OOD data using zero-shot neural architecture search (ZS-NAS), and 3) minimizing catastrophic forgetting on previous tasks by progressively growing the neural architecture as needed and dynamically routing data through the appropriate adaptors and reconfigurators for handling domain-incremental and class-incremental continual learning. We systematically evaluate our approach on several benchmark datasets for domain adaptation and demonstrate strong performance compared to state-of-the-art algorithms for OOD detection and few-/zero-shot NAS.", "year": 2023, "venue": "arXiv.org", "authors": [ "Z. Daniels", "Jun Hu", "M. Lomnitz", "P.E.T.E.R.G. Miller", "Aswin Raghavan", "Joe Zhang", "M. Piacentino", "David C. Zhang" ], "externalIds": { "ArXiv": "2308.02084", "DBLP": "journals/corr/abs-2308-02084", "DOI": "10.48550/arXiv.2308.02084", "CorpusId": 260611565 }, "url": "https://www.semanticscholar.org/paper/0d9ee15a9d99e647d489531d5b08b1aa0504b508", "referenceCount": 50, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Artificial intelligence enhanced sensors - enabling technologies to next-generation healthcare and biomedical platform", "abstract": null, "year": 2023, "venue": "Bioelectronics in Medicine", "authors": [ "Chan Wang", "Tianyiyi He", "Hong Zhou", "Zixuan Zhang", "Chengkuo Lee" ], "externalIds": { "PubMedCentral": "10394931", "DOI": "10.1186/s42234-023-00118-1", "CorpusId": 260358057, "PubMed": "37528436" }, "url": "https://www.semanticscholar.org/paper/ec072fc375d9d5f50bfca643c34f72d65407e688", "referenceCount": 217, "citationCount": 52, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Reproducible Speech Research With the Artificial Intelligence-Ready PERCEPT Corpora.", "abstract": "BACKGROUND\nPublicly available speech corpora facilitate reproducible research by providing open-access data for participants who have consented/assented to data sharing among different research teams. Such corpora can also support clinical education, including perceptual training and training in the use of speech analysis tools.\n\n\nPURPOSE\nIn this research note, we introduce the PERCEPT (Perceptual Error Rating for the Clinical Evaluation of Phonetic Targets) corpora, PERCEPT-R (Rhotics) and PERCEPT-GFTA (Goldman-Fristoe Test of Articulation), which together contain over 36 hr of speech audio (> 125,000 syllable, word, and phrase utterances) from children, adolescents, and young adults aged 6-24 years with speech sound disorder (primarily residual speech sound disorders impacting /ɹ/) and age-matched peers. We highlight PhonBank as the repository for the corpora and demonstrate use of the associated speech analysis software, Phon, to query PERCEPT-R. A worked example of research with PERCEPT-R, suitable for clinical education and research training, is included as an appendix. 
Support for end users and information/descriptive statistics for future releases of the PERCEPT corpora can be found in a dedicated Slack channel. Finally, we discuss the potential for PERCEPT corpora to support the training of artificial intelligence clinical speech technology appropriate for use with children with speech sound disorders, the development of which has historically been constrained by the limited representation of either children or individuals with speech impairments in publicly available training corpora.\n\n\nCONCLUSIONS\nWe demonstrate the use of PERCEPT corpora, PhonBank, and Phon for clinical training and research questions appropriate to child citation speech. Increased use of these tools has the potential to enhance reproducibility in the study of speech development and disorders.", "year": 2023, "venue": "Journal of Speech, Language and Hearing Research", "authors": [ "Nina R. Benway", "J. Preston", "Elaine R. Hitchcock", "Yvan Rose", "Asif Salekin", "Wendy Liang", "Tara McAllister" ], "externalIds": { "DOI": "10.1044/2023_JSLHR-22-00343", "CorpusId": 259173947, "PubMed": "37319018" }, "url": "https://www.semanticscholar.org/paper/15ea0acfbcd1284345ecfeb6ffd1b53f29a8d3e3", "referenceCount": 17, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "People with long-term conditions sharing personal health data via digital health technologies: A scoping review to inform design", "abstract": "The use of digital technology amongst people living with a range of long-term health conditions to support self-management has increased dramatically. More recently, digital health technologies to share and exchange personal health data with others have been investigated. Sharing personal health data with others is not without its risks: sharing data creates threats to the privacy and security of personal data and plays a role in trust, adoption and continued use of digital health technology. Our work aims to inform the design of these digital health technologies by investigating the reported intentions of sharing health data with others, the associated user experiences when using these digital health technologies and the trust, identity, privacy and security (TIPS) considerations for designing digital health technologies that support the trusted sharing of personal health data to support the self-management of long-term health conditions. To address these aims, we conducted a scoping review, analysing over 12,000 papers in the area of digital health technologies. We conducted a reflexive thematic analysis of 17 papers that described digital health technologies that support sharing of personal health data, and extracted design implications that could enhance the future development of trusted, private and secure digital health technologies.", "year": 2023, "venue": "PLOS Digital Health", "authors": [ "A. Rathbone", "S. Stumpf", "C. Claisse", "Elizabeth Sillence", "L. Coventry", "Richard Brown", "Abigail C. 
Durrant" ], "externalIds": { "PubMedCentral": "10208494", "DOI": "10.1371/journal.pdig.0000264", "CorpusId": 258887275, "PubMed": "37224154" }, "url": "https://www.semanticscholar.org/paper/397e3d979bf6c393e9d8b299eadc5223fbfccb71", "referenceCount": 61, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Correlation between electroencephalographic markers in the healthy brain", "abstract": null, "year": 2023, "venue": "Scientific Reports", "authors": [ "Laura Päeske", "Tuuli Uudeberg", "H. Hinrikus", "J. Lass", "M. Bachmann" ], "externalIds": { "DOI": "10.1038/s41598-023-33364-z", "CorpusId": 258214222, "PubMed": "37072499" }, "url": "https://www.semanticscholar.org/paper/088e2f0383951c7801c9755303380219b4fb7fa8", "referenceCount": 32, "citationCount": 6, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Kaizen: Practical self-supervised continual learning with continual fine-tuning", "abstract": "Self-supervised learning (SSL) has shown remarkable performance in computer vision tasks when trained offline. However, in a Continual Learning (CL) scenario where new data is introduced progressively, models still suffer from catastrophic forgetting. Retraining a model from scratch to adapt to newly generated data is time-consuming and inefficient. Previous approaches suggested re-purposing self-supervised objectives with knowledge distillation to mitigate forgetting across tasks, assuming that labels from all tasks are available during fine-tuning. In this paper, we generalize self-supervised continual learning in a practical setting where available labels can be leveraged in any step of the SSL process. With an increasing number of continual tasks, this offers more flexibility in the pre-training and fine-tuning phases. With Kaizen1, we introduce a training architecture that is able to mitigate catastrophic forgetting for both the feature extractor and classifier with a carefully designed loss function. By using a set of comprehensive evaluation metrics reflecting different aspects of continual learning, we demonstrated that Kaizen significantly outperforms previous SSL models in competitive vision benchmarks, with up to 16.5% accuracy improvement on split CIFAR-100. Kaizen is able to balance the trade-off between knowledge retention and learning from new data with an end-to-end model, paving the way for practical deployment of continual learning systems.", "year": 2023, "venue": "IEEE Workshop/Winter Conference on Applications of Computer Vision", "authors": [ "Chi Ian Tang", "Lorena Qendro", "Dimitris Spathis", "F. Kawsar", "Cecilia Mascolo", "Akhil Mathur" ], "externalIds": { "DBLP": "journals/corr/abs-2303-17235", "ArXiv": "2303.17235", "DOI": "10.1109/WACV57701.2024.00282", "CorpusId": 257834122 }, "url": "https://www.semanticscholar.org/paper/90c10e6a61addc009eca0ee400563c3af380c479", "referenceCount": 40, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Computationally Budgeted Continual Learning: What Does Matter?", "abstract": "Continual Learning (CL) aims to sequentially train models on streams of incoming data that vary in distribution by preserving previous knowledge while adapting to new data. Current CL literature focuses on restricted access to previously seen data, while imposing no constraints on the computational budget for training. 
This is unreasonable for applications in-the-wild, where systems are primarily constrained by computational and time budgets, not storage. We revisit this problem with a large-scale benchmark and analyze the performance of traditional CL approaches in a compute-constrained setting, where effective memory samples used in training can be implicitly restricted as a consequence of limited computation. We conduct experiments evaluating various CL sampling strategies, distillation losses, and partial fine-tuning on two large-scale datasets, namely ImageNet2K and Continual Google Landmarks V2 in data incremental, class incremental, and time incremental settings. Through extensive experiments amounting to a total of over 1500 GPU-hours, we find that, under compute-constrained setting, traditional CL approaches, with no exception, fail to outperform a simple minimal baseline that samples uniformly from memory. Our conclusions are consistent in a different number of stream time steps, e.g., 20 to 200, and under several computational budgets. This suggests that most existing CL methods are particularly too computationally expensive for realistic budgeted deployment. Code for this project is available at: https://github.com/drimpossible/BudgetCL.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Ameya Prabhu", "Hasan Hammoud", "P. Dokania", "Philip H. S. Torr", "S. Lim", "Bernard Ghanem", "Adel Bibi" ], "externalIds": { "DBLP": "conf/cvpr/PrabhuHDTLGB23", "ArXiv": "2303.11165", "DOI": "10.1109/CVPR52729.2023.00360", "CorpusId": 257631846 }, "url": "https://www.semanticscholar.org/paper/d1dc16a981a7fce5475a1342241baf596d0de9b3", "referenceCount": 68, "citationCount": 42, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "EMGSense: A Low-Effort Self-Supervised Domain Adaptation Framework for EMG Sensing", "abstract": "This paper presents EMGSense, a low-effort self-supervised domain adaptation framework for sensing applications based on Electromyography (EMG). EMGSense addresses one of the fundamental challenges in EMG cross-user sensing—the significant performance degradation caused by time-varying biological heterogeneity—in a low-effort (data-efficient and label-free) manner. To alleviate the burden of data collection and avoid labor-intensive data annotation, we propose two EMG-specific data augmentation methods to simulate the EMG signals generated in various conditions and scope the exploration in label-free scenarios. We model combating biological heterogeneity-caused performance degradation as a multi-source domain adaptation problem that can learn from the diversity among source users to eliminate EMG heterogeneous biological features. To relearn the target-user-specific biological features from the unlabeled data, we integrate advanced self-supervised techniques into a carefully designed deep neural network (DNN) structure. The DNN structure can seamlessly perform two training stages that complement each other to adapt to a new user with satisfactory performance. Comprehensive evaluations on two sizable datasets collected from 13 participants indicate that EMGSense achieves an average accuracy of 91.9% and 81.2% in gesture recognition and activity recognition, respectively. 
EMGSense outperforms the state-of-the-art EMG-oriented domain adaptation approaches by 12.5%-17.4% and achieves a comparable performance with the one trained in a supervised learning manner.", "year": 2023, "venue": "Annual IEEE International Conference on Pervasive Computing and Communications", "authors": [ "Di Duan", "Huanqi Yang", "Guohao Lan", "Tianxing Li", "X. Jia", "Weitao Xu" ], "externalIds": { "DBLP": "conf/percom/DuanYLLJX23", "DOI": "10.1109/PERCOM56429.2023.10099164", "CorpusId": 258220118 }, "url": "https://www.semanticscholar.org/paper/c49fc59f34f4a1c49db4ee1a1f9c0755f84a4781", "referenceCount": 42, "citationCount": 6, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Finetune like you pretrain: Improved finetuning of zero-shot vision models", "abstract": "Finetuning image-text models such as CLIP achieves state-of-the-art accuracies on a variety of benchmarks. However, recent works (Kumar et al., 2022; Wortsman et al., 2021) have shown that even subtle differences in the finetuning process can lead to surprisingly large differences in the final performance, both for in-distribution (ID) and out-of-distribution (OOD) data. In this work, we show that a natural and simple approach of mimicking contrastive pretraining consistently outperforms alternative finetuning approaches. Specifically, we cast downstream class labels as text prompts and continue optimizing the contrastive loss between image embeddings and class-descriptive prompt embeddings (contrastive finetuning). Our method consistently outperforms baselines across 7 distribution shift, 6 transfer learning, and 3 few-shot learning benchmarks. On WILDS-iWILDCam, our proposed approach FLYP outperforms the top of the leaderboard by 2.3% ID and 2.7% OOD, giving the highest reported accuracy. Averaged across 7 OOD datasets (2 WILDS and 5 ImageNet associated shifts), FLYP gives gains of 4.2% OOD over standard finetuning and outperforms current state-of-the-art (LP-FT) by more than 1 % both ID and OOD. Similarly, on 3 few-shot learning benchmarks, FLYP gives gains up to 4.6% over standard finetuning and 4.4% over the state-of-the-art. Thus we establish our proposed method of contrastive finetuning as a simple and intuitive state-of-the-art for supervised finetuning of image-text models like CLIP. Code is available at https://github.com/locuslab/FLYP.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Sachin Goyal", "Ananya Kumar", "Sankalp Garg", "Zico Kolter", "Aditi Raghunathan" ], "externalIds": { "DBLP": "journals/corr/abs-2212-00638", "ArXiv": "2212.00638", "DOI": "10.1109/CVPR52729.2023.01853", "CorpusId": 254125206 }, "url": "https://www.semanticscholar.org/paper/e35c225e7b6da0568d8e723eb5380294e76e5cc0", "referenceCount": 59, "citationCount": 91, "influentialCitationCount": 19, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Generalization and Personalization of Mobile Sensing-Based Mood Inference Models", "abstract": "Mood inference with mobile sensing data has been studied in ubicomp literature over the last decade. This inference enables context-aware and personalized user experiences in general mobile apps and valuable feedback and interventions in mobile health apps. 
However, even though model generalization issues have been highlighted in many studies, the focus has always been on improving the accuracies of models using different sensing modalities and machine learning techniques, with datasets collected in homogeneous populations. In contrast, less attention has been given to studying the performance of mood inference models to assess whether models generalize to new countries. In this study, we collected a mobile sensing dataset with 329K self-reports from 678 participants in eight countries (China, Denmark, India, Italy, Mexico, Mongolia, Paraguay, UK) to assess the effect of geographical diversity on mood inference models. We define and evaluate country-specific (trained and tested within a country), continent-specific (trained and tested within a continent), country-agnostic (tested on a country not seen on training data), and multi-country (trained and tested with multiple countries) approaches trained on sensor data for two mood inference tasks with population-level (non-personalized) and hybrid (partially personalized) models. We show that partially personalized country-specific models perform the best yielding area under the receiver operating characteristic curve (AUROC) scores of the range 0.78--0.98 for two-class (negative vs. positive valence) and 0.76--0.94 for three-class (negative vs. neutral vs. positive valence) inference. Further, with the country-agnostic approach, we show that models do not perform well compared to country-specific settings, even when models are partially personalized. We also show that continent-specific models outperform multi-country models in the case of Europe. Overall, we uncover generalization issues of mood inference models to new countries and how the geographical similarity of countries might impact mood inference.", "year": 2022, "venue": "Proceedings of the ACM on Interactive Mobile Wearable and Ubiquitous Technologies", "authors": [ "L. Meegahapola", "William Droz", "P. Kun", "A. D. Gotzen", "Chaitanya Nutakki", "Shyam Diwakar", "Salvador Ruiz Correa", "Donglei Song", "Hao Xu", "Miriam Bidoglia", "G. Gaskell", "Altangerel Chagnaa", "Amarsanaa Ganbold", "Tsolmon Zundui", "Carlo Caprini", "D. Miorandi", "Alethia Hume", "José Luis Zarza", "L. Cernuzzi", "Ivano Bison", "Marcelo Rodas Britez", "Matteo Busso", "Ronald Chenu-Abente", "Can Gunel", "Fausto Giunchiglia", "Laura Schelenz", "D. Gática-Pérez" ], "externalIds": { "ArXiv": "2211.03009", "DBLP": "journals/corr/abs-2211-03009", "DOI": "10.1145/3569483", "CorpusId": 253384550 }, "url": "https://www.semanticscholar.org/paper/5ac843aa2b80d5d8cdcec5f9e1c51f0290d1b8e5", "referenceCount": 123, "citationCount": 27, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Pruning's Effect on Generalization Through the Lens of Training and Regularization", "abstract": "Practitioners frequently observe that pruning improves model generalization. A long-standing hypothesis based on bias-variance trade-off attributes this generalization improvement to model size reduction. However, recent studies on over-parameterization characterize a new model size regime, in which larger models achieve better generalization. Pruning models in this over-parameterized regime leads to a contradiction -- while theory predicts that reducing model size harms generalization, pruning to a range of sparsities nonetheless improves it. Motivated by this contradiction, we re-examine pruning's effect on generalization empirically. 
We show that size reduction cannot fully account for the generalization-improving effect of standard pruning algorithms. Instead, we find that pruning leads to better training at specific sparsities, improving the training loss over the dense model. We find that pruning also leads to additional regularization at other sparsities, reducing the accuracy degradation due to noisy examples over the dense model. Pruning extends model training time and reduces model size. These two factors improve training and add regularization respectively. We empirically demonstrate that both factors are essential to fully explaining pruning's impact on generalization.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Tian Jin", "Michael Carbin", "Daniel M. Roy", "Jonathan Frankle", "G. Dziugaite" ], "externalIds": { "DBLP": "journals/corr/abs-2210-13738", "ArXiv": "2210.13738", "DOI": "10.48550/arXiv.2210.13738", "CorpusId": 253107616 }, "url": "https://www.semanticscholar.org/paper/0696c125606bcbeefde756f7a8a66055e298e9a2", "referenceCount": 76, "citationCount": 24, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "FIT: A Metric for Model Sensitivity", "abstract": "Model compression is vital to the deployment of deep learning on edge devices. Low precision representations, achieved via quantization of weights and activations, can reduce inference time and memory requirements. However, quantifying and predicting the response of a model to the changes associated with this procedure remains challenging. This response is non-linear and heterogeneous throughout the network. Understanding which groups of parameters and activations are more sensitive to quantization than others is a critical stage in maximizing efficiency. For this purpose, we propose FIT. Motivated by an information geometric perspective, FIT combines the Fisher information with a model of quantization. We find that FIT can estimate the final performance of a network without retraining. FIT effectively fuses contributions from both parameter and activation quantization into a single metric. Additionally, FIT is fast to compute when compared to existing methods, demonstrating favourable convergence properties. These properties are validated experimentally across hundreds of quantization configurations, with a focus on layer-wise mixed-precision quantization.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Ben Zandonati", "Adrian Alan Pol", "M. Pierini", "Olya Sirkin", "Tal Kopetz" ], "externalIds": { "DBLP": "conf/iclr/ZandonatiPPSK23", "ArXiv": "2210.08502", "DOI": "10.48550/arXiv.2210.08502", "CorpusId": 252918639 }, "url": "https://www.semanticscholar.org/paper/8f2c17145ccfb36761c0df28c33200ff9fbb57cf", "referenceCount": 43, "citationCount": 7, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Multi-Task Attention for Cross-Individual Human Activity Recognition", "abstract": "Federated Learning (FL) is an emerging privacy-aware machine learning technique that applies successfully to the collaborative learning of global models for Human Activity Recognition (HAR). As of now, the applications of FL for HAR assume that the data associated with diverse individuals follow the same distribution. 
However, this assumption is impractical in real-world scenarios where the same activity is frequently performed differently by different individuals. To tackle this issue, we propose FedMAT, a Federated Multi-task ATtention framework for HAR, which extracts and fuses shared as well as individual-specific multi-modal sensor data features. Specifically, we treat the HAR problem associated with each individual as a different task and train a federated multi-task model, composed of a shared feature representation network in a central server plus multiple individual-specific networks with attention modules stored in decentralized nodes. In this architecture, the attention module operates as a mask that allows to learn individual-specific features from the global model, whilst simultaneously allowing for features to be shared among different individuals. We conduct extensive experiments based on publicly available HAR datasets, which are collected in both controlled environments and real-world scenarios. Numeric results verify that our proposed FedMAT significantly outperforms baselines not only in generalizing to existing individuals but also in adapting to new individuals.", "year": 2022, "venue": "International Joint Conference on Artificial Intelligence", "authors": [ "Qiang Shen", "Haotian Feng", "Rui Song", "Stefano Teso", "Fausto Giunchiglia", "Hao Xu" ], "externalIds": { "DBLP": "conf/ijcai/ShenFSTGX22", "DOI": "10.24963/ijcai.2022/475", "CorpusId": 250635108 }, "url": "https://www.semanticscholar.org/paper/0c29fc10926c1f1f4b17a6acc5aef441c7046199", "referenceCount": 31, "citationCount": 12, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fisher SAM: Information Geometry and Sharpness Aware Minimisation", "abstract": "Recent sharpness-aware minimisation (SAM) is known to find flat minima which is beneficial for better generalisation with improved robustness. SAM essentially modifies the loss function by reporting the maximum loss value within the small neighborhood around the current iterate. However, it uses the Euclidean ball to define the neighborhood, which can be inaccurate since loss functions for neural networks are typically defined over probability distributions (e.g., class predictive probabilities), rendering the parameter space non Euclidean. In this paper we consider the information geometry of the model parameter space when defining the neighborhood, namely replacing SAM's Euclidean balls with ellipsoids induced by the Fisher information. Our approach, dubbed Fisher SAM, defines more accurate neighborhood structures that conform to the intrinsic metric of the underlying statistical manifold. For instance, SAM may probe the worst-case loss value at either a too nearby or inappropriately distant point due to the ignorance of the parameter space geometry, which is avoided by our Fisher SAM. Another recent Adaptive SAM approach stretches/shrinks the Euclidean ball in accordance with the scale of the parameter magnitudes. This might be dangerous, potentially destroying the neighborhood structure. We demonstrate improved performance of the proposed Fisher SAM on several benchmark datasets/tasks.", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "Minyoung Kim", "Da Li", "S. Hu", "Timothy M. 
Hospedales" ], "externalIds": { "ArXiv": "2206.04920", "DBLP": "journals/corr/abs-2206-04920", "DOI": "10.48550/arXiv.2206.04920", "CorpusId": 249605627 }, "url": "https://www.semanticscholar.org/paper/246a3399b08f6c7d9aa1bf082f2dc084ce091354", "referenceCount": 57, "citationCount": 56, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Stress Detection with Deep Learning Using BVP and EDA Signals", "abstract": "In daily life, a person is exposed to many negative factors and emotional states as arising from financial difficulties, working life and personal responsibilities. The aim of this study is to detect stress resulted from these factors in a way that causes the least discomfort to the person. It is quite common to use physiological data such as heart rate, electromyography (EMG), electrocardiography (ECG), electroencephalography (EEG), respiration and skin conductivity to detect when a person is exposed to stress. In this study, stress estimation was made using blood volume pulse (BVP) and electrodermal activity (EDA) sensor data obtained from the Empatica E4 device in the open source WESAD dataset. With the use of BVP and EDA sensors, the intervention to the person has been tried to be minimized. Thus, the model ans sensor method proposed in this study can be easily adapted to daily life. For stress detection, a feed forward deep learning artificial neural network (ANN) technique is proposed by using the baseline and stress labeled data in the dataset. With ANN model, %96.26 accuracy was obtained and a fairly smooth loss curve was observed. This model was compared with the ANN methods in previous studies.", "year": 2022, "venue": "2022 International Congress on Human-Computer Interaction, Optimization and Robotic Applications (HORA)", "authors": [ "Eda Eren", "T. S. Navruz" ], "externalIds": { "DOI": "10.1109/HORA55278.2022.9799933", "CorpusId": 250118825 }, "url": "https://www.semanticscholar.org/paper/065460913b78606b293becbd0d1ed83ea3edf328", "referenceCount": 0, "citationCount": 9, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Wearable multimodal sensors for the detection of behavioral and psychological symptoms of dementia using personalized machine learning models", "abstract": "Behavioral and psychological symptoms of dementia (BPSD) signal distress or unmet needs and present a risk to people with dementia and their caregivers. Variability in the expression of these symptoms is a barrier to the performance of digital biomarkers. The aim of this study was to use wearable multimodal sensors to develop personalized machine learning models capable of detecting individual patterns of BPSD.", "year": 2022, "venue": "Alzheimer's & Dementia", "authors": [ "Andrea Iaboni", "S. Spasojevic", "Kristine Newman", "L. Schindel Martin", "A. Wang", "B. Ye", "Alex Mihailidis", "Shehroz S. Khan" ], "externalIds": { "PubMedCentral": "9043905", "DOI": "10.1002/dad2.12305", "CorpusId": 248428875, "PubMed": "35496371" }, "url": "https://www.semanticscholar.org/paper/8f511d5bee5522cdda9379d3d94a6182b14896a1", "referenceCount": 46, "citationCount": 21, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Continual Test-Time Domain Adaptation", "abstract": "Test-time domain adaptation aims to adapt a source pre-trained model to a target domain without using any source data. Existing works mainly consider the case where the target domain is static. 
However, real-world machine perception systems are running in non-stationary and continually changing environments where the target domain distribution can change over time. Existing methods, which are mostly based on self-training and entropy regularization, can suffer from these non-stationary environments. Due to the distribution shift over time in the target domain, pseudo-labels become unreliable. The noisy pseudo-labels can further lead to error accumulation and catastrophic forgetting. To tackle these issues, we propose a continual test-time adaptation approach (CoTTA) which comprises two parts. Firstly, we propose to reduce the error accumulation by using weight-averaged and augmentation-averaged predictions which are often more accurate. On the other hand, to avoid catastrophic forgetting, we propose to stochastically restore a small part of the neurons to the source pre-trained weights during each iteration to help preserve source knowledge in the longterm. The proposed method enables the longterm adaptation for all parameters in the network. CoTTA is easy to implement and can be readily incorporated in off-the-shelf pre-trained models. We demonstrate the effectiveness of our approach on four classification tasks and a segmentation task for continual test-time adaptation, on which we outperform existing methods. Our code is available at https://gin.ee/cotta.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Qin Wang", "Olga Fink", "L. Gool", "Dengxin Dai" ], "externalIds": { "DBLP": "conf/cvpr/0013FGD22", "ArXiv": "2203.13591", "DOI": "10.1109/CVPR52688.2022.00706", "CorpusId": 247748613 }, "url": "https://www.semanticscholar.org/paper/0430dbcbfed0a737881d22340fb044028ed851a9", "referenceCount": 76, "citationCount": 295, "influentialCitationCount": 103, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Predicting states of elevated negative affect in adolescents from smartphone sensors: a novel personalized machine learning approach", "abstract": "Abstract Background Adolescence is characterized by profound change, including increases in negative emotions. Approximately 84% of American adolescents own a smartphone, which can continuously and unobtrusively track variables potentially predictive of heightened negative emotions (e.g. activity levels, location, pattern of phone usage). The extent to which built-in smartphone sensors can reliably predict states of elevated negative affect in adolescents is an open question. Methods Adolescent participants (n = 22; ages 13–18) with low to high levels of depressive symptoms were followed for 15 weeks using a combination of ecological momentary assessments (EMAs) and continuously collected passive smartphone sensor data. EMAs probed negative emotional states (i.e. anger, sadness and anxiety) 2–3 times per day every other week throughout the study (total: 1145 EMA measurements). Smartphone accelerometer, location and device state data were collected to derive 14 discrete estimates of behavior, including activity level, percentage of time spent at home, sleep onset and duration, and phone usage. Results A personalized ensemble machine learning model derived from smartphone sensor data outperformed other statistical approaches (e.g. 
linear mixed model) and predicted states of elevated anger and anxiety with acceptable discrimination ability (area under the curve (AUC) = 74% and 71%, respectively), but demonstrated more modest discrimination ability for predicting states of high sadness (AUC = 66%). Conclusions To the extent that smartphone data could provide reasonably accurate real-time predictions of states of high negative affect in teens, brief ‘just-in-time’ interventions could be immediately deployed via smartphone notifications or mental health apps to alleviate these states.", "year": 2022, "venue": "Psychological Medicine", "authors": [ "Boyu Ren", "Emma G. Balkind", "Brianna Pastro", "Elana S. Israel", "D. Pizzagalli", "H. Rahimi-Eichi", "J. Baker", "Christian A. Webb" ], "externalIds": { "DOI": "10.1017/S0033291722002161", "CorpusId": 246966904, "PubMed": "35894246" }, "url": "https://www.semanticscholar.org/paper/ae022b854a6ae0cea958d7cc8e8a09caf42dda7b", "referenceCount": 43, "citationCount": 11, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Self-Supervised Models are Continual Learners", "abstract": "Self-supervised models have been shown to produce comparable or better visual representations than their supervised counterparts when trained offline on unlabeled data at scale. However, their efficacy is catastrophically reduced in a Continual Learning (CL) scenario where data is presented to the model sequentially. In this paper, we show that self-supervised loss functions can be seamlessly converted into distillation mechanisms for CL by adding a predictor network that maps the current state of the representations to their past state. This enables us to devise a framework for Continual self-supervised visual representation Learning that (i) significantly improves the quality of the learned representations, (ii) is compatible with several state-of-the-art self-supervised objectives, and (iii) needs little to no hyperparameter tuning. We demonstrate the effectiveness of our approach empirically by training six popular self-supervised models in various CL settings. Code: github.com/DonkeyShot21/cassle.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Enrico Fini", "Victor Costa", "Xavier Alameda-Pineda", "E. Ricci", "Alahari Karteek", "J. Mairal" ], "externalIds": { "DBLP": "journals/corr/abs-2112-04215", "ArXiv": "2112.04215", "DOI": "10.1109/CVPR52688.2022.00940", "CorpusId": 244954199 }, "url": "https://www.semanticscholar.org/paper/c25fea20e5b5c520be2783dbd0524cc6dc1edaf8", "referenceCount": 59, "citationCount": 124, "influentialCitationCount": 27, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Digital medicine and the curse of dimensionality", "abstract": null, "year": 2021, "venue": "npj Digital Medicine", "authors": [ "Visar Berisha", "Chelsea Krantsevich", "P. Hahn", "Shira Hahn", "Gautam Dasarathy", "P. Turaga", "J. 
Liss" ], "externalIds": { "DBLP": "journals/npjdm/BerishaKHHDTL21", "PubMedCentral": "8553745", "DOI": "10.1038/s41746-021-00521-5", "CorpusId": 240006455, "PubMed": "34711924" }, "url": "https://www.semanticscholar.org/paper/9abc28e720759dc9ae9cb65ad4538de9d0a7a03e", "referenceCount": 60, "citationCount": 149, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Wearable Sweat Biosensors Refresh Personalized Health/Medical Diagnostics", "abstract": "Sweat contains a broad range of critical biomarkers including ions, small molecules, and macromolecules that may indirectly or directly reflect the health status of the human body and thereby help track disease progression. Wearable sweat biosensors enable the collection and analysis of sweat in situ, achieving real-time, continuous, and noninvasive monitoring of human biochemical parameters at the molecular level. This review summarizes the physiological/pathological information of sweat and wearable sweat biosensors. First, the production of sweat pertaining to various electrolytes, metabolites, and proteins is described. Then, the compositions of the wearable sweat biosensors are summarized, and the design of each subsystem is introduced in detail. The latest applications of wearable sweat biosensors for outdoor, hospital, and family monitoring are highlighted. Finally, the review provides a summary and an outlook on the future developments and challenges of wearable sweat biosensors with the aim of advancing the field of wearable sweat monitoring technology.", "year": 2021, "venue": "Research", "authors": [ "Wenhui Ji", "Jingyu Zhu", "Wanxia Wu", "Nanxiang Wang", "Jiqing Wang", "Jiansheng Wu", "Qiong Wu", "Xuewen Wang", "Changmin Yu", "Gaofeng Wei", "Lin Li", "F. Huo" ], "externalIds": { "PubMedCentral": "8557357", "DOI": "10.34133/2021/9757126", "CorpusId": 244075549, "PubMed": "34778790" }, "url": "https://www.semanticscholar.org/paper/a7cc45285494f0efbcca18dfc67678e827884879", "referenceCount": 0, "citationCount": 43, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Widar3.0: Zero-Effort Cross-Domain Gesture Recognition With Wi-Fi", "abstract": "With the development of signal processing technology, the ubiquitous Wi-Fi devices open an unprecedented opportunity to solve the challenging human gesture recognition problem by learning motion representations from wireless signals. Wi-Fi-based gesture recognition systems, although yield good performance on specific data domains, are still practically difficult to be used without explicit adaptation efforts to new domains. Various pioneering approaches have been proposed to resolve this contradiction but extra training efforts are still necessary for either data collection or model re-training when new data domains appear. To advance cross-domain recognition and achieve fully zero-effort recognition, we propose Widar3.0, a Wi-Fi-based zero-effort cross-domain gesture recognition system. The key insight of Widar3.0 is to derive and extract domain-independent features of human gestures at the lower signal level, which represent unique kinetic characteristics of gestures and are irrespective of domains. On this basis, we develop a one-fits-all general model that requires only one-time training but can adapt to different data domains. Experiments on various domain factors (i.e. 
environments, locations, and orientations of persons) demonstrate the accuracy of 92.7% for in-domain recognition and 82.6%-92.4% for cross-domain recognition without model re-training, outperforming the state-of-the-art solutions.", "year": 2021, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Yi Zhang", "Yue Zheng", "Kun Qian", "Guidong Zhang", "Yunhao Liu", "Chenshu Wu", "Zheng Yang" ], "externalIds": { "DBLP": "journals/pami/ZhangZQZLWY22", "DOI": "10.1109/TPAMI.2021.3105387", "CorpusId": 263554370, "PubMed": "34406937" }, "url": "https://www.semanticscholar.org/paper/db88f53e1037bcc269aa1a5a407a8d72afd7bb55", "referenceCount": 66, "citationCount": 106, "influentialCitationCount": 32, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Wearable and Mobile Sensors for Personalized Nutrition.", "abstract": "While wearable and mobile chemical sensors have experienced tremendous growth over the past decade, their potential for tracking and guiding nutrition has emerged only over the past three years. Currently, guidelines from doctors and dietitians represent the most common approach for maintaining optimal nutrition status. However, such recommendations rely on population averages and do not take into account individual variability in responding to nutrients. Precision nutrition has recently emerged to address the large heterogeneity in individuals' responses to diet, by tailoring nutrition based on the specific requirements of each person. It aims at preventing and managing diseases by formulating personalized dietary interventions to individuals on the basis of their metabolic profile, background, and environmental exposure. Recent advances in digital nutrition technology, including calories-counting mobile apps and wearable motion tracking devices, lack the ability of monitoring nutrition at the molecular level. The realization of effective precision nutrition requires synergy from different sensor modalities in order to make timely reliable predictions and efficient feedback. This work reviews key opportunities and challenges toward the successful realization of effective wearable and mobile nutrition monitoring platforms. Non-invasive wearable and mobile electrochemical sensors, capable of monitoring temporal chemical variations upon the intake of food and supplements, are excellent candidates to bridge the gap between digital and biochemical analyses for a successful personalized nutrition approach. By providing timely (previously unavailable) dietary information, such wearable and mobile sensors offer the guidance necessary for supporting dietary behavior change toward a managed nutritional balance. Coupling of the rapidly emerging wearable chemical sensing devices-generating enormous dynamic analytical data-with efficient data-fusion and data-mining methods that identify patterns and make predictions is expected to revolutionize dietary decision-making toward effective precision nutrition.", "year": 2021, "venue": "ACS Sensors", "authors": [ "J. Sempionatto", "V. R. Montiel", "E. Vargas", "H. 
Teymourian", "Joseph Wang" ], "externalIds": { "DOI": "10.1021/acssensors.1c00553", "CorpusId": 234780034, "PubMed": "34008960" }, "url": "https://www.semanticscholar.org/paper/e7278e6684d53d9868cb118ce0e9cac9ea7c8408", "referenceCount": 126, "citationCount": 99, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Gradient Matching for Domain Generalization", "abstract": "Machine learning systems typically assume that the distributions of training and test sets match closely. However, a critical requirement of such systems in the real world is their ability to generalize to unseen domains. Here, we propose an inter-domain gradient matching objective that targets domain generalization by maximizing the inner product between gradients from different domains. Since direct optimization of the gradient inner product can be computationally prohibitive -- requires computation of second-order derivatives -- we derive a simpler first-order algorithm named Fish that approximates its optimization. We demonstrate the efficacy of Fish on 6 datasets from the Wilds benchmark, which captures distribution shift across a diverse range of modalities. Our method produces competitive results on these datasets and surpasses all baselines on 4 of them. We perform experiments on both the Wilds benchmark, which captures distribution shift in the real world, as well as datasets in DomainBed benchmark that focuses more on synthetic-to-real transfer. Our method produces competitive results on both benchmarks, demonstrating its effectiveness across a wide range of domain generalization tasks.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Yuge Shi", "Jeffrey S. Seely", "Philip H. S. Torr", "Siddharth Narayanaswamy", "Awni Y. Hannun", "Nicolas Usunier", "Gabriel Synnaeve" ], "externalIds": { "DBLP": "journals/corr/abs-2104-09937", "ArXiv": "2104.09937", "CorpusId": 233306870 }, "url": "https://www.semanticscholar.org/paper/525dd120c0b5808ddcbbf703677b46346fb0729b", "referenceCount": 50, "citationCount": 248, "influentialCitationCount": 41, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Sparsity in Deep Learning: Pruning and growth for efficient inference and training in neural networks", "abstract": "The growing energy and performance costs of deep learning have driven the community to reduce the size of neural networks by selectively pruning components. Similarly to their biological counterparts, sparse networks generalize just as well, if not better than, the original dense networks. Sparsity can reduce the memory footprint of regular networks to fit mobile devices, as well as shorten training time for ever growing networks. In this paper, we survey prior work on sparsity in deep learning and provide an extensive tutorial of sparsification for both inference and training. We describe approaches to remove and add elements of neural networks, different training strategies to achieve model sparsity, and mechanisms to exploit sparsity in practice. Our work distills ideas from more than 300 research papers and provides guidance to practitioners who wish to utilize sparsity today, as well as to researchers whose goal is to push the frontier forward. 
We include the necessary background on mathematical methods in sparsification, describe phenomena such as early structure adaptation, the intricate relations between sparsity and the training process, and show techniques for achieving acceleration on real hardware. We also define a metric of pruned parameter efficiency that could serve as a baseline for comparison of different sparse networks. We close by speculating on how sparsity can improve future workloads and outline major open problems in the field.", "year": 2021, "venue": "Journal of machine learning research", "authors": [ "T. Hoefler", "Dan Alistarh", "Tal Ben-Nun", "Nikoli Dryden", "Alexandra Peste" ], "externalIds": { "DBLP": "journals/corr/abs-2102-00554", "ArXiv": "2102.00554", "CorpusId": 231740691 }, "url": "https://www.semanticscholar.org/paper/9d6acac70b2d1fdb861a08b00766ef263109cd7f", "referenceCount": 0, "citationCount": 552, "influentialCitationCount": 37, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Catastrophic Fisher Explosion: Early Phase Fisher Matrix Impacts Generalization", "abstract": "The early phase of training a deep neural network has a dramatic effect on the local curvature of the loss function. For instance, using a small learning rate does not guarantee stable optimization because the optimization trajectory has a tendency to steer towards regions of the loss surface with increasing local curvature. We ask whether this tendency is connected to the widely observed phenomenon that the choice of the learning rate strongly influences generalization. We first show that stochastic gradient descent (SGD) implicitly penalizes the trace of the Fisher Information Matrix (FIM), a measure of the local curvature, from the start of training. We argue it is an implicit regularizer in SGD by showing that explicitly penalizing the trace of the FIM can significantly improve generalization. We highlight that poor final generalization coincides with the trace of the FIM attaining a large value early in training, to which we refer as catastrophic Fisher explosion. Finally, to gain insight into the regularization effect of penalizing the trace of the FIM, we show that it limits memorization by reducing the learning speed of examples with noisy labels more than that of the examples with clean labels.", "year": 2020, "venue": "International Conference on Machine Learning", "authors": [ "Stanislaw Jastrzebski", "Devansh Arpit", "Oliver Åstrand", "Giancarlo Kerg", "Huan Wang", "Caiming Xiong", "R. Socher", "Kyunghyun Cho", "Krzysztof J. Geras" ], "externalIds": { "DBLP": "conf/icml/JastrzebskiAAKW21", "ArXiv": "2012.14193", "CorpusId": 229680169 }, "url": "https://www.semanticscholar.org/paper/6dfc5870b9ccb75954e861f47ce958417d9ed2be", "referenceCount": 87, "citationCount": 56, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Invariant Feature Learning for Sensor-Based Human Activity Recognition", "abstract": "Wearable sensor-based human activity recognition (HAR) has been a research focus in the field of ubiquitous and mobile computing for years. In recent years, many deep models have been applied to HAR problems. However, deep learning methods typically require a large amount of data for models to generalize well. Significant variances caused by different participants or diverse sensor devices limit the direct application of a pre-trained model to a subject or device that has not been seen before. 
To address these problems, we present an invariant feature learning framework (IFLF) that extracts common information shared across subjects and devices. IFLF incorporates two learning paradigms: 1) meta-learning to capture robust features across seen domains and adapt to an unseen one with similarity-based data selection; 2) multi-task learning to deal with data shortage and enhance overall performance via knowledge sharing among different subjects. Experiments demonstrated that IFLF is effective in handling both subject and device diversion across popular open datasets and an in-house dataset. It outperforms a baseline model by up to 40 percent in test accuracy.", "year": 2020, "venue": "IEEE Transactions on Mobile Computing", "authors": [ "Yujiao Hao", "Rong Zheng", "Boyu Wang" ], "externalIds": { "DBLP": "journals/tmc/HaoZW22", "ArXiv": "2012.07963", "MAG": "3111005488", "DOI": "10.1109/TMC.2021.3064252", "CorpusId": 229181307 }, "url": "https://www.semanticscholar.org/paper/123f5ffbf618fc534a5365fb13191bc5ed046640", "referenceCount": 49, "citationCount": 16, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "A Deep Neural Network Pruning Method Based on Gradient L1-norm", "abstract": "The deep neural network model usually has a large number of redundant weight parameters. Running such a model occupies a large amount of computing resources and storage space, which makes it difficult to deploy on edge devices and embedded devices. In order to solve this problem, we propose a deep network pruning algorithm based on the gradient L1-norm (GLNP). The core idea of the GLNP algorithm is to judge the importance of each filter based on the L1-norm of its gradient. Then, according to the pruning ratio, filters and their connected feature maps are removed layer by layer, and the pruned network is retrained to recover accuracy. We show that the GLNP algorithm can remove 71% of the parameters of VGG-16 while maintaining 90.81% accuracy; similar experiments reveal that it can remove 35.2% of the parameters of ResNet-56 while maintaining 93.11% accuracy. Compared with the popular PFEC algorithm, GLNP compresses both network models while improving accuracy, giving better deep network compression and acceleration.", "year": 2020, "venue": "International Conference on Innovative Computing and Cloud Computing", "authors": [ "Xue Liu", "Weijie Xia", "Zhimiao Fan" ], "externalIds": { "DOI": "10.1109/ICCC51575.2020.9345039", "CorpusId": 231919875 }, "url": "https://www.semanticscholar.org/paper/ff2776ed434d6ebb25fb3dcbf6fea1d9d9a328b1", "referenceCount": 28, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Can We Ditch Feature Engineering? End-to-End Deep Learning for Affect Recognition from Physiological Sensor Data", "abstract": "To further extend the applicability of wearable sensors in various domains such as mobile health systems and the automotive industry, new methods for accurately extracting subtle physiological information from these wearable sensors are required. However, the extraction of valuable information from physiological signals is still challenging—smartphones can count steps and compute heart rate, but they cannot recognize emotions and related affective states. 
This study analyzes the possibility of using end-to-end multimodal deep learning (DL) methods for affect recognition. Ten end-to-end DL architectures are compared on four different datasets with diverse raw physiological signals used for affect recognition, including emotional and stress states. The DL architectures specialized for time-series classification were enhanced to simultaneously facilitate learning from multiple sensors, each having their own sampling frequency. To enable fair comparison among the different DL architectures, Bayesian optimization was used for hyperparameter tuning. The experimental results showed that the performance of the models depends on the intensity of the physiological response induced by the affective stimuli, i.e., the DL models recognize stress induced by the Trier Social Stress Test more successfully than they recognize emotional changes induced by watching affective content, e.g., funny videos. Additionally, the results showed that the CNN-based architectures might be more suitable than LSTM-based architectures for affect recognition from physiological sensors.", "year": 2020, "venue": "Italian National Conference on Sensors", "authors": [ "Maciej Dziezyc", "M. Gjoreski", "Przemyslaw Kazienko", "Stanisław Saganowski", "M. Gams" ], "externalIds": { "PubMedCentral": "7697590", "DBLP": "journals/sensors/DziezycGKSG20", "MAG": "3105178283", "DOI": "10.3390/s20226535", "CorpusId": 227067135, "PubMed": "33207564" }, "url": "https://www.semanticscholar.org/paper/c9121e48baaaa0074a2e8fdceb6ad279cc8da55f", "referenceCount": 60, "citationCount": 40, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Early detection and tracking of bulbar changes in ALS via frequent and remote speech analysis", "abstract": null, "year": 2020, "venue": "npj Digital Medicine", "authors": [ "G. Stegmann", "Shira Hahn", "J. Liss", "J. Shefner", "S. Rutkove", "K. Shelton", "C. J. Duncan", "Visar Berisha" ], "externalIds": { "DBLP": "journals/npjdm/StegmannHLSRSDB20a", "PubMedCentral": "7555482", "MAG": "3093484458", "DOI": "10.1038/s41746-020-00335-x", "CorpusId": 222299546, "PubMed": "33083567" }, "url": "https://www.semanticscholar.org/paper/9a85ae502ed56585608cab7e63a5581a1e6a84b3", "referenceCount": 25, "citationCount": 56, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Leveraging Mobile Sensing and Machine Learning for Personalized Mental Health Care", "abstract": "Mental illness is widespread in our society, yet remains difficult to treat due to challenges such as stigma and overburdened health care systems. New paradigms are needed for treating mental illness outside the practitioner’s office. We propose a framework to guide the design of mobile sensing systems for personalized mental health interventions. This framework guides researchers in constructing interventions from the ground up through four phases: sensor data collection, digital biomarker extraction, health state detection, and intervention deployment. We highlight how this framework advances research in personalized mHealth and address remaining challenges, such as ground truth fidelity and missing data.", "year": 2020, "venue": "Ergonomics in design", "authors": [ "M. Boukhechba", "Anna N. Baglione", "Laura E. 
Barnes" ], "externalIds": { "MAG": "3027833786", "DOI": "10.1177/1064804620920494", "CorpusId": 219509903 }, "url": "https://www.semanticscholar.org/paper/8a007c2bf105f9e3418ccd639354865f6de5da35", "referenceCount": 31, "citationCount": 9, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Do We Really Need to Access the Source Data? Source Hypothesis Transfer for Unsupervised Domain Adaptation", "abstract": "Unsupervised domain adaptation (UDA) aims to leverage the knowledge learned from a labeled source dataset to solve similar tasks in a new unlabeled domain. Prior UDA methods typically require to access the source data when learning to adapt the model, making them risky and inefficient for decentralized private data. This work tackles a practical setting where only a trained source model is available and investigates how we can effectively utilize such a model without source data to solve UDA problems. We propose a simple yet generic representation learning framework, named \\emph{Source HypOthesis Transfer} (SHOT). SHOT freezes the classifier module (hypothesis) of the source model and learns the target-specific feature extraction module by exploiting both information maximization and self-supervised pseudo-labeling to implicitly align representations from the target domains to the source hypothesis. To verify its versatility, we evaluate SHOT in a variety of adaptation cases including closed-set, partial-set, and open-set domain adaptation. Experiments indicate that SHOT yields state-of-the-art results among multiple domain adaptation benchmarks.", "year": 2020, "venue": "International Conference on Machine Learning", "authors": [ "Jian Liang", "Dapeng Hu", "Jiashi Feng" ], "externalIds": { "MAG": "3034526587", "DBLP": "conf/icml/LiangHF20", "ArXiv": "2002.08546", "CorpusId": 211205159 }, "url": "https://www.semanticscholar.org/paper/6e1bb490ae54b42f13d14d69b2012edda4664949", "referenceCount": 79, "citationCount": 1013, "influentialCitationCount": 308, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Personalized Activity Recognition with Deep Triplet Embeddings", "abstract": "A significant challenge for a supervised learning approach to inertial human activity recognition is the heterogeneity of data generated by individual users, resulting in very poor performance for some subjects. We present an approach to personalized activity recognition based on deep feature representation derived from a convolutional neural network (CNN). We experiment with both categorical cross-entropy loss and triplet loss for training, and describe a novel loss function based on subject triplets. We evaluate these methods on three publicly available inertial human activity recognition datasets (MHEALTH, WISDM, and SPAR) comparing classification accuracy, out-of-distribution activity detection, and generalization to new activity classes. The proposed triplet algorithm achieved an average 96.7% classification accuracy across tested datasets versus the 87.5% achieved by the baseline CNN algorithm. We demonstrate that personalized algorithms, and, in particular, the proposed novel triplet loss algorithms, are more robust to inter-subject variability and thus exhibit better performance on classification and out-of-distribution detection tasks.", "year": 2020, "venue": "Italian National Conference on Sensors", "authors": [ "D. Burns", "C. 
Whyne" ], "externalIds": { "MAG": "2999955388", "ArXiv": "2001.05517", "DBLP": "journals/sensors/BurnsBAW22", "PubMedCentral": "9324610", "DOI": "10.3390/s22145222", "CorpusId": 210698775, "PubMed": "35890902" }, "url": "https://www.semanticscholar.org/paper/8f12b62c04056feaa6ba9e25213504a418347b08", "referenceCount": 64, "citationCount": 23, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics", "Medicine" ] }, { "title": "Human walking in the real world: Interactions between terrain type, gait parameters, and energy expenditure", "abstract": "Humans often traverse real-world environments with a variety of surface irregularities and inconsistencies, which can disrupt steady gait and require additional effort. Such effects have, however, scarcely been demonstrated quantitatively, because few laboratory biomechanical measures apply outdoors. Walking can nevertheless be quantified by other means. In particular, the foot’s trajectory in space can be reconstructed from foot-mounted inertial measurement units (IMUs), to yield measures of stride and associated variabilities. But it remains unknown whether such measures are related to metabolic energy expenditure. We therefore quantified the effect of five different outdoor terrains on foot motion (from IMUs) and net metabolic rate (from oxygen consumption) in healthy adults (N = 10; walking at 1.25 m/s). Energy expenditure increased significantly (P < 0.05) in the order Sidewalk, Dirt, Gravel, Grass, and Woodchips, with Woodchips about 27% costlier than Sidewalk. Terrain type also affected measures, particularly stride variability and virtual foot clearance (swing foot’s lowest height above consecutive footfalls). In combination, such measures can also roughly predict metabolic cost (adjusted R2 = 0.52, partial least squares regression), and even discriminate between terrain types (10% reclassification error). Body-worn sensors can characterize how uneven terrain affects gait, gait variability, and metabolic cost in the real world.", "year": 2019, "venue": "bioRxiv", "authors": [ "DB Kowalsky", "JR Rebula", "LV Ojeda", "PG Adamczyk", "AD Kuo" ], "externalIds": { "MAG": "2996807563", "PubMedCentral": "7806134", "DOI": "10.1371/journal.pone.0228682", "CorpusId": 214198244, "PubMed": "33439858" }, "url": "https://www.semanticscholar.org/paper/660c9f976920ea34525f9f010f69b7ee207ed3aa", "referenceCount": 44, "citationCount": 49, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Biology", "Medicine" ] }, { "title": "MetaSense: few-shot adaptation to untrained conditions in deep mobile sensing", "abstract": "Recent improvements in deep learning and hardware support offer a new breakthrough in mobile sensing; we could enjoy context-aware services and mobile healthcare on a mobile device powered by artificial intelligence. However, most related studies perform well only with a certain level of similarity between trained and target data distribution, while in practice, a specific user's behaviors and device make sensor inputs different. Consequently, the performance of such applications might suffer in diverse user and device conditions as training deep models in such countless conditions is infeasible. To mitigate the issue, we propose MetaSense, an adaptive deep mobile sensing system utilizing only a few (e.g., one or two) data instances from the target user. 
MetaSense employs meta learning that learns how to adapt to the target user's condition, by rehearsing multiple similar tasks generated from our unique task generation strategies in offline training. The trained model has the ability to rapidly adapt to the target user's condition when a few data are available. Our evaluation with real-world traces of motion and audio sensors shows that MetaSense not only outperforms the state-of-the-art transfer learning by 18% and meta learning based approaches by 15% in terms of accuracy, but also requires significantly less adaptation time for the target user.", "year": 2019, "venue": "ACM International Conference on Embedded Networked Sensor Systems", "authors": [ "Taesik Gong", "Yeonsu Kim", "Jinwoo Shin", "Sung-ju Lee" ], "externalIds": { "MAG": "2983307807", "DBLP": "conf/sensys/GongKSL19", "DOI": "10.1145/3356250.3360020", "CorpusId": 207906463 }, "url": "https://www.semanticscholar.org/paper/fc50108cc22409ce69cb3c027ab694a94381e396", "referenceCount": 55, "citationCount": 72, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On Iterative Neural Network Pruning, Reinitialization, and the Similarity of Masks", "abstract": "We examine how recently documented, fundamental phenomena in deep learning models subject to pruning are affected by changes in the pruning procedure. Specifically, we analyze differences in the connectivity structure and learning dynamics of pruned models found through a set of common iterative pruning techniques, to address questions of uniqueness of trainable, high-sparsity sub-networks, and their dependence on the chosen pruning method. In convolutional layers, we document the emergence of structure induced by magnitude-based unstructured pruning in conjunction with weight rewinding that resembles the effects of structured pruning. We also show empirical evidence that weight stability can be automatically achieved through apposite pruning techniques.", "year": 2019, "venue": "arXiv.org", "authors": [ "Michela Paganini", "J. Forde" ], "externalIds": { "ArXiv": "2001.05050", "DBLP": "journals/corr/abs-2001-05050", "MAG": "3000649712", "CorpusId": 210701547 }, "url": "https://www.semanticscholar.org/paper/d3b05583c5b619521975611e3766d69b80a45e0c", "referenceCount": 59, "citationCount": 16, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "The Generalization-Stability Tradeoff in Neural Network Pruning", "abstract": "Pruning neural network parameters is often viewed as a means to compress models, but pruning has also been motivated by the desire to prevent overfitting. This motivation is particularly relevant given the perhaps surprising observation that a wide variety of pruning approaches increase test accuracy despite sometimes massive reductions in parameter counts. To better understand this phenomenon, we analyze the behavior of pruning over the course of training, finding that pruning's benefit to generalization increases with pruning's instability (defined as the drop in test accuracy immediately following pruning). We demonstrate that this \"generalization-stability tradeoff\" is present across a wide variety of pruning settings and propose a mechanism for its cause: pruning regularizes similarly to noise injection. Supporting this, we find less pruning stability leads to more model flatness and the benefits of pruning do not depend on permanent parameter removal. 
These results explain the compatibility of pruning-based generalization improvements and the high generalization recently observed in overparameterized networks.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Brian Bartoldson", "Ari S. Morcos", "Adrian Barbu", "G. Erlebacher" ], "externalIds": { "DBLP": "conf/nips/BartoldsonMBE20", "ArXiv": "1906.03728", "MAG": "3100430947", "CorpusId": 182952955 }, "url": "https://www.semanticscholar.org/paper/2e4fde9218f5214b9da20411e1bc793cee0f3f9a", "referenceCount": 73, "citationCount": 66, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Applying Internet of Things and Machine-Learning for Personalized Healthcare: Issues and Challenges", "abstract": "Personalized Healthcare (PH) is a new patientoriented healthcare approach which expects to improve the traditional healthcare system. The focus of this new advancement is the patient data collected from patient Electronic health records (EHR), Internet of Things (IoT) sensor devices, wearables and mobile devices, web-based information and social media. PH applies Artificial Intelligence (AI) techniques to the collected dataset to improve disease progression technique, disease prediction, patient selfmanagement and clinical intervention. Machine learning techniques are widely used in this regard to develop analytic models. These models are integrated into different healthcare service applications and clinical decision support systems. These models mainly analyse the collected data from sensor devices and other sources to identify behavioral patterns and clinical conditions of the patient. For example, these models analyse the collected data to identify the patient's improvements, habits and anomaly in daily routine, changes in sleeping and mobility, eating, drinking and digestive pattern. Based on those patterns the healthcare applications and the clinical decision support systems recommend lifestyle advice, special treatment and care plans for the patient. The doctors and caregivers can also be engaged in the care plan process to validate lifestyle advice. However, there are many uncertainties and a grey area when it comes to applying machine learning in this context. Clinical, behaviour and lifestyle data in nature are very sensitive. There could be different types of biased involved in the process of data collection and interpretation. The training data model could have an older version of the dataset. All these could lead to an incorrect decision from the system without the user's knowledge. In this paper, some of the standards of the ML models reported in the recent research trends, identify the reliability issues and propose improvements.", "year": 2018, "venue": "2018 International Conference on Machine Learning and Data Engineering (iCMLDE)", "authors": [ "Farhad Ahamed", "Farnaz Farid" ], "externalIds": { "MAG": "2908659529", "DOI": "10.1109/ICMLDE.2018.00014", "CorpusId": 58674126 }, "url": "https://www.semanticscholar.org/paper/23da5e8038b6f7b966cb47f095ff9ddad885db27", "referenceCount": 16, "citationCount": 74, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Introducing WESAD, a Multimodal Dataset for Wearable Stress and Affect Detection", "abstract": "Affect recognition aims to detect a person's affective state based on observables, with the goal to e.g. improve human-computer interaction. 
Long-term stress is known to have severe implications on wellbeing, which call for continuous and automated stress monitoring systems. However, the affective computing community lacks commonly used standard datasets for wearable stress detection which a) provide multimodal high-quality data, and b) include multiple affective states. Therefore, we introduce WESAD, a new publicly available dataset for wearable stress and affect detection. This multimodal dataset features physiological and motion data, recorded from both a wrist- and a chest-worn device, of 15 subjects during a lab study. The following sensor modalities are included: blood volume pulse, electrocardiogram, electrodermal activity, electromyogram, respiration, body temperature, and three-axis acceleration. Moreover, the dataset bridges the gap between previous lab studies on stress and emotions, by containing three different affective states (neutral, stress, amusement). In addition, self-reports of the subjects, which were obtained using several established questionnaires, are contained in the dataset. Furthermore, a benchmark is created on the dataset, using well-known features and standard machine learning methods. Considering the three-class classification problem ( baseline vs. stress vs. amusement ), we achieved classification accuracies of up to 80%,. In the binary case ( stress vs. non-stress ), accuracies of up to 93%, were reached. Finally, we provide a detailed analysis and comparison of the two device locations ( chest vs. wrist ) as well as the different sensor modalities.", "year": 2018, "venue": "International Conference on Multimodal Interaction", "authors": [ "P. Schmidt", "Attila Reiss", "R. Dürichen", "C. Marberger", "Kristof Van Laerhoven" ], "externalIds": { "DBLP": "conf/icmi/SchmidtRDML18", "MAG": "2894771803", "DOI": "10.1145/3242969.3242985", "CorpusId": 52900092 }, "url": "https://www.semanticscholar.org/paper/f7d4957127bb35b0d3cb1042a676ea60e259463d", "referenceCount": 34, "citationCount": 673, "influentialCitationCount": 150, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LASSO regression", "abstract": "Regression models are commonly used in statistical analyses1,2. A popular use is to model the predicted risk of a likely outcome. Unfortunately, applying standard regression methods to a set of candidate variables to generate a model tends to lead to overfitting in terms of the number of variables ultimately included in the model, and also overestimation of how well the model performs in terms of using the included variables to explain the observed variability (‘optimism bias’). The model tends to perform particularly poorly with predicting observations more ‘extreme’ (very high or very low) risk. Various (penalized or regularization) regression techniques, can be used to address these problems. LASSO (Least Absolute Shrinkage and Selection Operator) regression, a shrinkage and variable selection method for regression models, is an attractive option as it addresses both problems3. Gains in computational power and incorporation into statistical software also mean that its computer-intensive nature is no longer off-putting. One area it has been used is for handing genetic data as the number of potential predictors is often large relative to the number of observations, and there is often little or no a-priori knowledge to inform variable selection. 
LASSO regression aims to identify the variables and corresponding regression coefficients that lead to a model that minimizes the prediction error. This is achieved by imposing a constraint on the model parameters, which ‘shrinks’ the regression coefficients towards zero, that is by forcing the sum of the absolute value of the regression coefficients to be less than a fixed value (λ). In a practical sense this constrains the complexity of the model. Variables with a regression coefficient of zero after shrinkage are excluded from the model. The choice of λ is often made by using an automated k-fold cross-validation approach. For this approach, the dataset is randomly partitioned into k sub-samples of equal size. While the k-1 sub-samples are used for developing a prediction model, the remaining sub-sample is used for validating this model. This procedure is carried out k times, with each one of the k sub-samples in turn being used for validation and the other ones for model development. An overall result is produced by combining the k separate validation results for a range of λ values and choosing the preferred λ, which is then used to determine the final model. A particular advantage with this technique is that it reduces overfitting without restricting a subset of the dataset to sole use for internal validation. LASSO regression has been shown to outperform standard methods in some settings. However, it is not a panacea to the problems of overfitting and optimism bias, and does not remove the need to validate a model in an external dataset. Additionally, the LASSO approach trades off potential bias in estimating individual parameters for a better expected overall prediction. A corresponding important disadvantage of the LASSO approach is that the regression coefficients may not be reliably interpretable in terms of independent risk factors as the focus is on the best combined prediction, not on the accuracy of the estimation and interpretation of the contribution of individual variables. Variants on the general LASSO approach exist, such as ridge regression and Elastic Net4, and the relative merits of penalization regression techniques is an area of ongoing research.", "year": 2018, "venue": "British Journal of Surgery", "authors": [ "J. Ranstam", "J. Cook" ], "externalIds": { "DOI": "10.1002/bjs.10895", "CorpusId": 52070855, "PubMed": "30133766" }, "url": "https://www.semanticscholar.org/paper/76a94304f4c3eb0f173b5d22faf388260b66b31a", "referenceCount": 14, "citationCount": 415, "influentialCitationCount": 11, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Between Access and Privacy: Challenges in Sharing Health Data", "abstract": "Summary Objective: To summarize notable research contributions published in 2017 on data sharing and privacy issues in medical informatics. Methods: An extensive search of PubMed/Medline, Web of Science, ACM Digital Library, IEEE Xplore, and AAAI Digital Library was conducted to uncover the scientific contributions published in 2017 that addressed issues of biomedical data sharing, with a focus on data access and privacy. The selection process was based on three steps: (i) a selection of candidate best papers, (ii) the review of the candidate best papers by a team of international experts with respect to six predefined criteria, and (iii) the selection of the best papers by the editorial board of the Yearbook. Results: Five best papers were selected. They cover the lifecycle of biomedical data collection, use, and sharing. 
The papers introduce 1) consenting strategies for emerging environments, 2) software for searching and retrieving datasets in organizationally distributed environments, 3) approaches to measure the privacy risks of sharing new data increasingly utilized in research and the clinical setting (e.g., genomic), 4) new cryptographic techniques for querying clinical data for cohort discovery, and 5) novel game theoretic strategies for publishing summary information about genome-phenome studies that balance the utility of the data with potential privacy risks to the participants of such studies. Conclusion: The papers illustrated that there is no one-size-fitsall solution to privacy while working with biomedical data. At the same time, the papers show that there are opportunities for leveraging newly emerging technologies to enable data use while minimizing privacy risks.", "year": 2018, "venue": "Yearbook of Medical Informatics", "authors": [ "B. Malin", "K. Goodman" ], "externalIds": { "MAG": "2888958687", "PubMedCentral": "6115244", "DOI": "10.1055/s-0038-1641216", "CorpusId": 52117463, "PubMed": "30157505" }, "url": "https://www.semanticscholar.org/paper/51e1e0053bd63a2c2d43aa70a1600cee00040101", "referenceCount": 18, "citationCount": 28, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Piggyback: Adapting a Single Network to Multiple Tasks by Learning to Mask Weights", "abstract": null, "year": 2018, "venue": "European Conference on Computer Vision", "authors": [ "Arun Mallya", "Dillon Davis", "Svetlana Lazebnik" ], "externalIds": { "MAG": "2950262738", "DBLP": "conf/eccv/MallyaDL18", "DOI": "10.1007/978-3-030-01225-0_5", "CorpusId": 3977226 }, "url": "https://www.semanticscholar.org/paper/d5bb3faa48b83469da1a01ef267886e71f4a931a", "referenceCount": 37, "citationCount": 593, "influentialCitationCount": 62, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PackNet: Adding Multiple Tasks to a Single Network by Iterative Pruning", "abstract": "This paper presents a method for adding multiple tasks to a single deep neural network while avoiding catastrophic forgetting. Inspired by network pruning techniques, we exploit redundancies in large deep networks to free up parameters that can then be employed to learn new tasks. By performing iterative pruning and network re-training, we are able to sequentially \"pack\" multiple tasks into a single network while ensuring minimal drop in performance and minimal storage overhead. Unlike prior work that uses proxy losses to maintain accuracy on older tasks, we always optimize for the task at hand. We perform extensive experiments on a variety of network architectures and large-scale datasets, and observe much better robustness against catastrophic forgetting than prior work. 
In particular, we are able to add three fine-grained classification tasks to a single ImageNet-trained VGG-16 network and achieve accuracies close to those of separately trained networks for each task.", "year": 2017, "venue": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition", "authors": [ "Arun Mallya", "Svetlana Lazebnik" ], "externalIds": { "MAG": "2950530229", "ArXiv": "1711.05769", "DBLP": "journals/corr/abs-1711-05769", "DOI": "10.1109/CVPR.2018.00810", "CorpusId": 35249701 }, "url": "https://www.semanticscholar.org/paper/47bc048efb90e7b8bae5c1fcc979a78b65763fe9", "referenceCount": 31, "citationCount": 1099, "influentialCitationCount": 92, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "To prune, or not to prune: exploring the efficacy of pruning for model compression", "abstract": "Model pruning seeks to induce sparsity in a deep neural network's various connection matrices, thereby reducing the number of nonzero-valued parameters in the model. Recent reports (Han et al., 2015; Narang et al., 2017) prune deep networks at the cost of only a marginal loss in accuracy and achieve a sizable reduction in model size. This hints at the possibility that the baseline models in these experiments are perhaps severely over-parameterized at the outset and a viable alternative for model compression might be to simply reduce the number of hidden units while maintaining the model's dense connection structure, exposing a similar trade-off in model size and accuracy. We investigate these two distinct paths for model compression within the context of energy-efficient inference in resource-constrained environments and propose a new gradual pruning technique that is simple and straightforward to apply across a variety of models/datasets with minimal tuning and can be seamlessly incorporated within the training process. We compare the accuracy of large, but pruned models (large-sparse) and their smaller, but dense (small-dense) counterparts with identical memory footprint. Across a broad range of neural network architectures (deep CNNs, stacked LSTM, and seq2seq LSTM models), we find large-sparse models to consistently outperform small-dense models and achieve up to 10x reduction in number of non-zero parameters with minimal loss in accuracy.", "year": 2017, "venue": "International Conference on Learning Representations", "authors": [ "Michael Zhu", "Suyog Gupta" ], "externalIds": { "ArXiv": "1710.01878", "DBLP": "conf/iclr/ZhuG18", "MAG": "2764043458", "CorpusId": 27494814 }, "url": "https://www.semanticscholar.org/paper/3b4d671a8c7018c0b42673ba581e5ff3ae762d6c", "referenceCount": 31, "citationCount": 1151, "influentialCitationCount": 127, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Effect of local magnetic field disturbances on inertial measurement units accuracy.", "abstract": null, "year": 2017, "venue": "Applied Ergonomics", "authors": [ "X. Robert-Lachaine", "H. Mecheri", "C. Larue", "A. 
Plamondon" ], "externalIds": { "MAG": "2608089682", "DOI": "10.1016/j.apergo.2017.04.011", "CorpusId": 4031701, "PubMed": "28502401" }, "url": "https://www.semanticscholar.org/paper/237be53c6207bfe3bbea858b23c64722eb4aca3e", "referenceCount": 29, "citationCount": 56, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Medicine" ] }, { "title": "Learning Deep and Shallow Features for Human Activity Recognition", "abstract": null, "year": 2017, "venue": "Knowledge Science, Engineering and Management", "authors": [ "Sadiq Sani", "Stewart Massie", "N. Wiratunga", "Kay Cooper" ], "externalIds": { "DBLP": "conf/ksem/SaniMWC17", "MAG": "2737046835", "DOI": "10.1007/978-3-319-63558-3_40", "CorpusId": 21710823 }, "url": "https://www.semanticscholar.org/paper/9279deab24be3f7f0c1e63680b46df1063de03cc", "referenceCount": 21, "citationCount": 39, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ThiNet: A Filter Level Pruning Method for Deep Neural Network Compression", "abstract": "We propose an efficient and unified framework, namely ThiNet, to simultaneously accelerate and compress CNN models in both training and inference stages. We focus on the filter level pruning, i.e., the whole filter would be discarded if it is less important. Our method does not change the original network structure, thus it can be perfectly supported by any off-the-shelf deep learning libraries. We formally establish filter pruning as an optimization problem, and reveal that we need to prune filters based on statistics information computed from its next layer, not the current layer, which differentiates ThiNet from existing methods. Experimental results demonstrate the effectiveness of this strategy, which has advanced the state-of-the-art. We also show the performance of ThiNet on ILSVRC-12 benchmark. ThiNet achieves 3.31 x FLOPs reduction and 16.63× compression on VGG-16, with only 0.52% top-5 accuracy drop. Similar experiments with ResNet-50 reveal that even for a compact network, ThiNet can also reduce more than half of the parameters and FLOPs, at the cost of roughly 1% top-5 accuracy drop. Moreover, the original VGG-16 model can be further pruned into a very small model with only 5.05MB model size, preserving AlexNet level accuracy but showing much stronger generalization ability.", "year": 2017, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Jian-Hao Luo", "Jianxin Wu", "Weiyao Lin" ], "externalIds": { "MAG": "2949698407", "DBLP": "conf/iccv/LuoWL17", "ArXiv": "1707.06342", "DOI": "10.1109/ICCV.2017.541", "CorpusId": 11169209 }, "url": "https://www.semanticscholar.org/paper/049fd80f52c0b1fa4d532945d95a24734b62bdf3", "referenceCount": 34, "citationCount": 1675, "influentialCitationCount": 210, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Time series classification from scratch with deep neural networks: A strong baseline", "abstract": "We propose a simple but strong baseline for time series classification from scratch with deep neural networks. Our proposed baseline models are pure end-to-end without any heavy preprocessing on the raw data or feature crafting. The proposed Fully Convolutional Network (FCN) achieves premium performance to other state-of-the-art approaches and our exploration of the very deep neural networks with the ResNet structure is also competitive. 
The global average pooling in our convolutional model enables the exploitation of the Class Activation Map (CAM) to find out the contributing region in the raw data for the specific labels. Our models provides a simple choice for the real world application and a good starting point for the future research. An overall analysis is provided to discuss the generalization capability of our models, learned features, network structures and the classification semantics.", "year": 2016, "venue": "IEEE International Joint Conference on Neural Network", "authors": [ "Zhiguang Wang", "Weizhong Yan", "T. Oates" ], "externalIds": { "MAG": "2951213053", "DBLP": "conf/ijcnn/WangYO17", "ArXiv": "1611.06455", "DOI": "10.1109/IJCNN.2017.7966039", "CorpusId": 14303613 }, "url": "https://www.semanticscholar.org/paper/ca0917853ea55c0ec4cded50914eb825fd45d00f", "referenceCount": 23, "citationCount": 1448, "influentialCitationCount": 226, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "DeepSense: A Unified Deep Learning Framework for Time-Series Mobile Sensing Data Processing", "abstract": "Mobile sensing and computing applications usually require time-series inputs from sensors, such as accelerometers, gyroscopes, and magnetometers. Some applications, such as tracking, can use sensed acceleration and rate of rotation to calculate displacement based on physical system models. Other applications, such as activity recognition, extract manually designed features from sensor inputs for classification. Such applications face two challenges. On one hand, on-device sensor measurements are noisy. For many mobile applications, it is hard to find a distribution that exactly describes the noise in practice. Unfortunately, calculating target quantities based on physical system and noise models is only as accurate as the noise assumptions. Similarly, in classification applications, although manually designed features have proven to be effective, it is not always straightforward to find the most robust features to accommodate diverse sensor noise patterns and heterogeneous user behaviors. To this end, we propose DeepSense, a deep learning framework that directly addresses the aforementioned noise and feature customization challenges in a unified manner. DeepSense integrates convolutional and recurrent neural networks to exploit local interactions among similar mobile sensors, merge local interactions of different sensory modalities into global interactions, and extract temporal relationships to model signal dynamics. DeepSense thus provides a general signal estimation and classification framework that accommodates a wide range of applications. We demonstrate the effectiveness of DeepSense using three representative and challenging tasks: car tracking with motion sensors, heterogeneous human activity recognition, and user identification with biometric motion analysis. DeepSense significantly outperforms the state-of-the-art methods for all three tasks. In addition, we show that DeepSense is feasible to implement on smartphones and embedded devices thanks to its moderate energy consumption and low latency.", "year": 2016, "venue": "The Web Conference", "authors": [ "Shuochao Yao", "Shaohan Hu", "Yiran Zhao", "Aston Zhang", "T. 
Abdelzaher" ], "externalIds": { "DBLP": "journals/corr/YaoHZZA16", "MAG": "2553915786", "ArXiv": "1611.01942", "DOI": "10.1145/3038912.3052577", "CorpusId": 5249903 }, "url": "https://www.semanticscholar.org/paper/1b3675fc0f2b16743b1e1f0c2f84829cfdb3d34f", "referenceCount": 55, "citationCount": 571, "influentialCitationCount": 67, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Recognizing Detailed Human Context in the Wild from Smartphones and Smartwatches", "abstract": "The ability to automatically recognize a person’s behavioral context can contribute to health monitoring, aging care, and many other domains. Validating context recognition in the wild is crucial to promote practical applications that work in real-life settings. The authors collected more than 300,000 minutes of sensor data with context labels from 60 subjects. Unlike previous studies, these subjects used their own personal phone, in any way that was convenient to them, and engaged in their routine in their natural environments. Unscripted behavior and unconstrained phone usage resulted in situations that were harder to recognize. The authors demonstrate how fusion of multimodal sensors is important for resolving such cases. They present a baseline system and encourage researchers to use their public dataset to compare methods and improve context recognition in the wild.", "year": 2016, "venue": "IEEE pervasive computing", "authors": [ "Yonatan Vaizman", "Katherine Ellis", "Gert R. G. Lanckriet" ], "externalIds": { "DBLP": "journals/pervasive/VaizmanEL17", "ArXiv": "1609.06354", "MAG": "2953092962", "DOI": "10.1109/MPRV.2017.3971131", "CorpusId": 8728742 }, "url": "https://www.semanticscholar.org/paper/a85a5307c1214b9accea49e3d27bf3daa24335dc", "referenceCount": 53, "citationCount": 254, "influentialCitationCount": 37, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Toward Personalized Activity Recognition Systems With a Semipopulation Approach", "abstract": "Activity recognition is a key component of context-aware computing to support people's physical activity, but conventional approaches often lack in their generalizability and scalability due to problems of diversity in how individuals perform activities, overfitting when building activity models, and collection of a large amount of labeled data from end users. To address these limitations, we propose a semipopulation-based approach that exploits activity models trained from other users; therefore, a new user does not need to provide a large volume of labeled activity data. Instead of relying on any additional information from users like their weight or height, our approach directly measures the fitness of others' models on a small amount of labeled data collected from the new user. With these shared activity models among users, we compose a hybrid model of Bayesian networks and support vector machines to accurately recognize the activity of the new user. On activity data collected from 28 people with a diversity in gender, age, weight, and height, our approach produced an average accuracy of 83.4% (kappa: 0.852), compared with individual and (standard) population models that had accuracies of 77.3% (kappa: 0.79) and 77.7% (kappa: 0.743), respectively. 
Through an analysis on the performance of our approach and users' demographic information, our approach outperforms others that rely on users' demographic information for recognizing their activities, which may contradict the commonly held belief that physically similar people would have similar activity patterns.", "year": 2016, "venue": "IEEE Transactions on Human-Machine Systems", "authors": [ "Jin-Hyuk Hong", "Julian Ramos", "A. Dey" ], "externalIds": { "MAG": "2285292066", "DBLP": "journals/thms/HongRD16", "DOI": "10.1109/THMS.2015.2489688", "CorpusId": 25911982 }, "url": "https://www.semanticscholar.org/paper/7fc13f5a06d87909865f0b799806259a1768bd94", "referenceCount": 46, "citationCount": 82, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Smart Devices are Different: Assessing and MitigatingMobile Sensing Heterogeneities for Activity Recognition", "abstract": "The widespread presence of motion sensors on users' personal mobile devices has spawned a growing research interest in human activity recognition (HAR). However, when deployed at a large-scale, e.g., on multiple devices, the performance of a HAR system is often significantly lower than in reported research results. This is due to variations in training and test device hardware and their operating system characteristics among others. In this paper, we systematically investigate sensor-, device- and workload-specific heterogeneities using 36 smartphones and smartwatches, consisting of 13 different device models from four manufacturers. Furthermore, we conduct experiments with nine users and investigate popular feature representation and classification techniques in HAR research. Our results indicate that on-device sensor and sensor handling heterogeneities impair HAR performances significantly. Moreover, the impairments vary significantly across devices and depends on the type of recognition technique used. We systematically evaluate the effect of mobile sensing heterogeneities on HAR and propose a novel clustering-based mitigation technique suitable for large-scale deployment of HAR, where heterogeneity of devices and their usage scenarios are intrinsic.", "year": 2015, "venue": "ACM International Conference on Embedded Networked Sensor Systems", "authors": [ "Allan Stisen", "H. Blunck", "S. Bhattacharya", "Thor S. Prentow", "M. Kjærgaard", "A. Dey", "Tobias Sonne", "M. Jensen" ], "externalIds": { "DBLP": "conf/sensys/StisenBBPKDSJ15", "MAG": "2057907879", "DOI": "10.1145/2809695.2809718", "CorpusId": 5687667 }, "url": "https://www.semanticscholar.org/paper/021ad000c6767a794a0b1378c037a56916972e92", "referenceCount": 48, "citationCount": 626, "influentialCitationCount": 87, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Cyclical Learning Rates for Training Neural Networks", "abstract": "It is known that the learning rate is the most important hyper-parameter to tune for training deep neural networks. This paper describes a new method for setting the learning rate, named cyclical learning rates, which practically eliminates the need to experimentally find the best values and schedule for the global learning rates. Instead of monotonically decreasing the learning rate, this method lets the learning rate cyclically vary between reasonable boundary values. Training with cyclical learning rates instead of fixed values achieves improved classification accuracy without a need to tune and often in fewer iterations. 
This paper also describes a simple way to estimate \"reasonable bounds\" – linearly increasing the learning rate of the network for a few epochs. In addition, cyclical learning rates are demonstrated on the CIFAR-10 and CIFAR-100 datasets with ResNets, Stochastic Depth networks, and DenseNets, and the ImageNet dataset with the AlexNet and GoogLeNet architectures. These are practical tools for everyone who trains neural networks.", "year": 2015, "venue": "IEEE Workshop/Winter Conference on Applications of Computer Vision", "authors": [ "L. Smith" ], "externalIds": { "MAG": "2964054038", "DBLP": "conf/wacv/Smith17", "ArXiv": "1506.01186", "DOI": "10.1109/WACV.2017.58", "CorpusId": 15247298 }, "url": "https://www.semanticscholar.org/paper/37b5dfe87d82ba8f310155165d5bf841dc92dea2", "referenceCount": 34, "citationCount": 2320, "influentialCitationCount": 249, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Adolescent outcomes of children with early speech sound disorders with and without language impairment.", "abstract": "PURPOSE\nIn this study, the authors determined adolescent speech, language, and literacy outcomes of individuals with histories of early childhood speech sound disorders (SSD) with and without comorbid language impairment (LI) and examined factors associated with these outcomes.\n\n\nMETHOD\nThis study used a prospective longitudinal design. Participants with SSD (n = 170), enrolled at early childhood (4-6 years) were followed at adolescence (11-18 years) and were compared to individuals with no histories of speech or language impairment (no SSD; n = 146) on measures of speech, language, and literacy. Comparisons were made between adolescents with early childhood histories of no SSD, SSD only, and SSD plus LI as well as between adolescents with no SSD, resolved SSD, and persistent SSD.\n\n\nRESULTS\nIndividuals with early childhood SSD with comorbid LI had poorer outcomes than those with histories of SSD only or no SSD. Poorer language and literacy outcomes in adolescence were associated with multiple factors, including persistent speech sound problems, lower nonverbal intelligence, and lower socioeconomic status. Adolescents with persistent SSD had higher rates of comorbid LI and reading disability than the no SSD and resolved SSD groups.\n\n\nCONCLUSION\nRisk factors for language and literacy problems in adolescence include an early history of LI, persistent SSD, lower nonverbal cognitive ability, and social disadvantage.", "year": 2015, "venue": "American Journal of Speech-Language Pathology", "authors": [ "B. Lewis", "L. Freebairn", "Jessica Tag", "A. Ciesla", "S. Iyengar", "Catherine M. Stein", "H. Taylor" ], "externalIds": { "MAG": "2000079222", "DOI": "10.1044/2014_AJSLP-14-0075", "CorpusId": 10729917, "PubMed": "25569242" }, "url": "https://www.semanticscholar.org/paper/c538a1284603f81548a810b0c31eca675ad8c1f8", "referenceCount": 45, "citationCount": 73, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Psychology", "Medicine" ] }, { "title": "FaceNet: A unified embedding for face recognition and clustering", "abstract": "Despite significant recent advances in the field of face recognition [10, 14, 15, 17], implementing face verification and recognition efficiently at scale presents serious challenges to current approaches. In this paper we present a system, called FaceNet, that directly learns a mapping from face images to a compact Euclidean space where distances directly correspond to a measure offace similarity. 
Once this space has been produced, tasks such as face recognition, verification and clustering can be easily implemented using standard techniques with FaceNet embeddings as feature vectors. Our method uses a deep convolutional network trained to directly optimize the embedding itself, rather than an intermediate bottleneck layer as in previous deep learning approaches. To train, we use triplets of roughly aligned matching / non-matching face patches generated using a novel online triplet mining method. The benefit of our approach is much greater representational efficiency: we achieve state-of-the-art face recognition performance using only 128-bytes per face. On the widely used Labeled Faces in the Wild (LFW) dataset, our system achieves a new record accuracy of 99.63%. On YouTube Faces DB it achieves 95.12%. Our system cuts the error rate in comparison to the best published result [15] by 30% on both datasets.", "year": 2015, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Florian Schroff", "Dmitry Kalenichenko", "James Philbin" ], "externalIds": { "DBLP": "journals/corr/SchroffKP15", "MAG": "2096733369", "ArXiv": "1503.03832", "DOI": "10.1109/CVPR.2015.7298682", "CorpusId": 206592766 }, "url": "https://www.semanticscholar.org/paper/5aa26299435bdf7db874ef1640a6c3b5a4a2c394", "referenceCount": 24, "citationCount": 12151, "influentialCitationCount": 1693, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Effect of walking speed and placement position interactions in determining the accuracy of various newer pedometers", "abstract": null, "year": 2014, "venue": "", "authors": [ "W. Park", "Victor Js Lee", "Byungmo Ku", "Hirofumi Tanaka" ], "externalIds": { "MAG": "1997712216", "DOI": "10.1016/J.JESF.2014.01.003", "CorpusId": 71696820 }, "url": "https://www.semanticscholar.org/paper/9971253e25bae02cb39c394da7c992de9eff4a8a", "referenceCount": 20, "citationCount": 45, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "User, device and orientation independent human activity recognition on mobile phones: challenges and a proposal", "abstract": "Smart phones equipped with a rich set of sensors are explored as alternative platforms for human activity recognition in the ubiquitous computing domain. However, there exist challenges that should be tackled before the successful acceptance of such systems by the masses. In this paper, we particularly focus on the challenges arising from the differences in user behavior and in the hardware. To investigate the impact of these factors on the recognition accuracy, we performed tests with 20 different users focusing on the recognition of basic locomotion activities using the accelerometer, gyroscope and magnetic field sensors. 
We investigated the effect of feature types, to represent the raw data, and the use of linear acceleration for user, device and orientation-independent activity recognition.", "year": 2013, "venue": "Ubiquitous Computing", "authors": [ "Yunus Emre Ustev", "Özlem Durmaz Incel", "Cem Ersoy" ], "externalIds": { "DBLP": "conf/huc/UstevIE13", "MAG": "2138725602", "DOI": "10.1145/2494091.2496039", "CorpusId": 6395121 }, "url": "https://www.semanticscholar.org/paper/423cf6dd094e23642129a41ea56874b5b2da1322", "referenceCount": 11, "citationCount": 152, "influentialCitationCount": 11, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Datasets and models For the evaluations we used four datasets: PERCEPT-R [Benway et al., 2023]", "abstract": null, "year": 2022, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "How to reset your apple watch fitness calibration for more accurate workout and activity data", "abstract": null, "year": 2021, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Neuron-level Structured Pruning using Polarization Regularizer", "abstract": "Neuron-level structured pruning is a very effective technique to reduce the computation of neural networks without compromising prediction accuracy. In previous works, structured pruning is usually achieved by imposing L1 regularization on the scaling factors of neurons, and pruning the neurons whose scaling factors are below a certain threshold. The reasoning is that neurons with smaller scaling factors have weaker influence on network output. A scaling factor close to 0 actually suppresses a neuron. However, L1 regularization lacks discrimination between neurons because it pushes all scaling factors towards 0. A more reasonable pruning method is to only suppress unimportant neurons (with 0 scaling factors), and simultaneously keep important neurons intact (with larger scaling factor). To achieve this goal, we propose a new regularizer on scaling factors, namely polarization regularizer. Theoretically, we prove that polarization regularizer pushes some scaling factors to 0 and others to a value a > 0 . Experimentally, we show that structured pruning using polarization regularizer achieves much better results than using L1 regular-izer. 
Experiments on CIFAR and ImageNet datasets show that polarization pruning achieves the state-of-the-art result.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Zhuang Tao", "Zhixuan Zhang", "Yuheng Huang", "Xiaoyi Zeng", "Kai Shuang", "Xiang Li" ], "externalIds": { "DBLP": "conf/nips/ZhuangZHZSL20", "MAG": "3104591140", "CorpusId": 227276228 }, "url": "https://www.semanticscholar.org/paper/2a8ad5671bd3cba5d1434ff70672153688f57772", "referenceCount": 38, "citationCount": 115, "influentialCitationCount": 16, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "New insights and perspectives on the natural gradient method", "abstract": null, "year": 2020, "venue": "Journal of Machine Learning Research", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Proposed regulatory framework for modifications to artificial intelligence / machine learning-based software as a medical device", "abstract": null, "year": 2019, "venue": "US Food and Drug Administration: Silver Spring, MD, USA", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Amazon admits listening to alexa conversations: Why it matters", "abstract": null, "year": 2019, "venue": "shorturl.at/fxN78", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Study: Apple watch paired with deep neural network detects atrial fibrillation with 97 percent accuracy", "abstract": null, "year": 2017, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Hey siri: An on-device dnn-powered voice trigger for apple’s personal assistant", "abstract": null, "year": 2017, "venue": "machinelearning", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Real-time physiological signals: E4 eda/gsr sensor", "abstract": null, "year": 2015, "venue": "Empatica", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Learning Multiple Layers of Features from Tiny Images", "abstract": "Groups at MIT and NYU have collected a dataset of millions of tiny colour images from the web. It is, in principle, an excellent dataset for unsupervised training of deep generative models, but previous researchers who have tried this have found it difficult to learn a good set of filters from the images. We show how to train a multi-layer generative model that learns to extract meaningful features which resemble those found in the human visual cortex. Using a novel parallelization algorithm to distribute the work among multiple machines connected on a network, we show how training such a model can be done in reasonable time. A second problematic aspect of the tiny images dataset is that there are no reliable class labels which makes it hard to use for object recognition experiments. 
We created two sets of reliable labels. The CIFAR-10 set has 6000 examples of each of 10 classes and the CIFAR-100 set has 600 examples of each of 100 non-overlapping classes. Using these labels, we show that object recognition is significantly improved by pre-training a layer of features on a large set of unlabeled tiny images.", "year": 2009, "venue": "", "authors": [ "A. Krizhevsky" ], "externalIds": { "MAG": "2945315962", "CorpusId": 18268744 }, "url": "https://www.semanticscholar.org/paper/5d90f06bb70a0a3dced62413346235c02b1aa086", "referenceCount": 15, "citationCount": 31347, "influentialCitationCount": 7621, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Artificial intelligence assisted speech therapy for /r/ using speech motor chaining and the percept engine: a single case experimental clinical trial with chainingai", "abstract": null, "year": null, "venue": "surface.syr.edu/etd/", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Mobilephys: Personalized mobile camera-based contactless physiological sensing", "abstract": null, "year": null, "venue": "Proc. ACM Interact. Mob. Wearable Ubiquitous Technol.", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Stress Sensing Dataset: For this binary classification problem, F1 score has been used as a performance metric as suggested by the original authors Xiao et al. [2024]", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Supervised and unsupervised machine learning approaches—a survey", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "deep-learning-empowered", "abstract": null, "year": null, "venue": "Sensefi: A library and benchmark on deep-learning-empowered", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "3 tips to make google assistant your own", "abstract": null, "year": null, "venue": "blog.google/products/assistant/how-to-personalize-google-assistant/", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Workout types on apple watch", "abstract": null, "year": null, "venue": "support", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "We restrict our study to the models benchmarked and deployed for datasets used in this work without accounting for model variability", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Psychotic relapse prediction in schizophrenia patients using a personalized mobile sensing-based supervised deep learning model", "abstract": null, "year": null, "venue": "IEEE Journal of Biomedical and Health Informatics", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, 
{ "title": "Dapper: Label-free performance estimation after personalization for heterogeneous mobile sensing", "abstract": null, "year": null, "venue": "Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "The metrics ∆ P and ∆ G are computed for each individual separately as personalized models customized for one user are not applicable to other users in real-world scenarios.", "abstract": null, "year": null, "venue": "inter-user or inter-dataset generalizability", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "The approach relies on using a pre-trained off-the-shelf model as an input, the quality of this model can impact the performance of the final personalized models", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Reading between the heat: Co-teaching body thermal signatures for non-intrusive stress detection", "abstract": null, "year": null, "venue": "Proc. ACM Interact. Mob. Wearable Ubiquitous Technol.", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Sample compression unleashed : New generalization bounds for real valued losses": { "paper_title": "Sample compression unleashed : New generalization bounds for real valued losses", "arxiv_id": "2409.17932v1", "keyword": "deep learning", "authors": [ "Mathieu Bazinet", "Valentina Zantedeschi", "Pascal Germain" ], "references": [ { "title": "List Sample Compression and Uniform Convergence", "abstract": "List learning is a variant of supervised classification where the learner outputs multiple plausible labels for each instance rather than just one. We investigate classical principles related to generalization within the context of list learning. Our primary goal is to determine whether classical principles in the PAC setting retain their applicability in the domain of list PAC learning. We focus on uniform convergence (which is the basis of Empirical Risk Minimization) and on sample compression (which is a powerful manifestation of Occam's Razor). In classical PAC learning, both uniform convergence and sample compression satisfy a form of `completeness': whenever a class is learnable, it can also be learned by a learning rule that adheres to these principles. We ask whether the same completeness holds true in the list learning setting. We show that uniform convergence remains equivalent to learnability in the list PAC learning setting. 
In contrast, our findings reveal surprising results regarding sample compression: we prove that when the label space is $Y=\\{0,1,2\\}$, then there are 2-list-learnable classes that cannot be compressed. This refutes the list version of the sample compression conjecture by Littlestone and Warmuth (1986). We prove an even stronger impossibility result, showing that there are $2$-list-learnable classes that cannot be compressed even when the reconstructed function can work with lists of arbitrarily large size. We prove a similar result for (1-list) PAC learnable classes when the label space is unbounded. This generalizes a recent result by arXiv:2308.06424.", "year": 2024, "venue": "Annual Conference Computational Learning Theory", "authors": [ "Steve Hanneke", "Shay Moran", "Tom Waknine" ], "externalIds": { "DBLP": "journals/corr/abs-2403-10889", "ArXiv": "2403.10889", "DOI": "10.48550/arXiv.2403.10889", "CorpusId": 268512719 }, "url": "https://www.semanticscholar.org/paper/7f2c28a4548f14460ad09d843c42614af9519bd8", "referenceCount": 21, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "PAC-Bayes-Chernoff bounds for unbounded losses", "abstract": "We introduce a new PAC-Bayes oracle bound for unbounded losses. This result can be understood as a PAC-Bayesian version of the Cram\\'er-Chernoff bound. The proof technique relies on controlling the tails of certain random variables involving the Cram\\'er transform of the loss. We highlight several applications of the main theorem. First, we show that our result naturally allows exact optimization of the free parameter on many PAC-Bayes bounds. Second, we recover and generalize previous results. Finally, we show that our approach allows working with richer assumptions that result in more informative and potentially tighter bounds. In this direction, we provide a general bound under a new ``model-dependent bounded CGF\"assumption from which we obtain bounds based on parameter norms and log-Sobolev inequalities. All these bounds can be minimized to obtain novel posteriors.", "year": 2024, "venue": "arXiv.org", "authors": [ "Ioar Casado", "Luis A. Ortega", "A. Masegosa", "Aritz Pérez Martínez" ], "externalIds": { "DBLP": "journals/corr/abs-2401-01148", "ArXiv": "2401.01148", "DOI": "10.48550/arXiv.2401.01148", "CorpusId": 266725375 }, "url": "https://www.semanticscholar.org/paper/47acb064f9457e9ade5c9b5d55f5dfea4fef77ce", "referenceCount": 58, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Comparing Comparators in Generalization Bounds", "abstract": "We derive generic information-theoretic and PAC-Bayesian generalization bounds involving an arbitrary convex comparator function, which measures the discrepancy between the training and population loss. The bounds hold under the assumption that the cumulant-generating function (CGF) of the comparator is upper-bounded by the corresponding CGF within a family of bounding distributions. We show that the tightest possible bound is obtained with the comparator being the convex conjugate of the CGF of the bounding distribution, also known as the Cram\\'er function. This conclusion applies more broadly to generalization bounds with a similar structure. 
This confirms the near-optimality of known bounds for bounded and sub-Gaussian losses and leads to novel bounds under other bounding distributions.", "year": 2023, "venue": "International Conference on Artificial Intelligence and Statistics", "authors": [ "Fredrik Hellström", "Benjamin Guedj" ], "externalIds": { "DBLP": "conf/aistats/HellstromG24", "ArXiv": "2310.10534", "DOI": "10.48550/arXiv.2310.10534", "CorpusId": 264146838 }, "url": "https://www.semanticscholar.org/paper/8ec2d05bdeb8820d656ffdc898aafb3bc8f92dab", "referenceCount": 79, "citationCount": 2, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Private Distribution Learning with Public Data: The View from Sample Compression", "abstract": "We study the problem of private distribution learning with access to public data. In this setup, which we refer to as public-private learning, the learner is given public and private samples drawn from an unknown distribution $p$ belonging to a class $\\mathcal Q$, with the goal of outputting an estimate of $p$ while adhering to privacy constraints (here, pure differential privacy) only with respect to the private samples. We show that the public-private learnability of a class $\\mathcal Q$ is connected to the existence of a sample compression scheme for $\\mathcal Q$, as well as to an intermediate notion we refer to as list learning. Leveraging this connection: (1) approximately recovers previous results on Gaussians over $\\mathbb R^d$; and (2) leads to new ones, including sample complexity upper bounds for arbitrary $k$-mixtures of Gaussians over $\\mathbb R^d$, results for agnostic and distribution-shift resistant learners, as well as closure properties for public-private learnability under taking mixtures and products of distributions. Finally, via the connection to list learning, we show that for Gaussians in $\\mathbb R^d$, at least $d$ public samples are necessary for private learnability, which is close to the known upper bound of $d+1$ public samples.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Shai Ben-David", "A. Bie", "C. Canonne", "Gautam Kamath", "Vikrant Singhal" ], "externalIds": { "ArXiv": "2308.06239", "DBLP": "conf/nips/Ben-DavidBC0S23", "DOI": "10.48550/arXiv.2308.06239", "CorpusId": 260865883 }, "url": "https://www.semanticscholar.org/paper/f821b9a0557bfce8b5cab784e257ae003dee2f9e", "referenceCount": 82, "citationCount": 9, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "More PAC-Bayes bounds: From bounded losses, to losses with general tail behaviors, to anytime-validity", "abstract": "In this paper, we present new high-probability PAC-Bayes bounds for different types of losses. Firstly, for losses with a bounded range, we present a strengthened version of Catoni's bound that holds uniformly for all parameter values. This leads to new fast rate and mixed rate bounds that are interpretable and tighter than previous bounds in the literature. Secondly, for losses with more general tail behaviors, we introduce two new parameter-free bounds: a PAC-Bayes Chernoff analogue when the loss' cumulative generating function is bounded, and a bound when the loss' second moment is bounded. These two bounds are obtained using a new technique based on a discretization of the space of possible events for the\"in probability\"parameter optimization problem. 
Finally, we extend all previous results to anytime-valid bounds using a simple technique applicable to any existing bound.", "year": 2023, "venue": "Journal of machine learning research", "authors": [ "Borja Rodr'iguez G'alvez", "R. Thobaben", "M. Skoglund" ], "externalIds": { "ArXiv": "2306.12214", "DBLP": "journals/corr/abs-2306-12214", "DOI": "10.48550/arXiv.2306.12214", "CorpusId": 259211830 }, "url": "https://www.semanticscholar.org/paper/05fba907547020ef41b3bc8c89c46e34b1dba3fb", "referenceCount": 94, "citationCount": 7, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Compression, Generalization and Learning", "abstract": "A compression function is a map that slims down an observational set into a subset of reduced size, while preserving its informational content. In multiple applications, the condition that one new observation makes the compressed set change is interpreted that this observation brings in extra information and, in learning theory, this corresponds to misclassification, or misprediction. In this paper, we lay the foundations of a new theory that allows one to keep control on the probability of change of compression (which maps into the statistical\"risk\"in learning applications). Under suitable conditions, the cardinality of the compressed set is shown to be a consistent estimator of the probability of change of compression (without any upper limit on the size of the compressed set); moreover, unprecedentedly tight finite-sample bounds to evaluate the probability of change of compression are obtained under a generally applicable condition of preference. All results are usable in a fully agnostic setup, i.e., without requiring any a priori knowledge on the probability distribution of the observations. Not only these results offer a valid support to develop trust in observation-driven methodologies, they also play a fundamental role in learning techniques as a tool for hyper-parameter tuning.", "year": 2023, "venue": "Journal of machine learning research", "authors": [ "M. Campi", "S. Garatti" ], "externalIds": { "DBLP": "journals/jmlr/CampiG23", "ArXiv": "2301.12767", "DOI": "10.48550/arXiv.2301.12767", "CorpusId": 256390591 }, "url": "https://www.semanticscholar.org/paper/ec1df8f494fe2f374b58b55ed3de89fac8ac5f97", "referenceCount": 50, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "How Tight Can PAC-Bayes be in the Small Data Regime?", "abstract": "In this paper, we investigate the question: Given a small number of datapoints, for example N = 30 , how tight can PAC-Bayes and test set bounds be made? For such small datasets, test set bounds adversely affect generalisation performance by withholding data from the training procedure. In this setting, PAC-Bayes bounds are especially attractive, due to their ability to use all the data to simultaneously learn a posterior and bound its generalisation risk. We focus on the case of i.i.d. data with a bounded loss and consider the generic PAC-Bayes theorem of Germain et al. While their theorem is known to recover many existing PAC-Bayes bounds, it is unclear what the tightest bound derivable from their framework is. 
For a fixed learning algorithm and dataset, we show that the tightest possible bound coincides with a bound considered by Catoni; and, in the more natural case of distributions over datasets, we establish a lower bound on the best bound achievable in expectation. Interestingly, this lower bound recovers the Chernoff test set bound if the posterior is equal to the prior. Moreover, to illustrate how tight these bounds can be, we study synthetic one-dimensional classification tasks in which it is feasible to meta-learn both the prior and the form of the bound to numerically optimise for the tightest bounds possible. We find that in this simple, controlled scenario, PAC-Bayes bounds are competitive with comparable, commonly used Chernoff test set bounds. However, the sharpest test set bounds still lead to better guarantees on the generalisation error than the PAC-Bayes bounds we consider. data-generating distributions, and aim to find the best expected bounds for this distribution achievable by an optimised algorithm. 10 We choose especially simple learning tasks — synthetic 1-dimensional binary classification problems, generated by thresholding Gaussian process (GP) samples — which allows us to fully control the task distribution and easily inspect predictive distributions visually to diagnose learning. Appendix I.1 contains full details.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Andrew Y. K. Foong", "W. Bruinsma", "David R. Burt", "Richard E. Turner" ], "externalIds": { "DBLP": "conf/nips/FoongBBT21", "ArXiv": "2106.03542", "CorpusId": 235358927 }, "url": "https://www.semanticscholar.org/paper/e737e0392e695941fb9031e980f261683ff0e5fd", "referenceCount": 53, "citationCount": 18, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Stable Sample Compression Schemes: New Applications and an Optimal SVM Margin Bound", "abstract": "We analyze a family of supervised learning algorithms based on sample compression schemes that are stable, in the sense that removing points from the training set which were not selected for the compression set does not alter the resulting classifier. We use this technique to derive a variety of novel or improved data-dependent generalization bounds for several learning algorithms. In particular, we prove a new margin bound for SVM, removing a log factor. The new bound is provably optimal. This resolves a long-standing open question about the PAC margin bounds achievable by SVM.", "year": 2020, "venue": "International Conference on Algorithmic Learning Theory", "authors": [ "Steve Hanneke", "A. Kontorovich" ], "externalIds": { "ArXiv": "2011.04586", "DBLP": "journals/corr/abs-2011-04586", "MAG": "3098226149", "CorpusId": 226281407 }, "url": "https://www.semanticscholar.org/paper/44cfaa2b68a22a86f0a1796e6f474a4a08523500", "referenceCount": 47, "citationCount": 21, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Tighter risk certificates for neural networks", "abstract": "This paper presents an empirical study regarding training probabilistic neural networks using training objectives derived from PAC-Bayes bounds. In the context of probabilistic neural networks, the output of training is a probability distribution over network weights. We present two training objectives, used here for the first time in connection with training neural networks. 
These two training objectives are derived from tight PAC-Bayes bounds. We also re-implement a previously used training objective based on a classical PAC-Bayes bound, to compare the properties of the predictors learned using the different training objectives. We compute risk certificates that are valid on any unseen examples for the learnt predictors. We further experiment with different types of priors on the weights (both data-free and data-dependent priors) and neural network architectures. Our experiments on MNIST and CIFAR-10 show that our training methods produce competitive test set errors and non-vacuous risk bounds with much tighter values than previous results in the literature, showing promise not only to guide the learning algorithm through bounding the risk but also for model selection. These observations suggest that the methods studied here might be good candidates for self-certified learning, in the sense of certifying the risk on any unseen data without the need for data-splitting protocols.", "year": 2020, "venue": "Journal of machine learning research", "authors": [ "M. P'erez-Ortiz", "Omar Rivasplata", "J. Shawe-Taylor", "Csaba Szepesvari" ], "externalIds": { "MAG": "3044132587", "DBLP": "journals/jmlr/Perez-OrtizRSS21", "ArXiv": "2007.12911", "CorpusId": 220793611 }, "url": "https://www.semanticscholar.org/paper/4a2a3ecaab200750a3717283ded3ba64381390ee", "referenceCount": 93, "citationCount": 89, "influentialCitationCount": 21, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "PAC-Bayes Unleashed: Generalisation Bounds with Unbounded Losses", "abstract": "We present new PAC-Bayesian generalisation bounds for learning problems with unbounded loss functions. This extends the relevance and applicability of the PAC-Bayes learning framework, where most of the existing literature focuses on supervised learning problems with a bounded loss function (typically assumed to take values in the interval [0;1]). In order to relax this classical assumption, we propose to allow the range of the loss to depend on each predictor. This relaxation is captured by our new notion of HYPothesis-dependent rangE (HYPE). Based on this, we derive a novel PAC-Bayesian generalisation bound for unbounded loss functions, and we instantiate it on a linear regression problem. To make our theory usable by the largest audience possible, we include discussions on actual computation, practicality and limitations of our assumptions.", "year": 2020, "venue": "Entropy", "authors": [ "Maxime Haddouche", "Benjamin Guedj", "Omar Rivasplata", "J. Shawe-Taylor" ], "externalIds": { "ArXiv": "2006.07279", "DBLP": "journals/entropy/HaddoucheGRS21", "PubMedCentral": "8534909", "MAG": "3034738357", "DOI": "10.3390/e23101330", "CorpusId": 219636248, "PubMed": "34682054" }, "url": "https://www.semanticscholar.org/paper/70ee7e422703e433e8e0714c275f015044897c7c", "referenceCount": 32, "citationCount": 48, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine", "Mathematics" ] }, { "title": "Sample Compression, Support Vectors, and Generalization in Deep Learning", "abstract": "Even though Deep Neural Networks (DNNs) are widely celebrated for their practical performance, they possess many intriguing properties related to depth that are difficult to explain both theoretically and intuitively. 
Understanding how weights in deep networks coordinate together across layers to form useful learners has proven challenging, in part because the repeated composition of nonlinearities has proved intractable. This paper presents a reparameterization of DNNs as a linear function of a feature map that is locally independent of the weights. This feature map transforms depth-dependencies into simple tensor products and maps each input to a discrete subset of the feature space. Then, using a max-margin assumption, the paper develops a sample compression representation of the neural network in terms of the discrete activation state of neurons induced by $s$ “support vectors”. The paper shows that the number of support vectors $s$ relates with learning guarantees for neural networks through sample compression bounds, yielding a sample complexity of $\\mathcal {O}(ns/\\epsilon)$ for networks with $n$ neurons. Finally, the number of support vectors $s$ is found to monotonically increase with width and label noise but decrease with depth.", "year": 2018, "venue": "IEEE Journal on Selected Areas in Information Theory", "authors": [ "Christopher Snyder", "S. Vishwanath" ], "externalIds": { "MAG": "2981995215", "DBLP": "journals/jsait/SnyderV20", "ArXiv": "1811.02067", "DOI": "10.1109/JSAIT.2020.2981864", "CorpusId": 201737669 }, "url": "https://www.semanticscholar.org/paper/c77e248581a1f760225d48c568a89f2fc9b5fc43", "referenceCount": 31, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Agnostic Sample Compression Schemes for Regression", "abstract": "We obtain the first positive results for bounded sample compression in the agnostic regression setting with the $\\ell_p$ loss, where $p\\in [1,\\infty]$. We construct a generic approximate sample compression scheme for real-valued function classes exhibiting exponential size in the fat-shattering dimension but independent of the sample size. Notably, for linear regression, an approximate compression of size linear in the dimension is constructed. Moreover, for $\\ell_1$ and $\\ell_\\infty$ losses, we can even exhibit an efficient exact sample compression scheme of size linear in the dimension. We further show that for every other $\\ell_p$ loss, $p\\in (1,\\infty)$, there does not exist an exact agnostic compression scheme of bounded size. This refines and generalizes a negative result of David, Moran, and Yehudayoff for the $\\ell_2$ loss. We close by posing general open questions: for agnostic regression with $\\ell_1$ loss, does every function class admits an exact compression scheme of size equal to its pseudo-dimension? For the $\\ell_2$ loss, does every function class admit an approximate compression scheme of polynomial size in the fat-shattering dimension? These questions generalize Warmuth's classic sample compression conjecture for realizable-case classification.", "year": 2018, "venue": "International Conference on Machine Learning", "authors": [ "Steve Hanneke", "A. 
Kontorovich", "Menachem Sadigurschi" ], "externalIds": { "ArXiv": "1810.01864", "DBLP": "conf/icml/AttiasHKS24", "CorpusId": 52920963 }, "url": "https://www.semanticscholar.org/paper/3c8a481798b262e2a485fd6e195c9b070f32e307", "referenceCount": 47, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "On the Perceptron’s Compression", "abstract": null, "year": 2018, "venue": "Conference on Computability in Europe", "authors": [ "S. Moran", "Ido Nachum", "Itai Panasoff", "A. Yehudayoff" ], "externalIds": { "MAG": "3037931094", "ArXiv": "1806.05403", "PubMedCentral": "7309506", "DBLP": "journals/corr/abs-1806-05403", "DOI": "10.1007/978-3-030-51466-2_29", "CorpusId": 49210971 }, "url": "https://www.semanticscholar.org/paper/7d12e2ceab10c547ea567dd518f26a92f764e069", "referenceCount": 40, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Sample Compression for Real-Valued Learners", "abstract": "We give an algorithmically efficient version of the learner-to-compression scheme conversion in Moran and Yehudayoff (2016). In extending this technique to real-valued hypotheses, we also obtain an efficient regression-to-bounded sample compression converter. To our knowledge, this is the first general compressed regression result (regardless of efficiency or boundedness) guaranteeing uniform approximate reconstruction. Along the way, we develop a generic procedure for constructing weak real-valued learners out of abstract regressors; this may be of independent interest. In particular, this result sheds new light on an open question of H. Simon (1997). We show applications to two regression problems: learning Lipschitz and bounded-variation functions.", "year": 2018, "venue": "International Conference on Algorithmic Learning Theory", "authors": [ "Steve Hanneke", "A. Kontorovich", "Menachem Sadigurschi" ], "externalIds": { "DBLP": "conf/alt/HannekeKS19", "ArXiv": "1805.08254", "MAG": "2950777868", "CorpusId": 46897236 }, "url": "https://www.semanticscholar.org/paper/97ae934f7b4e3989023053942442251f43767e4b", "referenceCount": 54, "citationCount": 17, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Entropy-SGD optimizes the prior of a PAC-Bayes bound: Data-dependent PAC-Bayes priors via differential privacy", "abstract": "We show that Entropy-SGD (Chaudhari et al., 2017), when viewed as a learning algorithm, optimizes a PAC-Bayes bound on the risk of a Gibbs (posterior) classifier, i.e., a randomized classifier obtained by a risk-sensitive perturbation of the weights of a learned classifier. Entropy-SGD works by optimizing the bound’s prior, violating the hypothesis of the PAC-Bayes theorem that the prior is chosen independently of the data. Indeed, available implementations of Entropy-SGD rapidly obtain zero training error on random labels and the same holds of the Gibbs posterior. In order to obtain a valid generalization bound, we show that an e-differentially private prior yields a valid PAC-Bayes bound, a straightforward consequence of results connecting generalization with differential privacy. 
Using stochastic gradient Langevin dynamics (SGLD) to approximate the well-known exponential release mechanism, we observe that generalization error on MNIST (measured on held out data) falls within the (empirically nonvacuous) bounds computed under the assumption that SGLD produces perfect samples. In particular, Entropy-SGLD can be configured to yield relatively tight generalization bounds and still fit real labels, although these same settings do not obtain state-of-the-art performance.", "year": 2017, "venue": "arXiv.org", "authors": [ "G. Dziugaite", "Daniel M. Roy" ], "externalIds": { "DBLP": "journals/corr/abs-1712-09376", "ArXiv": "1712.09376", "MAG": "2777005400", "CorpusId": 3595524 }, "url": "https://www.semanticscholar.org/paper/72d2ba6ea99b74969da407419509e5616ac95d2a", "referenceCount": 51, "citationCount": 132, "influentialCitationCount": 22, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Sample compression schemes for VC classes", "abstract": "Sample compression schemes were defined by Littlestone and Warmuth (1986) as an abstraction of the structure underlying many learning algorithms. Roughly speaking, a sample compression scheme of size k means that given an arbitrary list of labeled examples, one can retain only k of them in a way that allows to recover the labels of all other examples in the list. They showed that compression implies PAC learnability for binary-labeled classes, and asked whether the other direction holds. We answer their question and show that every concept class C with VC dimension d has a sample compression scheme of size exponential in d. The proof uses an approximate minimax phenomenon for binary matrices of low VC dimension, which may be of interest in the context of game theory.", "year": 2015, "venue": "Information Theory and Applications Workshop", "authors": [ "S. Moran", "A. Yehudayoff" ], "externalIds": { "ArXiv": "1503.06960", "DBLP": "journals/jacm/MoranY16", "MAG": "2598044140", "DOI": "10.1145/2890490", "CorpusId": 641489 }, "url": "https://www.semanticscholar.org/paper/93477a252a8697c5a72bf66adad9c571022a7761", "referenceCount": 45, "citationCount": 87, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Deep Sparse Rectifier Neural Networks", "abstract": "While logistic sigmoid neurons are more biologically plausible than hyperbolic tangent neurons, the latter work better for training multi-layer neural networks. This paper shows that rectifying neurons are an even better model of biological neurons and yield equal or better performance than hyperbolic tangent networks in spite of the hard non-linearity and non-differentiability", "year": 2011, "venue": "International Conference on Artificial Intelligence and Statistics", "authors": [ "Xavier Glorot", "Antoine Bordes", "Yoshua Bengio" ], "externalIds": { "MAG": "2156387975", "DBLP": "journals/jmlr/GlorotBB11", "CorpusId": 2239473 }, "url": "https://www.semanticscholar.org/paper/67107f78a84bdb2411053cb54e94fa226eea6d8e", "referenceCount": 37, "citationCount": 7831, "influentialCitationCount": 420, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Geometric Approach to Sample Compression", "abstract": "The Sample Compression Conjecture of Littlestone & Warmuth has remained unsolved for a quarter century. 
While maximum classes (concept classes meeting Sauer's Lemma with equality) can be compressed, the compression of general concept classes reduces to compressing maximal classes (classes that cannot be expanded without increasing VC dimension). Two promising ways forward are: embedding maximal classes into maximum classes with at most a polynomial increase to VC dimension, and compression via operating on geometric representations. This paper presents positive results on the latter approach and a first negative result on the former, through a systematic investigation of finite maximum classes. Simple arrangements of hyperplanes in hyperbolic space are shown to represent maximum classes, generalizing the corresponding Euclidean result. We show that sweeping a generic hyperplane across such arrangements forms an unlabeled compression scheme of size VC dimension and corresponds to a special case of peeling the one-inclusion graph, resolving a recent conjecture of Kuzmin & Warmuth. A bijection between finite maximum classes and certain arrangements of piecewise-linear (PL) hyperplanes in either a ball or Euclidean space is established. Finally we show that d-maximum classes corresponding to PL-hyperplane arrangements in Rd have cubical complexes homeomorphic to a d-ball, or equivalently complexes that are manifolds with boundary. A main result is that PL arrangements can be swept by a moving hyperplane to unlabeled d-compress any finite maximum class, forming a peeling scheme as conjectured by Kuzmin & Warmuth. A corollary is that some d-maximal classes cannot be embedded into any maximum class of VC-dimension d+k, for any constant k. The construction of the PL sweeping involves Pachner moves on the one-inclusion graph, corresponding to moves of a hyperplane across the intersection of d other hyperplanes. This extends the well known Pachner moves for triangulations to cubical complexes.", "year": 2009, "venue": "Journal of machine learning research", "authors": [ "Benjamin I. P. Rubinstein", "J. Rubinstein" ], "externalIds": { "DBLP": "journals/corr/abs-0911-3633", "MAG": "2154955030", "ArXiv": "0911.3633", "DOI": "10.5555/2503308.2343686", "CorpusId": 2989975 }, "url": "https://www.semanticscholar.org/paper/117499c29435fd9804e742d08976ecb420774003", "referenceCount": 32, "citationCount": 37, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "PAC-Bayesian learning of linear classifiers", "abstract": "We present a general PAC-Bayes theorem from which all known PAC-Bayes risk bounds are obtained as particular cases. We also propose different learning algorithms for finding linear classifiers that minimize these bounds. These learning algorithms are generally competitive with both AdaBoost and the SVM.", "year": 2009, "venue": "International Conference on Machine Learning", "authors": [ "Pascal Germain", "A. Lacasse", "François Laviolette", "M. 
Marchand" ], "externalIds": { "MAG": "2149991487", "DBLP": "conf/icml/GermainLLM09", "DOI": "10.1145/1553374.1553419", "CorpusId": 1221684 }, "url": "https://www.semanticscholar.org/paper/66314ee0ab45982a3d811b134cb57a20f2b39444", "referenceCount": 8, "citationCount": 227, "influentialCitationCount": 37, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Sample compression bounds for decision trees", "abstract": "We propose a formulation of the Decision Tree learning algorithm in the Compression settings and derive tight generalization error bounds. In particular, we propose Sample Compression and Occam's Razor bounds. We show how such bounds, unlike the VC dimension or Rademacher complexities based bounds, are more general and can also perform a margin-sparsity trade-off to obtain better classifers. Potentially, these risk bounds can also guide the model selection process and replace traditional pruning strategies.", "year": 2007, "venue": "International Conference on Machine Learning", "authors": [ "Mohak Shah" ], "externalIds": { "MAG": "2100668620", "DBLP": "conf/icml/Shah07", "DOI": "10.1145/1273496.1273597", "CorpusId": 1372939 }, "url": "https://www.semanticscholar.org/paper/7d46d0144bbe9708a89844a8e7a9f4de212088c2", "referenceCount": 15, "citationCount": 10, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Learning with Decision Lists of Data-Dependent Features", "abstract": "We present a learning algorithm for decision lists which allows features that are constructed from the data and allows a trade-off between accuracy and complexity. We provide bounds on the generalization error of this learning algorithm in terms of the number of errors and the size of the classifier it finds on the training data. We also compare its performance on some natural data sets with the set covering machine and the support vector machine. Furthermore, we show that the proposed bounds on the generalization error provide effective guides for model selection.", "year": 2005, "venue": "Journal of machine learning research", "authors": [ "M. Marchand", "Marina Sokolova" ], "externalIds": { "DBLP": "journals/jmlr/MarchandS05", "MAG": "2164053026", "CorpusId": 5723406 }, "url": "https://www.semanticscholar.org/paper/e8f0196b2b427809af36252fa183637df2903894", "referenceCount": 23, "citationCount": 56, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Tutorial on Practical Prediction Theory for Classification", "abstract": "We discuss basic prediction theory and its impact on classification success evaluation, implications for learning algorithm design, and uses in learning algorithm execution. This tutorial is meant to be a comprehensive compilation of results which are both theoretically rigorous and quantitatively useful.There are two important implications of the results presented here. The first is that common practices for reporting results in classification should change to use the test set bound. The second is that train set bounds can sometimes be used to directly motivate learning algorithms.", "year": 2005, "venue": "Journal of machine learning research", "authors": [ "J. 
Langford" ], "externalIds": { "MAG": "2170207925", "DBLP": "journals/jmlr/Langford05", "CorpusId": 9564029 }, "url": "https://www.semanticscholar.org/paper/ccefaf0feb0120f764d20b4bb974f2994d3be7ed", "referenceCount": 32, "citationCount": 341, "influentialCitationCount": 60, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Margin-Sparsity Trade-Off for the Set Covering Machine", "abstract": null, "year": 2005, "venue": "European Conference on Machine Learning", "authors": [ "François Laviolette", "M. Marchand", "Mohak Shah" ], "externalIds": { "MAG": "1844922534", "DBLP": "conf/ecml/LavioletteMS05", "DOI": "10.1007/11564096_23", "CorpusId": 11862544 }, "url": "https://www.semanticscholar.org/paper/7c6104c9d4f7f5ea0de8113deecf0fff0c76aedb", "referenceCount": 17, "citationCount": 13, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Note on the PAC Bayesian Theorem", "abstract": "We prove general exponential moment inequalities for averages of [0,1]-valued iid random variables and use them to tighten the PAC Bayesian Theorem. The logarithmic dependence on the sample count in the enumerator of the PAC Bayesian bound is halved.", "year": 2004, "venue": "arXiv.org", "authors": [ "Andreas Maurer" ], "externalIds": { "ArXiv": "cs/0411099", "DBLP": "journals/corr/cs-LG-0411099", "MAG": "1628114069", "CorpusId": 7647951 }, "url": "https://www.semanticscholar.org/paper/56e7bff1e8ee48c8d8d43496853485809450931d", "referenceCount": 16, "citationCount": 179, "influentialCitationCount": 36, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "The Set Covering Machine with Data-Dependent Half-Spaces", "abstract": "We examine the set covering machine when it uses data-dependent half-spaces for its set of features and bound its generalization error in terms of the number of training errors and the number of half-spaces it achieves on the training data. We show that it provides a favorable alternative to data-dependent balls on some natural data sets. Compared to the support vector machine, the set covering machine with data-dependent half-spaces produces substantially sparser classifiers with comparable (and sometimes better) generalization. Furthermore, we show that our bound on the generalization error provides an effective guide for model selection.", "year": 2003, "venue": "International Conference on Machine Learning", "authors": [ "M. Marchand", "Mohak Shah", "J. Shawe-Taylor", "Marina Sokolova" ], "externalIds": { "DBLP": "conf/icml/MarchandSSS03", "MAG": "1645673120", "CorpusId": 8882138 }, "url": "https://www.semanticscholar.org/paper/67ad05e9db7ed05fbc5b2d56efe7f7c07459fc43", "referenceCount": 12, "citationCount": 11, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Set Covering Machine", "abstract": "We extend the classical algorithms of Valiant and Haussler for learning compact conjunctions and disjunctions of Boolean attributes to allow features that are constructed from the data and to allow a trade-off between accuracy and complexity. The result is a general-purpose learning machine, suitable for practical learning tasks, that we call the set covering machine. We present a version of the set covering machine that uses data-dependent balls for its set of features and compare its performance with the support vector machine. 
By extending a technique pioneered by Littlestone and Warmuth, we bound its generalization error as a function of the amount of data compression it achieves during training. In experiments with real-world learning tasks, the bound is shown to be extremely tight and to provide an effective guide for model selection.", "year": 2003, "venue": "Journal of machine learning research", "authors": [ "M. Marchand", "J. Shawe-Taylor" ], "externalIds": { "DBLP": "journals/jmlr/MarchandS02", "MAG": "2170708028", "CorpusId": 1511272 }, "url": "https://www.semanticscholar.org/paper/0fe97c88452d8d8603d9ba883a0721da46ba84f4", "referenceCount": 18, "citationCount": 143, "influentialCitationCount": 11, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PAC-Bayesian Generalisation Error Bounds for Gaussian Process Classification", "abstract": "Approximate Bayesian Gaussian process (GP) classification techniques are powerful non-parametric learning methods, similar in appearance and performance to support vector machines. Based on simple probabilistic models, they render interpretable results and can be embedded in Bayesian frameworks for model selection, feature selection, etc. In this paper, by applying the PAC-Bayesian theorem of McAllester (1999a), we prove distribution-free generalisation error bounds for a wide range of approximate Bayesian GP classification techniques. We also provide a new and much simplified proof for this powerful theorem, making use of the concept of convex duality which is a backbone of many machine learning techniques. We instantiate and test our bounds for two particular GPC techniques, including a recent sparse method which circumvents the unfavourable scaling of standard GP algorithms. As is shown in experiments on a real-world task, the bounds can be very tight for moderate training sample sizes. To the best of our knowledge, these results provide the tightest known distribution-free error bounds for approximate Bayesian GPC methods, giving a strong learning-theoretical justification for the use of these techniques.", "year": 2003, "venue": "Journal of machine learning research", "authors": [ "M. Seeger" ], "externalIds": { "MAG": "2168022998", "DBLP": "journals/jmlr/Seeger02", "DOI": "10.1162/153244303765208386", "CorpusId": 14632283 }, "url": "https://www.semanticscholar.org/paper/38bdbf7cf0572732bd21b299bdbaf2aab8da959d", "referenceCount": 58, "citationCount": 327, "influentialCitationCount": 34, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Some PAC-Bayesian Theorems", "abstract": null, "year": 1998, "venue": "COLT' 98", "authors": [ "David A. McAllester" ], "externalIds": { "MAG": "2014384147", "DBLP": "conf/colt/McAllester98", "DOI": "10.1023/A:1007618624809", "CorpusId": 11417123 }, "url": "https://www.semanticscholar.org/paper/0e2f6482e7230e1d12af88d6b8afcef3d5d733e3", "referenceCount": 7, "citationCount": 694, "influentialCitationCount": 85, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Sample compression, learnability, and the Vapnik-Chervonenkis dimension", "abstract": null, "year": 1995, "venue": "Machine-mediated learning", "authors": [ "Sally Floyd", "Manfred K. 
Warmuth" ], "externalIds": { "DBLP": "journals/ml/FloydW95", "MAG": "2150434953", "DOI": "10.1023/A:1022660318680", "CorpusId": 152849 }, "url": "https://www.semanticscholar.org/paper/06bc06d6998e5da31a53c1d0f2bd3c0ff9e29800", "referenceCount": 43, "citationCount": 297, "influentialCitationCount": 36, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "A training algorithm for optimal margin classifiers", "abstract": "A training algorithm that maximizes the margin between the training patterns and the decision boundary is presented. The technique is applicable to a wide variety of the classification functions, including Perceptrons, polynomials, and Radial Basis Functions. The effective number of parameters is adjusted automatically to match the complexity of the problem. The solution is expressed as a linear combination of supporting patterns. These are the subset of training patterns that are closest to the decision boundary. Bounds on the generalization performance based on the leave-one-out method and the VC-dimension are given. Experimental results on optical character recognition problems demonstrate the good generalization obtained when compared with other learning algorithms.", "year": 1992, "venue": "Annual Conference Computational Learning Theory", "authors": [ "B. Boser", "Isabelle M Guyon", "V. Vapnik" ], "externalIds": { "DBLP": "conf/colt/BoserGV92", "MAG": "2087347434", "DOI": "10.1145/130385.130401", "CorpusId": 207165665 }, "url": "https://www.semanticscholar.org/paper/4aaa30769ca49875f45670970130c088136986d1", "referenceCount": 39, "citationCount": 12367, "influentialCitationCount": 997, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "The perceptron: a probabilistic model for information storage and organization in the brain.", "abstract": "The first of these questions is in the province of sensory physiology, and is the only one for which appreciable understanding has been achieved. This article will be concerned primarily with the second and third questions, which are still subject to a vast amount of speculation, and where the few relevant facts currently supplied by neurophysiology have not yet been integrated into an acceptable theory. With regard to the second question, two alternative positions have been maintained. The first suggests that storage of sensory information is in the form of coded representations or images, with some sort of one-to-one mapping between the sensory stimulus", "year": 1958, "venue": "Psychology Review", "authors": [ "Frank Rosenblatt" ], "externalIds": { "MAG": "2040870580", "DOI": "10.1037/H0042519", "CorpusId": 12781225, "PubMed": "13602029" }, "url": "https://www.semanticscholar.org/paper/5d11aad09f65431b5d3cb1d85328743c9e53ba96", "referenceCount": 16, "citationCount": 10491, "influentialCitationCount": 440, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "The Pick-to-Learn Algorithm: Empowering Compression for Tight Generalization Bounds and Improved Post-training Performance", "abstract": "Generalization bounds are valuable both for theory and applications. On the one hand, they shed light on the mechanisms that underpin the learning processes; on the other, they certify how well a learned model performs against unseen inputs. 
In this work we build upon a recent breakthrough in compression theory (Campi & Garatti, 2023) to develop a new framework yielding tight generalization bounds of wide practical applicability. The core idea is to embed any given learning algorithm into a suitably-constructed meta-algorithm (here called Pick-to-Learn, P2L) in order to instill desirable compression properties. When applied to the MNIST classification dataset and to a synthetic regression problem, P2L not only attains generalization bounds that compare favorably with the state of the art (test-set and PAC-Bayes bounds), but it also learns models with better post-training performance.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Dario Paccagnan", "M. Campi", "S. Garatti" ], "externalIds": { "DBLP": "conf/nips/PaccagnanCG23", "CorpusId": 268030753 }, "url": "https://www.semanticscholar.org/paper/ab5cefab75f82ffb1d22571d01533910a85fe948", "referenceCount": 29, "citationCount": 1, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Efficient Conversion of Learners to Bounded Sample Compressors", "abstract": null, "year": 2018, "venue": "Proceedings of Machine Learning Research", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Supervised learning through the lens of compression", "abstract": "This work continues the study of the relationship between sample compression schemes and statistical learning, which has been mostly investigated within the framework of binary classification. We first extend the investigation to multiclass categorization: we prove that in this case learnability is equivalent to compression of logarithmic sample size and that the uniform convergence property implies compression of constant size. We use the compressibility-learnability equivalence to show that (i) for multiclass categorization, PAC and agnostic PAC learnability are equivalent, and (ii) to derive a compactness theorem for learnability. We then consider supervised learning under general loss functions: we show that in this case, in order to maintain the compressibility-learnability equivalence, it is necessary to consider an approximate variant of compression. We use it to show that PAC and agnostic PAC are not equivalent, even when the loss function has only three values.", "year": 2016, "venue": "Neural Information Processing Systems", "authors": [ "Ofir David", "S. Moran", "A. Yehudayoff" ], "externalIds": { "MAG": "2552222902", "DBLP": "conf/nips/DavidMY16", "CorpusId": 2252724 }, "url": "https://www.semanticscholar.org/paper/25180afc213ae865ca9d9538e3c5c187aa2cc79e", "referenceCount": 0, "citationCount": 41, "influentialCitationCount": 9, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Généralisations de la théorie PAC-bayésienne pour l'apprentissage inductif, l'apprentissage transductif et l'adaptation de domaine", "abstract": null, "year": 2015, "venue": "", "authors": [ "P. 
Germain" ], "externalIds": { "MAG": "2802658078", "CorpusId": 171794415 }, "url": "https://www.semanticscholar.org/paper/7b5a7fc6ddf993c4c94db64f22ea3a7f8b3a9d18", "referenceCount": 0, "citationCount": 3, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "A method for stochastic optimization", "abstract": null, "year": 2015, "venue": "3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Dropout: a simple way to prevent neural networks from overfitting", "abstract": "Deep neural nets with a large number of parameters are very powerful machine learning systems. However, overfitting is a serious problem in such networks. Large networks are also slow to use, making it difficult to deal with overfitting by combining the predictions of many different large neural nets at test time. Dropout is a technique for addressing this problem. The key idea is to randomly drop units (along with their connections) from the neural network during training. This prevents units from co-adapting too much. During training, dropout samples from an exponential number of different \"thinned\" networks. At test time, it is easy to approximate the effect of averaging the predictions of all these thinned networks by simply using a single unthinned network that has smaller weights. This significantly reduces overfitting and gives major improvements over other regularization methods. We show that dropout improves the performance of neural networks on supervised learning tasks in vision, speech recognition, document classification and computational biology, obtaining state-of-the-art results on many benchmark data sets.", "year": 2014, "venue": "Journal of machine learning research", "authors": [ "Nitish Srivastava", "Geoffrey E. Hinton", "A. Krizhevsky", "I. Sutskever", "R. Salakhutdinov" ], "externalIds": { "DBLP": "journals/jmlr/SrivastavaHKSS14", "MAG": "2095705004", "DOI": "10.5555/2627435.2670313", "CorpusId": 6844431 }, "url": "https://www.semanticscholar.org/paper/34f25a8704614163c4095b3ee2fc969b60de4698", "referenceCount": 38, "citationCount": 37539, "influentialCitationCount": 3287, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Relating Data Compression and Learnability", "abstract": "We explore the learnability of two-valued functions from samples using the paradigm of Data Compression. A first algorithm (compression) choses a small subset of the sample which is called the kernel. A second algorithm predicts future values of the function from the kernel, i.e. the algorithm acts as an hypothesis for the function to be learned. The second algorithm must be able to reconstruct the correct function values when given a point of the original sample. We demonstrate that the existence of a suitable data compression scheme is sufficient to ensure learnability. We express the probability that the hypothesis predicts the function correctly on a random sample point as a function of the sample and kernel sizes. No assumptions are made on the probability distributions according to which the sample points are generated. This approach provides an alternative to that of [BEHW86], which uses the Vapnik-Chervonenkis dimension to classify learnable geometric concepts. 
Our bounds are derived directly from the kernel size of the algorithms rather than from the Vapnik-Chervonenkis dimension of the hypothesis class. The proofs are simpler and the introduced compression scheme provides a rigorous model for studying data compression in connection with machine learning.", "year": 2003, "venue": "", "authors": [ "N. Littlestone", "Manfred K. Warmuth" ], "externalIds": { "MAG": "65382156", "CorpusId": 9780485 }, "url": "https://www.semanticscholar.org/paper/0df167e42f204cd6effcc6a7b423e7435f407290", "referenceCount": 10, "citationCount": 283, "influentialCitationCount": 45, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Random Forests", "abstract": null, "year": 2001, "venue": "Machine-mediated learning", "authors": [ "L. Breiman" ], "externalIds": { "MAG": "2911964244", "DBLP": "reference/ml/X17sy", "DOI": "10.1023/A:1010933404324", "CorpusId": 89141 }, "url": "https://www.semanticscholar.org/paper/8e0be569ea77b8cb29bb0e8b031887630fe7a96c", "referenceCount": 25, "citationCount": 89809, "influentialCitationCount": 5836, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Bounds for averaging classifiers", "abstract": null, "year": 2001, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Gradient-based learning applied to document recognition", "abstract": "Multilayer neural networks trained with the back-propagation algorithm constitute the best example of a successful gradient based learning technique. Given an appropriate network architecture, gradient-based learning algorithms can be used to synthesize a complex decision surface that can classify high-dimensional patterns, such as handwritten characters, with minimal preprocessing. This paper reviews various methods applied to handwritten character recognition and compares them on a standard handwritten digit recognition task. Convolutional neural networks, which are specifically designed to deal with the variability of 2D shapes, are shown to outperform all other techniques. Real-life document recognition systems are composed of multiple modules including field extraction, segmentation recognition, and language modeling. A new learning paradigm, called graph transformer networks (GTN), allows such multimodule systems to be trained globally using gradient-based methods so as to minimize an overall performance measure. Two systems for online handwriting recognition are described. Experiments demonstrate the advantage of global training, and the flexibility of graph transformer networks. A graph transformer network for reading a bank cheque is also described. It uses convolutional neural network character recognizers combined with global training techniques to provide record accuracy on business and personal cheques. It is deployed commercially and reads several million cheques per day.", "year": 1998, "venue": "Proceedings of the IEEE", "authors": [ "Yann LeCun", "L. Bottou", "Yoshua Bengio", "P. 
Haffner" ], "externalIds": { "MAG": "2112796928", "DBLP": "journals/pieee/LeCunBBH98", "DOI": "10.1109/5.726791", "CorpusId": 14542261 }, "url": "https://www.semanticscholar.org/paper/162d958ff885f1462aeda91cd72582323fd6a1f4", "referenceCount": 149, "citationCount": 49590, "influentialCitationCount": 5893, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Chervonenkis: On the uniform convergence of relative frequencies of events to their probabilities", "abstract": null, "year": 1971, "venue": "", "authors": [ "V. Vapnik" ], "externalIds": { "MAG": "2029538739", "DOI": "10.1007/978-3-319-21852-6_3", "CorpusId": 8142232 }, "url": "https://www.semanticscholar.org/paper/a36b028d024bf358c4af1a5e1dc3ca0aed23b553", "referenceCount": 4, "citationCount": 4159, "influentialCitationCount": 367, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Propriétés locales des fonctions à séries de Fourier aléatoires", "abstract": null, "year": 1960, "venue": "", "authors": [ "J. Kahane" ], "externalIds": { "MAG": "1422028892", "DOI": "10.4064/SM-19-1-1-25", "CorpusId": 122468668 }, "url": "https://www.semanticscholar.org/paper/7b6a6d44202f85fcdcb7a36a5aedf21bcff60c92", "referenceCount": 0, "citationCount": 125, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] } ] }, "Intelligent Energy Management: Remaining Useful Life Prediction and Charging Automation System Comprised of Deep Learning and the Internet of Things": { "paper_title": "Intelligent Energy Management: Remaining Useful Life Prediction and Charging Automation System Comprised of Deep Learning and the Internet of Things", "arxiv_id": "2409.17931v1", "keyword": "deep learning", "authors": [ "Biplov Paneru", "Bishwash Paneru", "DP Sharma Mainali" ], "references": [ { "title": "Probabilistic machine learning for battery health diagnostics and prognostics—review and perspectives", "abstract": null, "year": 2024, "venue": "npj Materials Sustainability", "authors": [ "Adam Thelen", "Xun Huan", "Noah Paulson", "Simona Onori", "Zhen Hu", "Chao Hu" ], "externalIds": { "DOI": "10.1038/s44296-024-00011-1", "CorpusId": 270230797 }, "url": "https://www.semanticscholar.org/paper/294716110ced9a83fa19804651c1c73102782c36", "referenceCount": 229, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Review of Various Machine Learning Approaches for Predicting Parameters of Lithium-Ion Batteries in Electric Vehicles", "abstract": "Battery management systems (BMSs) play a critical role in electric vehicles (EVs), relying heavily on two essential factors: the state of charge (SOC) and state of health (SOH). However, accurately estimating the SOC and SOH in lithium-ion (Li-ion) batteries remains a challenge. To address this, many researchers have turned to machine learning (ML) techniques. This study provides a comprehensive overview of both BMSs and ML, reviewing the latest research on popular ML methods for estimating the SOC and SOH. Additionally, it highlights the challenges involved. 
Beyond traditional models like equivalent circuit models (ECMs) and electrochemical battery models, this review emphasizes the prevalence of a support vector machine (SVM), fuzzy logic (FL), k-nearest neighbors (KNN) algorithm, genetic algorithm (GA), and transfer learning in SOC and SOH estimation.", "year": 2024, "venue": "Batteries", "authors": [ "Chunlai Shan", "Cheng Siong Chin", "Venkateshkumar Mohan", "Caizhi Zhang" ], "externalIds": { "DOI": "10.3390/batteries10060181", "CorpusId": 270073124 }, "url": "https://www.semanticscholar.org/paper/39df91330daaa95c710b79357ccbe434d80c16eb", "referenceCount": 144, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Comparative Analysis of Commonly Used Machine Learning Approaches for Li-Ion Battery Performance Prediction and Management in Electric Vehicles", "abstract": "The significant role of Li-ion batteries (LIBs) in electric vehicles (EVs) emphasizes their advantages in terms of energy density, being lightweight, and being environmentally sustainable. Despite their obstacles, such as costs, safety concerns, and recycling challenges, LIBs are crucial in terms of the popularity of EVs. The accurate prediction and management of LIBs in EVs are essential, and machine learning-based methods have been explored in order to estimate parameters such as the state of charge (SoC), the state of health (SoH), and the state of power (SoP). Various machine learning techniques, including support vector machines, decision trees, and deep learning, have been employed for predicting LIB states. This study proposes a methodology for comparative analysis, focusing on classical and deep learning approaches, and discusses enhancements to the LSTM (long short-term memory) and Bi-LSTM (bidirectional long short-term memory) methods. Evaluation metrics such as MSE, MAE, RMSE, and R-squared are applied to assess the proposed methods’ performances. The study aims to contribute to technological advancements in the electric vehicle industry by predicting the performance of LIBs. The structure of the rest of the study is outlined, covering materials and methods, LIB data preparation, analysis, the proposal of machine learning models, evaluations, and concluding remarks, with recommendations for future studies.", "year": 2024, "venue": "Applied Sciences", "authors": [ "Saadin Oyucu", "Ferdi Doğan", "Ahmet Aksöz", "Emre Biçer" ], "externalIds": { "DOI": "10.3390/app14062306", "CorpusId": 268452687 }, "url": "https://www.semanticscholar.org/paper/360ee5c394d4dc893690cad357130aa231e6c6f4", "referenceCount": 39, "citationCount": 10, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Battery Voltage Prediction Technology Using Machine Learning Model with High Extrapolation Accuracy", "abstract": "Battery performance prediction techniques based on machine learning (ML) models and lithium-ion battery (LIB) data collected in the real world have received much attention recently. However, poor extrapolation accuracy is a major challenge for ML models using real-world data, as the data frequency distribution can be uneven. Here, we have investigated the extrapolation accuracy of the ML models by using artificial data generated with an electrochemical simulation model. Specifically, we set a lower open circuit voltage (OCV) limit for the training data and generated data limited to the higher state of charge (SOC) region to train the voltage prediction model. 
We have validated the root mean squared error (RMSE) of the voltage for the test data at several lower OCV limit settings and defined the average + 3 standard deviations of them as an evaluation metric. Eight representative ML models were evaluated, and it was found that the multilayer perceptron (MLP) showed an accuracy of 92.7 mV, which was the best extrapolation accuracy. We also evaluated models with published experimental data and found that the MLP had an accuracy of 102.4 mV, reconfirming that it had the best extrapolation accuracy. We also found that MLP was robust to changes in the data of interest since the accuracy degradation when changing from simulation to experimental data was as small as a factor of 1.1. This result shows that MLP can achieve higher voltage prediction accuracy even when collecting data for comprehensive SOC conditions is difficult.", "year": 2023, "venue": "International Journal of Energy Research", "authors": [ "Takuma Kawahara", "Koji Sato", "Yuki Sato" ], "externalIds": { "DOI": "10.1155/2023/5513446", "CorpusId": 265217012 }, "url": "https://www.semanticscholar.org/paper/5b01e2e215b29f8099cbd4828cc28bf09012a985", "referenceCount": 43, "citationCount": 2, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Machine Learning Assistive State of Charge Estimation of Li-Ion Battery", "abstract": "For an effective and economical deployment of battery-powered electric vehicles, mobile phones, laptops, and medical gadgets, the State of Charge (SoC) of the batteries must be properly assessed. It permits a safe operation, have a longer usable battery life, and prevent malfunctions. In this context, the battery management systems provide diverse SoC estimation solutions. However, the Machine Learning (ML) based SoC estimation mechanisms are becoming popular because of their robustness and higher precision. In this study, the features set is prepared using the intended battery cell charge/discharge curves for voltage, current, and temperature. Utilizing statistical analysis and the shape context, the attributes are extracted. Following that, three credible machine learning (ML) algorithms-decision trees, random forests, and linear regression-process the set of mined attributes. The applicability is tested using the Panasonic Lithium-Ion (Li-Ion) battery cells, publicly provided by the McMaster University. The feature extraction and the ML based SoC prediction modules are implemented in MATLAB. The “correlation coefficient”, “mean absolute error”, and “root mean square error” are used to assess the prediction performance. The results show an outperformance of the random forest regressor among the intended ones by attaining the correlation coefficient value of 0.9988.", "year": 2023, "venue": "2023 IEEE 13th International Conference on Electronics and Information Technologies (ELIT)", "authors": [ "Saeed Mian Qaisar", "A. Alboody", "Shahad Aldossary", "Alhanoof Alhamdan", "Nouf Moahammad", "A. 
Almaktoom" ], "externalIds": { "DOI": "10.1109/ELIT61488.2023.10310833", "CorpusId": 265133653 }, "url": "https://www.semanticscholar.org/paper/c0f5a743e056b19dc715f6d08e6c226335e04a2b", "referenceCount": 23, "citationCount": 1, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Battery Charge Curve Prediction via Feature Extraction and Supervised Machine Learning", "abstract": "Real‐time onboard state monitoring and estimation of a battery over its lifetime is indispensable for the safe and durable operation of battery‐powered devices. In this study, a methodology to predict the entire constant‐current cycling curve with limited input information that can be collected in a short period of time is developed. A total of 10 066 charge curves of LiNiO2‐based batteries at a constant C‐rate are collected. With the combination of a feature extraction step and a multiple linear regression step, the method can accurately predict an entire battery charge curve with an error of < 2% using only 10% of the charge curve as the input information. The method is further validated across other battery chemistries (LiCoO2‐based) using open‐access datasets. The prediction error of the charge curves for the LiCoO2‐based battery is around 2% with only 5% of the charge curve as the input information, indicating the generalization of the developed methodology for predicting battery cycling curves. The developed method paves the way for fast onboard health status monitoring and estimation for batteries during practical applications.", "year": 2023, "venue": "Advancement of science", "authors": [ "Laisuo Su", "Shuyan Zhang", "A. McGaughey", "B. Reeja‐Jayan", "A. Manthiram" ], "externalIds": { "PubMedCentral": "10502833", "DOI": "10.1002/advs.202301737", "CorpusId": 259663831, "PubMed": "37394730" }, "url": "https://www.semanticscholar.org/paper/88fb584f1940e87ef4b5a911c1974891006b1e07", "referenceCount": 35, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Prediction of Born effective charges using neural network to study ion migration under electric fields: applications to crystalline and amorphous Li3PO4", "abstract": "ABSTRACT Understanding ionic behaviour under external electric fields is crucial to develop electronic and energy-related devices using ion transport. In this study, we propose a neural network (NN) model to predict the Born effective charges of ions along an axis parallel to an applied electric field from atomic structures. The proposed NN model is applied to Li3PO4 as a prototype. The prediction error of the constructed NN model is 0.0376 $e$e/atom. In combination with an NN interatomic potential, molecular dynamics (MD) simulations are performed under a uniform electric field of 0.1 V/Å, whereby an enhanced mean square displacement of Li along the electric field is obtained, which seems physically reasonable. In addition, the external forces along the direction perpendicular to the electric field, originating from the off-diagonal terms of the Born effective charges, are found to have a nonnegligible effect on Li migration. Finally, additional MD simulations are performed to examine the Li motion in an amorphous structure. The results reveal that Li migration occurs in various areas despite the absence of explicitly introduced defects, which may be attributed to the susceptibility of the Li ions in the local minima to the electric field. 
We expect that the proposed NN method can be applied to any ionic material, thereby leading to atomic-scale elucidation of ion behaviour under electric fields. Impact statement: This study introduces a new computational scheme for analysing ion behaviour in solids under electric fields, through the development of a neural network model to predict the Born effective charges.", "year": 2023, "venue": "Science and Technology of Advanced Materials: Methods", "authors": [ "Koji Shimizu", "Ryuji Otsuka", "M. Hara", "E. Minamitani", "S. Watanabe" ], "externalIds": { "ArXiv": "2305.19546", "DOI": "10.1080/27660400.2023.2253135", "CorpusId": 258987997 }, "url": "https://www.semanticscholar.org/paper/9c4f555ef91725cc5b58a1f1587536db3cb8efd0", "referenceCount": 36, "citationCount": 5, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Physics" ] }, { "title": "Estimating Battery State-of-Charge using Machine Learning and Physics-Based Models", "abstract": "Lithium-ion and Lithium polymer batteries are fast becoming ubiquitous in high-discharge rate applications for military and non-military systems. Applications such as small aerial vehicles and energy transfer systems can often function at C-rates greater than 1. To maximize system endurance and battery health, there is a need for models capable of precisely estimating the battery state-of-charge (SoC) under all temperature and loading conditions. However, the ability to perform state estimation consistently and accurately to within 1% error has remained unsolved. Doing so can offer enhanced endurance, safety, reliability, and planning, and additionally, simplify energy management. Therefore, the work presented in this paper aims to study and develop experimentally validated mathematical models capable of high-accuracy battery SoC estimation. In this work, experiments are performed with Lithium Polymer battery cells to measure performance parameters such as current, battery capacity, temperatures, and voltage. Next, physics-based and machine learning modeling approaches are developed to study their ability to predict SoC. Measurements performed at high C-rates (1C – 4C) are used for model training and calibration, validation, and testing. The results show that the Pseudo-2D electrochemical model can predict SoC within about 2 % root-mean-squared-error (RMSE) at different C-rates. However, the Feed Forward Neural Network modeling approach with Butterworth and Hampel filters achieved an RMSE lower than, and close to, 1 % for battery SoC estimations.", "year": 2023, "venue": "SAE technical paper series", "authors": [ "H. Sapra", "Michael Wagner", "S. Kokjohn", "Lukas Desorcy", "Sahana Upadhya", "Chol-Bum M. Kweon", "S. Venkataraman", "J. Shumaker", "Olesia Elfimova" ], "externalIds": { "DOI": "10.4271/2023-01-0522", "CorpusId": 258032825 }, "url": "https://www.semanticscholar.org/paper/1000bf7f1b929d3a4e7b51e771ba0adbeb5bbf8a", "referenceCount": 12, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Hydrogen-Fuel Cell Hybrid Powertrain: Conceptual Layouts and Current Applications", "abstract": "Transportation is one of the largest sources of CO2 emissions, accounting for more than 20% of worldwide emissions. However, it is one of the areas where decarbonization presents the greatest hurdles, owing to its capillarity and the benefits that are associated with the use of fossil fuels in terms of energy density, storage, and transportation.
In order to accomplish comprehensive decarbonization in the transport sector, it will be required to encourage a genuine transition to low-carbon fuels and the widespread deployment of the necessary infrastructures to allow for a large-scale innovation. Renewable hydrogen shows potential for sustainable transportation applications, whether in fuel cell electric vehicles (FCEVs), such as automobiles, trucks, and trains, or as a raw material for ship and airplane synthetic fuels. The present paper aims to present how hydrogen-fuel cell hybrid powertrains for road vehicles work in terms of conceptual layouts and operating strategies. A comprehensive overview of real and current applications is presented, concerning existing prototypes and commercially available vehicles, with a focus on the main key performance indicators, such as efficiency, mileage, and energy consumption.", "year": 2022, "venue": "Machines", "authors": [ "P. Fragiacomo", "M. Genovese", "F. Piraino", "O. Corigliano", "G. De Lorenzo" ], "externalIds": { "DOI": "10.3390/machines10121121", "CorpusId": 256790612 }, "url": "https://www.semanticscholar.org/paper/ef7134d0abd8b8dca4e3b14b0b1aac633be9216d", "referenceCount": 37, "citationCount": 17, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Machine Learning approach for Prediction of residual energy in batteries", "abstract": null, "year": 2022, "venue": "Energy Reports", "authors": [ "T. Jayakumar", "Natesh M. Gowda", "R. Sujatha", "Shankar Nayak Bhukya", "G. Padmapriya", "S. Radhika", "V. Mohanavel", "M. Sudhakar", "R. Sathyamurthy" ], "externalIds": { "DOI": "10.1016/j.egyr.2022.10.027", "CorpusId": 253037873 }, "url": "https://www.semanticscholar.org/paper/b165e63acc1fdcba95054d93293ea0567c085baf", "referenceCount": 24, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Machine learning-based model development for battery state of charge–open circuit voltage relationship using regression techniques", "abstract": null, "year": 2022, "venue": "Journal of Energy Storage", "authors": [ "S. Siva Suriya Narayanan", "S. Thangavel" ], "externalIds": { "DOI": "10.1016/j.est.2022.104098", "CorpusId": 246744930 }, "url": "https://www.semanticscholar.org/paper/027cd225995dcdb5c6711b69e1ce50d997768bc2", "referenceCount": 25, "citationCount": 15, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Advanced Machine Learning Approaches for State-of-Charge Prediction of Li-ion Batteries under Multisine Excitation", "abstract": "The technological progress of e-Mobility has led to an increased use of Electric Vehicles (EVs), which today satisfies the majority of the customers’ demands. Lithium-ion batteries are widely employed in EVs due to their attractive properties. However, the lithium cells, especially in urban traffic are subjected to intense dynamic loads, with a small share of static operation. The Battery Management System (BMS) has a key role in the EV’s energy management system and in ensuring reliable operation. Since, a major task of a BMS is to determine the State-of-Charge (SOC) of the cell pack. Its accuracy depends on the model used in the BMS. Typically, simple empirical models are applied for this purpose. Furthermore, these models are parameterized by using standard test measurement data. The SOC prediction issue is still a concern of much research. 
In the field of energy storage systems, Machine Learning (ML) has recently emerged as a well-established modeling approach. This paper presents a comparative analysis of the performance of State-of-the-Art Machine Learning approaches in SOC forecasting with regression, under dynamic loads. Data is generated by performing a unique dynamic charge/discharge test applying multisine signals. The results confirm that a great advantage of Advanced ML models is that they are able to capture important relationships between the variables of interest. Investigations have supported that the State-of-the-Art ML techniques outperform classical ML approaches and are powerful in SOC prediction problems due to their capability of storing past information and catching the cell dynamics, which is critical in predicting the future charge levels.", "year": 2021, "venue": "2021 17th Conference on Electrical Machines, Drives and Power Systems (ELMA)", "authors": [ "A. Dineva" ], "externalIds": { "DOI": "10.1109/ELMA52514.2021.9502962", "CorpusId": 236939858 }, "url": "https://www.semanticscholar.org/paper/b604d983157f7cda5a217ac1153088ff6d094bab", "referenceCount": 0, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "A review and research on fuel cell electric vehicles: Topologies, power electronic converters, energy management methods, technical challenges, marketing and future aspects", "abstract": null, "year": 2021, "venue": "", "authors": [ "Mustafa Inci", "Mehmet Büyük", "M. Demir", "Göktürk İlbey" ], "externalIds": { "MAG": "3112666407", "DOI": "10.1016/j.rser.2020.110648", "CorpusId": 230555055 }, "url": "https://www.semanticscholar.org/paper/ea6a3fac15ceec354929ad2c87c40020e3b13091", "referenceCount": 296, "citationCount": 310, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The scope for improving the efficiency and environmental impact of internal combustion engines", "abstract": null, "year": 2020, "venue": "Transport Engineer", "authors": [ "F. Leach", "G. Kalghatgi", "R. Stone", "P. Miles" ], "externalIds": { "MAG": "3031542079", "DOI": "10.1016/j.treng.2020.100005", "CorpusId": 219782870 }, "url": "https://www.semanticscholar.org/paper/4dc497b1eebaeca0a00f3acf2a2a6cd8302b8e3f", "referenceCount": 131, "citationCount": 274, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Environmental Science" ] }, { "title": "Predicting the state of charge and health of batteries using data-driven machine learning", "abstract": null, "year": 2020, "venue": "Nature Machine Intelligence", "authors": [ "M. Ng", "Jin Zhao", "Qingyu Yan", "G. Conduit", "Z. Seh" ], "externalIds": { "MAG": "3009652674", "DBLP": "journals/natmi/NgZYCS20", "DOI": "10.1038/s42256-020-0156-7", "CorpusId": 215947098 }, "url": "https://www.semanticscholar.org/paper/86788a38467e1f2f1df713f5d5694bfee9f8ae29", "referenceCount": 132, "citationCount": 390, "influentialCitationCount": 9, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Overview of the next quarter century vision of hydrogen fuel cell electric vehicles", "abstract": null, "year": 2019, "venue": "International journal of hydrogen energy", "authors": [ "Bahattin Tanç", "H. T. Arat", "E. Baltacioǧlu", "K.
Aydın" ], "externalIds": { "MAG": "2900013330", "DOI": "10.1016/J.IJHYDENE.2018.10.112", "CorpusId": 106384977 }, "url": "https://www.semanticscholar.org/paper/1ac5de0e5f19ef8631990269cd2f65ffb707f57e", "referenceCount": 65, "citationCount": 263, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Engineering" ] }, { "title": "Green Principles for Vehicle Lightweighting.", "abstract": "A large portion of life cycle transportation impacts occur during vehicle operation, and key improvement strategies include increasing powertrain efficiency, vehicle electrification, and lightweighting vehicles by reducing their mass. The potential energy benefits of vehicle lightweighting are large, given that 29.5 EJ was used in all modes of U.S. transportation in 2016, and roughly half of the energy spent in wheeled transportation and the majority of energy spent in aircraft is used to move vehicle mass. We collect and review previous work on lightweighting, identify key parameters affecting vehicle environmental performance (e.g., vehicle mode, fuel type, material type, and recyclability), and propose a set of 10 principles, with examples, to guide environmental improvement of vehicle systems through lightweighting. These principles, based on a life cycle perspective and taken as a set, allow a wide range of stakeholders (designers, policy-makers, and vehicle manufacturers and their material and component suppliers) to evaluate the trade-offs inherent in these complex systems. This set of principles can be used to evaluate trade-offs between impact categories and to help avoid shifting of burdens to other life cycle phases in the process of improving use-phase environmental performance.", "year": 2019, "venue": "Environmental Science and Technology", "authors": [ "G. Lewis", "Cailin A Buchanan", "Krutarth Jhaveri", "J. Sullivan", "J. Kelly", "Sujit Das", "A. Taub", "G. Keoleian" ], "externalIds": { "MAG": "2925274650", "DOI": "10.1021/acs.est.8b05897", "CorpusId": 84844736, "PubMed": "30892881" }, "url": "https://www.semanticscholar.org/paper/00ab348e3a451e530806ff835cfa6948e4e8268d", "referenceCount": 130, "citationCount": 41, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "A new approach to battery powered electric vehicles: A hydrogen fuel-cell-based range extender system", "abstract": null, "year": 2016, "venue": "", "authors": [ "R. A. Fernández", "Fernando Beltrán Cilleruelo", "I. Martínez" ], "externalIds": { "MAG": "2254820224", "DOI": "10.1016/J.IJHYDENE.2016.01.035", "CorpusId": 2983906 }, "url": "https://www.semanticscholar.org/paper/17f431a1b9077b7c7cf652a23030b0d9d3ee8470", "referenceCount": 24, "citationCount": 96, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Chemistry" ] }, { "title": "Assessment of alternative fuel and powertrain transit bus options using real-world operations data: Life-cycle fuel and emissions modeling", "abstract": null, "year": 2015, "venue": "", "authors": [ "Yanzhi Xu", "F. E. Gbologah", "Dong-Yeon Lee", "Haobing Liu", "M. Rodgers", "R. 
Guensler" ], "externalIds": { "MAG": "1827589999", "DOI": "10.1016/J.APENERGY.2015.04.112", "CorpusId": 109129303 }, "url": "https://www.semanticscholar.org/paper/699dd9ffff6736852ea5d1ec6b7bd0c94b033dd9", "referenceCount": 38, "citationCount": 111, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering" ] }, { "title": "Design and development of a multipurpose utility AWD electric vehicle with a hybrid powertrain based on PEM fuel cells and batteries", "abstract": null, "year": 2012, "venue": "", "authors": [ "F. Barreras", "M. Maza", "A. Lozano", "Sergio Báscones", "V. Roda", "J. Barranco", "M. Cerqueira", "Arturo Vergés" ], "externalIds": { "MAG": "2050719357", "DOI": "10.1016/J.IJHYDENE.2012.06.091", "CorpusId": 98553776 }, "url": "https://www.semanticscholar.org/paper/3e0894383bd8107149035253c076e5010db855a8", "referenceCount": 24, "citationCount": 27, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Manganese oxide-based multifunctionalized mesoporous silica nanoparticles for pH-responsive MRI, ultrasonography and circumvention of MDR in cancer cells.", "abstract": null, "year": 2012, "venue": "Biomaterials", "authors": [ "Yu Chen", "Qi Yin", "X. Ji", "Shengjian Zhang", "Hangrong Chen", "Yuanyi Zheng", "Yang Sun", "Haiyun Qu", "Zheng Wang", "Yaping Li", "Xia Wang", "Kun Zhang", "Linlin Zhang", "Jianlin Shi" ], "externalIds": { "MAG": "2032336447", "DOI": "10.1016/j.biomaterials.2012.06.059", "CorpusId": 5573364, "PubMed": "22789722" }, "url": "https://www.semanticscholar.org/paper/8c0f747d00fae4331186b37cf4a2525762aab877", "referenceCount": 41, "citationCount": 260, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Materials Science", "Medicine" ] }, { "title": "Vehicle Design Strategies to Meet and Exceed PNGV Goals", "abstract": "........................................................................................................................", "year": 1995, "venue": "", "authors": [ "T. Moore", "A. Lovins" ], "externalIds": { "MAG": "1549952861", "DOI": "10.4271/951906", "CorpusId": 4383764 }, "url": "https://www.semanticscholar.org/paper/aa3dbbd7f366a920e7a5cd5eb4859283f283d474", "referenceCount": 84, "citationCount": 79, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Engineering" ] }, { "title": "Battery Remaining Useful Life (RUL) Dataset", "abstract": null, "year": 2023, "venue": "Kaggle", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“Alternative Fuels Data Center: Fuel Cell Electric Vehicles,”", "abstract": null, "year": 2019, "venue": "Energy.gov", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Hydrogen Mobility Europe, “FCEVs", "abstract": null, "year": 2015, "venue": "“FCEVs |", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Joint Source-Channel Coding: Fundamentals and Recent Progress in Practical Designs": { "paper_title": "Joint Source-Channel Coding: Fundamentals and Recent Progress in Practical Designs", "arxiv_id": "2409.17557v1", "keyword": "deep learning", "authors": [ "Deniz Gündüz", "Michèle A. 
Wigger", "Tze-Yang Tung", "Ping Zhang", "Yong Xiao" ], "references": [ { "title": "A Deep Joint Source-Channel Coding Scheme for Hybrid Mobile Multi-hop Networks", "abstract": "Efficient data transmission across mobile multi-hop networks that connect edge devices to core servers presents significant challenges, particularly due to the variability in link qualities between wireless and wired segments. This variability necessitates a robust transmission scheme that transcends the limitations of existing deep joint source-channel coding (DeepJSCC) strategies, which often struggle at the intersection of analog and digital methods. Addressing this need, this paper introduces a novel hybrid DeepJSCC framework, h-DJSCC, tailored for effective image transmission from edge devices through a network architecture that includes initial wireless transmission followed by multiple wired hops. Our approach harnesses the strengths of DeepJSCC for the initial, variable-quality wireless link to avoid the cliff effect inherent in purely digital schemes. For the subsequent wired hops, which feature more stable and high-capacity connections, we implement digital compression and forwarding techniques to prevent noise accumulation. This dual-mode strategy is adaptable even in scenarios with limited knowledge of the image distribution, enhancing the framework's robustness and utility. Extensive numerical simulations demonstrate that our hybrid solution outperforms traditional fully digital approaches by effectively managing transitions between different network segments and optimizing for variable signal-to-noise ratios (SNRs). We also introduce a fully adaptive h-DJSCC architecture capable of adjusting to different network conditions and achieving diverse rate-distortion objectives, thereby reducing the memory requirements on network nodes.", "year": 2024, "venue": "", "authors": [ "Chenghong Bian", "Yulin Shao", "Deniz Gunduz" ], "externalIds": { "ArXiv": "2405.09698", "CorpusId": 269791052 }, "url": "https://www.semanticscholar.org/paper/161c39571a4aaf9af461b206a78bcf6d225245c1", "referenceCount": 25, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Engineering" ] }, { "title": "Process-and-Forward: Deep Joint Source-Channel Coding Over Cooperative Relay Networks", "abstract": "We introduce deep joint source-channel coding (DeepJSCC) schemes for image transmission over cooperative relay channels. The relay either amplifies-and-forwards its received signal, called DeepJSCC-AF, or leverages neural networks to extract relevant features from its received signal, called DeepJSCC-PF (Process-and-Forward). We consider both half- and full-duplex relays, and propose a novel transformer-based model at the relay. For a half-duplex relay, it is shown that the proposed scheme learns to generate correlated signals at the relay and source to obtain beamforming gains. In the full-duplex case, we introduce a novel block-based transmission strategy, in which the source transmits in blocks, and the relay updates its knowledge about the input signal after each block and generates its own signal. To enhance practicality, a single transformer-based model is used at the relay at each block, together with an adaptive transmission module, which allows the model to seamlessly adapt to different channel qualities and the transmission powers}. 
Simulation results demonstrate the superior performance of DeepJSCC-PF compared to the state-of-the-art BPG image compression algorithm operating at the maximum achievable rate of conventional decode-and-forward and compress-and-forward protocols, in both half- and full-duplex relay scenarios over AWGN and Rayleigh fading channels.", "year": 2024, "venue": "arXiv.org", "authors": [ "Chenghong Bian", "Yulin Shao", "Haotian Wu", "Emre Ozfatura", "Deniz Gündüz" ], "externalIds": { "ArXiv": "2403.10613", "DBLP": "journals/corr/abs-2403-10613", "DOI": "10.48550/arXiv.2403.10613", "CorpusId": 268513055 }, "url": "https://www.semanticscholar.org/paper/c80e976c2e607d07ef89f9ccc312d73d9acfed85", "referenceCount": 42, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "GAN-based Massive MIMO Channel Model Trained on Measured Data", "abstract": "Wireless channel models are a commonly used tool for the development of wireless telecommunication systems and standards. The currently prevailing geometry-based stochastic channel models (GSCMs) were manually specified for certain environments in a manual process requiring extensive domain knowledge, on the basis of channel measurement campaigns. By taking into account the stochastic distribution of certain channel properties like Rician k-factor, path loss or delay spread, they model the distribution of channel realizations. Instead of this manual process, a generative machine learning model like a generative adversarial network (GAN) may be used to automatically learn the distribution of channel statistics. Subsequently, the GAN’s generator may be viewed as a channel model that can replace conventional stochastic or raytracer-based models. We propose a GAN architecture for a massive MIMO channel model, and train it on measurement data produced by a distributed massive MIMO channel sounder.", "year": 2024, "venue": "International ITG Workshop on Smart Antennas", "authors": [ "Florian Euchner", "Janina Sanzi", "Marcus Henninger", "S. Brink" ], "externalIds": { "DBLP": "journals/corr/abs-2403-05321", "ArXiv": "2403.05321", "DOI": "10.1109/WSA61681.2024.10511584", "CorpusId": 268296832 }, "url": "https://www.semanticscholar.org/paper/b95f5bf24dc739cbaa67fb10a0bc92abe30cb7b9", "referenceCount": 21, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Point Cloud in the Air", "abstract": "Acquisition and processing of point clouds (PCs) is a crucial enabler for many emerging applications reliant on 3D spatial data, such as robot navigation, autonomous vehicles, and augmented reality. In most scenarios, PCs acquired by remote sensors must be transmitted to an edge server for fusion, segmentation, or inference. Wireless transmission of PCs not only puts on increased burden on the already congested wireless spectrum, but also confronts a unique set of challenges arising from the irregular and unstructured nature of PCs. In this paper, we meticulously delineate these challenges and offer a comprehensive examination of existing solutions while candidly acknowledging their inherent limitations. In response to these intricacies, we proffer four pragmatic solution frameworks, spanning advanced techniques, hybrid schemes, and distributed data aggregation approaches. 
In doing so, our goal is to chart a path toward efficient, reliable, and low-latency wireless PC transmission.", "year": 2024, "venue": "arXiv.org", "authors": [ "Yulin Shao", "Chenghong Bian", "Li Yang", "Qianqian Yang", "Zhaoyang Zhang", "Deniz Gündüz" ], "externalIds": { "DBLP": "journals/corr/abs-2401-00658", "ArXiv": "2401.00658", "DOI": "10.48550/arXiv.2401.00658", "CorpusId": 266693351 }, "url": "https://www.semanticscholar.org/paper/bba928296d8d8e192d962c24024b1f3602801137", "referenceCount": 16, "citationCount": 5, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Deep Learning Enabled Semantic-Secure Communication with Shuffling", "abstract": "Deep learning and natural language processing draw heavily on the recent progress in semantic communications; this paper examines the security aspect of this cutting-edge technique. Our goal is to improve upon the conventional secure coding methods to strike a superior tradeoff between transmission rate and leakage rate. Toward this end, we devise a novel semantic security communication system wherein the random shuffling pattern serves as the secret key shared. Intuitively, the permutation of words in the same text via shuffling would result in the meaning distortion of the target text to such a great extent that an eavesdropper can no longer recover the semantic truth. The proposed method can be rephrased as maximizing the transmission rate while minimizing the semantic error probability under the given leakage rate constraint. Simulations demonstrate the significant advantage of the proposed method over the benchmark in boosting secure transmission, especially when channels are prone to strong noise and unpredictable fading, can achieve up to 60% performance gain.", "year": 2023, "venue": "Global Communications Conference", "authors": [ "Fupei Chen", "Liyao Xiang", "Hei Victor Cheng", "Kaiming Shen" ], "externalIds": { "DBLP": "conf/globecom/ChenXCS23", "DOI": "10.1109/GLOBECOM54140.2023.10436975", "CorpusId": 268047911 }, "url": "https://www.semanticscholar.org/paper/7ab8efa24724ff06d763b91b8b6a922211fa7407", "referenceCount": 21, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Hybrid Joint Source-Channel Coding Scheme for Mobile Multi-Hop Networks", "abstract": "We propose a novel hybrid joint source-channel coding (JSCC) scheme for robust image transmission over multihop networks. In the considered scenario, a mobile user wants to deliver an image to its destination over a mobile cellular network. We assume a practical setting, where the links between the nodes belonging to the mobile core network are stable and of high quality, while the link between the mobile user and the first node (e.g., the access point) is potentially time-varying with poorer quality. In recent years, neural network based JSCC schemes (called DeepJSCC) have emerged as promising solutions to overcome the limitations of separation-based fully digital schemes. However, relying on analog transmission, DeepJSCC suffers from noise accumulation over multi-hop networks. Moreover, most of the hops within the mobile core network may be high-capacity wireless connections, calling for digital approaches. To this end, we propose a hybrid solution, where DeepJSCC is adopted for the first hop, while the received signal at the first relay is digitally compressed and forwarded through the mobile core network. 
We show through numerical simulations that the proposed scheme is able to outperform both the fully analog and fully digital schemes. Thanks to DeepJSCC it can avoid the cliff effect over the first hop, while also avoiding noise forwarding over the mobile core network thank to digital transmission. We believe this work paves the way for the practical deployment of DeepJSCC solutions in 6G and future wireless networks.", "year": 2023, "venue": "ICC 2024 - IEEE International Conference on Communications", "authors": [ "Chenghong Bian", "Yulin Shao", "Deniz Gündüz" ], "externalIds": { "ArXiv": "2311.07028", "DBLP": "conf/icc/BianSG24", "DOI": "10.1109/ICC51166.2024.10622359", "CorpusId": 265149566 }, "url": "https://www.semanticscholar.org/paper/a4dec240a568985db33086ade29de6aa450a59f3", "referenceCount": 13, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "Deep Joint Source-Channel Coding for DNA Image Storage: A Novel Approach With Enhanced Error Resilience and Biological Constraint Optimization", "abstract": "In the current era, DeoxyriboNucleic Acid (DNA) based data storage emerges as an intriguing approach, garnering substantial academic interest and investigation. This paper introduces a novel deep joint source-channel coding (DJSCC) scheme for DNA image storage, designated as DJSCC-DNA. This paradigm distinguishes itself from conventional DNA storage techniques through three key modifications: 1) it employs advanced deep learning methodologies, employing convolutional neural networks for DNA encoding and decoding processes; 2) it seamlessly integrates DNA polymerase chain reaction (PCR) amplification into the network architecture, thereby augmenting data recovery precision; and 3) it restructures the loss function by targeting biological constraints for optimization. The performance of the proposed model is demonstrated via numerical results from specific channel testing, suggesting that it surpasses conventional deep learning methodologies in terms of peak signal-to-noise ratio (PSNR) and structural similarity index (SSIM). Additionally, the model effectively ensures positive constraints on both homopolymer run-length and GC content.", "year": 2023, "venue": "IEEE Transactions on Molecular Biological and Multi-Scale Communications", "authors": [ "Wenfeng Wu", "Luping Xiang", "Qiang Liu", "Kun Yang" ], "externalIds": { "DBLP": "journals/corr/abs-2311-01122", "ArXiv": "2311.01122", "DOI": "10.1109/TMBMC.2023.3331579", "CorpusId": 264935452 }, "url": "https://www.semanticscholar.org/paper/06320fa63ab9bf9da9bedb74aaf942b48f849c57", "referenceCount": 33, "citationCount": 5, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Joint Source Channel Anytime Coding Based on Spatially Coupled Repeat-Accumulate Codes", "abstract": "In our early work, we proposed a class of joint source channel anytime coding (JSCAC) scheme based on spatially coupled repeat-accumulate (SC-RA) codes, in addition to an improved partial joint expanding window decoding (PJEWD) algorithm. In this paper, we extend our preliminary results and make a comprehensive theoretical analysis for this system. First, we propose an improved density evolution (DE) algorithm for the SC-RA based JSCAC with PJEWD scheme. 
Based on the proposed DE algorithm, we systematically analyze the anytime property, the belief propagation (BP) decoding threshold, and the decoding complexity of this system over binary additive white Gaussian noise channel (BIAWGN), as well as the effects of code parameters on performances of the above three aspects. Moreover, we explore some application scenarios for JSCAC schemes, where it could maximize its advantages over the prior-art JSCC schemes. Both numerical and simulation results demonstrate the potentials of the proposed JSCAC scheme for high reliability and low latency communications. The proposed DE algorithm could also provide a good reference for the analysis of other JSCAC systems.", "year": 2023, "venue": "IEEE Transactions on Communications", "authors": [ "Lijun Deng", "Xiaoxi Yu", "Yixin Wang", "Md. Noor-A.-Rahim", "Y. L. Guan", "Zhiping Shi", "Zhong-pei Zhang" ], "externalIds": { "DBLP": "journals/tcom/DengYWNGSZ23", "DOI": "10.1109/TCOMM.2023.3303955", "CorpusId": 260834747 }, "url": "https://www.semanticscholar.org/paper/b3d237290e5fb3d06a14cb864c2508d4c6324be7", "referenceCount": 34, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Semantic-Preserving Image Coding Based on Conditional Diffusion Models", "abstract": "Semantic communication, rather than on a bit-by-bit recovery of the transmitted messages, focuses on the meaning and the goal of the communication itself. In this paper, we propose a novel semantic image coding scheme that preserves the semantic content of an image, while ensuring a good trade-off between coding rate and image quality. The proposed Semantic-Preserving Image Coding based on Conditional Diffusion Models (SPIC) transmitter encodes a Semantic Segmentation Map (SSM) and a low-resolution version of the image to be transmitted. The receiver then reconstructs a high-resolution image using a Denoising Diffusion Probabilistic Models (DDPM) doubly conditioned to the SSM and the low-resolution image. As shown by the numerical examples, compared to state-of-the-art (SOTA) approaches, the proposed SPIC exhibits a better balance between the conventional rate-distortion trade-off and the preservation of semantically-relevant features. Code available at https://github.com/frapez1/SPIC", "year": 2023, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Francesco Pezone", "Osman Musa", "Giuseppe Caire", "S. Barbarossa" ], "externalIds": { "DBLP": "conf/icassp/PezoneMCB24", "ArXiv": "2310.15737", "DOI": "10.1109/ICASSP48485.2024.10447279", "CorpusId": 264438996 }, "url": "https://www.semanticscholar.org/paper/3db908df1f1cb5ef4f949e3074dad65e10388624", "referenceCount": 25, "citationCount": 7, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Joint Coding-Modulation for Digital Semantic Communications via Variational Autoencoder", "abstract": "Semantic communications have emerged as a new paradigm for improving communication efficiency by transmitting the semantic information of a source message that is most relevant to a desired task at the receiver. Most existing approaches typically utilize neural networks (NNs) to design end-to-end semantic communication systems, where NN-based semantic encoders output continuously distributed signals to be sent directly to the channel in an analog fashion. 
In this work, we propose a joint coding-modulation (JCM) framework for digital semantic communications by using variational autoencoder (VAE). Our approach learns the transition probability from source data to discrete constellation symbols, thereby avoiding the non-differentiability problem of digital modulation. Meanwhile, by jointly designing the coding and modulation process together, we can match the obtained modulation strategy with the operating channel condition. We also derive a matching loss function with information-theoretic meaning for end-to-end training. Experiments on image semantic communication validate the superiority of our proposed JCM framework over the state-of-the-art quantization-based digital semantic coding-modulation methods across a wide range of channel conditions, transmission rates, and modulation orders. Furthermore, its performance gap to analog semantic communication reduces as the modulation order increases while enjoying the hardware implementation convenience.", "year": 2023, "venue": "IEEE Transactions on Communications", "authors": [ "Yufei Bo", "Yiheng Duan", "Shuo Shao", "Meixia Tao" ], "externalIds": { "ArXiv": "2310.06690", "DBLP": "journals/corr/abs-2310-06690", "DOI": "10.1109/TCOMM.2024.3386577", "CorpusId": 263831297 }, "url": "https://www.semanticscholar.org/paper/5d2d7252baa52a0c1027c700d08cb285a6c3fc9a", "referenceCount": 51, "citationCount": 11, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Distributed Deep Joint Source-Channel Coding with Decoder-Only Side Information", "abstract": "We consider low-latency image transmission over a noisy wireless channel when correlated side information is present only at the receiver side (the Wyner-Ziv scenario). In particular, we are interested in developing practical schemes using a data-driven joint source-channel coding (JSCC) approach, which has been previously shown to outperform conventional separation-based approaches in the practical finite blocklength regimes, and to provide graceful degradation with channel quality. We propose a novel neural network architecture that incorporates the decoder-only side information at multiple stages at the receiver side. Our results demonstrate that the proposed method succeeds in integrating the side information, yielding improved performance at all channel conditions in terms of the various quality measures considered here, especially at low channel signal-to-noise ratios (SNRs) and small bandwidth ratios (BRs). We have made the source code of the proposed method public to enable further research, and the reproducibility of the results.", "year": 2023, "venue": "2024 IEEE International Conference on Machine Learning for Communication and Networking (ICMLCN)", "authors": [ "Selim F. Yilmaz", "Ezgi Ozyilkan", "Deniz Gunduz", "E. 
Erkip" ], "externalIds": { "DBLP": "journals/corr/abs-2310-04311", "ArXiv": "2310.04311", "DOI": "10.1109/ICMLCN59089.2024.10625214", "CorpusId": 263829595 }, "url": "https://www.semanticscholar.org/paper/0a3409d305ea44a465f79ff58adc1781c4c55578", "referenceCount": 38, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "CommIN: Semantic Image Communications as an Inverse Problem with INN-Guided Diffusion Models", "abstract": "Joint source-channel coding schemes based on deep neural networks (DeepJSCC) have recently achieved remarkable performance for wireless image transmission. However, these methods usually focus only on the distortion of the reconstructed signal at the receiver side with respect to the source at the transmitter side, rather than the perceptual quality of the reconstruction which carries more semantic information. As a result, severe perceptual distortion can be introduced under extreme conditions such as low bandwidth and low signal-to-noise ratio. In this work, we propose CommIN, which views the recovery of high-quality source images from degraded reconstructions as an inverse problem. To address this, CommIN combines Invertible Neural Networks (INN) with diffusion models, aiming for superior perceptual quality. Through experiments, we show that our CommIN significantly improves the perceptual quality compared to DeepJSCC under extreme conditions and outperforms other inverse problem approaches used in DeepJSCC.", "year": 2023, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Jiakang Chen", "Di You", "Deniz Gündüz", "P. Dragotti" ], "externalIds": { "ArXiv": "2310.01130", "DBLP": "conf/icassp/ChenYGD24", "DOI": "10.1109/ICASSP48485.2024.10448462", "CorpusId": 263605501 }, "url": "https://www.semanticscholar.org/paper/0120054a5209131ccc6310f8379e2599099a6e02", "referenceCount": 22, "citationCount": 10, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "High Perceptual Quality Wireless Image Delivery with Denoising Diffusion Models", "abstract": "We consider the image transmission problem over a noisy wireless channel via deep learning-based joint source-channel coding (DeepJSCC) along with a denoising diffusion probabilistic model (DDPM) at the receiver. Specifically, we are interested in the perception-distortion trade-off in the practical finite block length regime, in which separate source and channel coding can be highly suboptimal. We introduce a novel scheme, where the conventional DeepJSCC encoder targets transmitting a lower resolution version of the image, which later can be refined thanks to the generative model available at the receiver. In particular, we utilize the range-null space decomposition of the target image; DeepJSCC transmits the range-space of the image, while DDPM progressively refines its null space contents. Through extensive experiments, we demonstrate significant improvements in distortion and perceptual quality of reconstructed images compared to standard DeepJSCC and the state-of-the-art generative learning-based method.", "year": 2023, "venue": "Conference on Computer Communications Workshops", "authors": [ "Selim F. Yilmaz", "Xueyan Niu", "B. 
Bai", "Wei Han", "Lei Deng", "Deniz Gunduz" ], "externalIds": { "DBLP": "conf/infocom/YilmazN00DG24", "ArXiv": "2309.15889", "DOI": "10.1109/INFOCOMWKSHPS61880.2024.10620904", "CorpusId": 263134970 }, "url": "https://www.semanticscholar.org/paper/9b60826f4a1721c4102ab9f67fbe4db5f059fefb", "referenceCount": 29, "citationCount": 11, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Engineering", "Computer Science", "Mathematics" ] }, { "title": "CDDM: Channel Denoising Diffusion Models for Wireless Semantic Communications", "abstract": "Diffusion models (DM) can gradually learn to remove noise, which have been widely used in artificial intelligence generated content (AIGC) in recent years. The property of DM for eliminating noise leads us to wonder whether DM can be applied to wireless communications to help the receiver mitigate the channel noise. To address this, we propose channel denoising diffusion models (CDDM) for semantic communications over wireless channels in this paper. CDDM can be applied as a new physical layer module after the channel equalization to learn the distribution of the channel input signal, and then utilizes this learned knowledge to remove the channel noise. We derive corresponding training and sampling algorithms of CDDM according to the forward diffusion process specially designed to adapt the channel models. We also theoretically prove that the well-trained CDDM can effectively reduce the conditional entropy of the received signal under small sampling steps. Moreover, we apply CDDM to a semantic communications system based on joint source-channel coding (JSCC) for image transmission and design a three-stage training algorithm for combining them. Extensive experimental results demonstrate that CDDM can further reduce the mean square error (MSE) after minimum mean square error (MMSE) equalizer, and the joint CDDM and JSCC system achieves better performance than the JSCC system, the traditional JPEG2000 with low-density parity-check (LDPC) code approach and other benchmarks in diverse scenarios.", "year": 2023, "venue": "IEEE Transactions on Wireless Communications", "authors": [ "Tong Wu", "Zhiyong Chen", "Dazhi He", "Liang Qian", "Yin Xu", "Meixia Tao", "Wenjun Zhang" ], "externalIds": { "DBLP": "journals/corr/abs-2309-08895", "ArXiv": "2309.08895", "DOI": "10.1109/TWC.2024.3379244", "CorpusId": 261885380 }, "url": "https://www.semanticscholar.org/paper/607da39df8c8219f7e2b0024749179d0d3e7869b", "referenceCount": 52, "citationCount": 11, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Deep Joint Source-Channel Coding for Adaptive Image Transmission over MIMO Channels", "abstract": "This paper introduces a vision transformer (ViT)-based deep joint source and channel coding (DeepJSCC) scheme for wireless image transmission over multiple-input multiple-output (MIMO) channels, denoted as DeepJSCC-MIMO. We consider DeepJSCC-MIMO for adaptive image transmission in both open-loop and closed-loop MIMO systems. The novel DeepJSCC-MIMO architecture surpasses the classical separation-based benchmarks with robustness to channel estimation errors and showcases remarkable flexibility in adapting to diverse channel conditions and antenna numbers without requiring retraining. 
Specifically, by harnessing the self-attention mechanism of ViT, DeepJSCC-MIMO intelligently learns feature mapping and power allocation strategies tailored to the unique characteristics of the source image and prevailing channel conditions. Extensive numerical experiments validate the significant improvements in transmission quality achieved by DeepJSCC-MIMO for both open-loop and closed-loop MIMO systems across a wide range of scenarios. Moreover, DeepJSCC-MIMO exhibits robustness to varying channel conditions, channel estimation errors, and different antenna numbers, making it an appealing solution for emerging semantic communication systems.", "year": 2023, "venue": "IEEE Transactions on Wireless Communications", "authors": [ "Haotian Wu", "Yulin Shao", "Chenghong Bian", "K. Mikolajczyk", "Deniz Gündüz" ], "externalIds": { "ArXiv": "2309.00470", "DBLP": "journals/corr/abs-2309-00470", "DOI": "10.48550/arXiv.2309.00470", "CorpusId": 261494094 }, "url": "https://www.semanticscholar.org/paper/31e523991ba336a739d0f4641f4a60e54e823061", "referenceCount": 83, "citationCount": 10, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "6G Perspective of Mobile Network Operators, Manufacturers, and Verticals", "abstract": "The first release of 5G technology is being rolled out worldwide. In parallel, 3GPP is constantly adding new features to upcoming releases covering well-known use cases. This raises the questions i) when will 6G be introduced?, ii) how can 6G be motivated for the stakeholders, and iii) what are the 6G use cases? In this letter, we present the perspective of these stakeholders, namely the network operators, manufacturers, and verticals, identifying potential 5G shortcomings and the remaining 6G solution space. We will highlight the Metaverse as the enabler for 6G addressing omnipresent daily challenges and the upcoming energy problem.", "year": 2023, "venue": "IEEE Networking Letters", "authors": [ "Paul Schwenteck", "Giang T. Nguyen", "H. Boche", "W. Kellerer", "F. Fitzek" ], "externalIds": { "DBLP": "journals/ieeenl/SchwenteckNBKF23", "DOI": "10.1109/LNET.2023.3266863", "CorpusId": 258152508 }, "url": "https://www.semanticscholar.org/paper/219fc43cafde93cbb886d6e800104a923f8f4367", "referenceCount": 25, "citationCount": 31, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Alleviating Distortion Accumulation in Multi-Hop Semantic Communication", "abstract": "Recently, semantic communication has been investigated to boost the performance of end-to-end image transmission systems. However, existing semantic approaches are generally based on deep learning and belong to lossy transmission. Consequently, as the receiver continues to transmit received images to another device, the distortion of images accumulates with each transmission. Unfortunately, most recent advances overlook this issue and only consider single-hop scenarios, where images are transmitted only once from a transmitter to a receiver. In this letter, we propose a novel framework of a multi-hop semantic communication system. To address the problem of distortion accumulation, we introduce a novel recursive training method for the encoder and decoder of semantic communication systems. Specifically, the received images are recursively input into the encoder and decoder to retrain the semantic communication system. 
This empowers the system to handle distorted received images and achieve higher performance. Our extensive simulation results demonstrate that the proposed methods significantly alleviate distortion accumulation in multi-hop semantic communication.", "year": 2023, "venue": "IEEE Communications Letters", "authors": [ "Guangyi Zhang", "Qiyu Hu", "Yunlong Cai", "Guanding Yu" ], "externalIds": { "DBLP": "journals/icl/ZhangHCY24", "ArXiv": "2308.11126", "DOI": "10.1109/LCOMM.2023.3339776", "CorpusId": 261064962 }, "url": "https://www.semanticscholar.org/paper/01826e88b097f9c625144f6de8778daffa5e3faa", "referenceCount": 18, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "Semantic Communication on Multi-Hop Concatenated Relay Networks", "abstract": "In order to achieve reliable information delivery over long distance or high path loss attenuations, semantic communication (SC) on multi-hop relay networks is investigated. Compared to traditional communications, although SC shows an advantage in single-hop communications, the semantic attenuation, i.e., the loss of semantic information, over multiple hops remains an open problem. To characterize the effect of multi-hop relaying on the fidelity of semantic information, a new metric \"Semantic Similarity\" is introduced, which can quantify the similarity between the meanings represented by two messages at the sentence level. Then an optimization function is established, based on which end-to-end optimization is performed to preserve the semantic information to the maximum extent. Experimental results demonstrate the effectiveness of the proposed scheme, where superior performance is achieved by the proposed scheme at low-to-medium signal-to-noise ratio (SNR) regime and subject to high path loss. Compared to existing schemes, a much slower and smoother performance degradation is achieved by the proposed scheme as the number of relays increases.", "year": 2023, "venue": "International Conference on Innovative Computing and Cloud Computing", "authors": [ "Yiran Yin", "Likun Huang", "Q. Li", "Bing Tang", "Ashish Pandharipande", "Xiaohu Ge" ], "externalIds": { "DBLP": "conf/iccchina/YinHLTPG23", "DOI": "10.1109/ICCC57788.2023.10233446", "CorpusId": 261563299 }, "url": "https://www.semanticscholar.org/paper/5a2568d12156b73b32124e045830ae9c900eb304", "referenceCount": 21, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Secure Deep-JSCC Against Multiple Eavesdroppers", "abstract": "In this paper, a generalization of deep learning-aided joint source channel coding (Deep-JSCC) approach to secure communications is studied. We propose an end-to-end (E2E) learning-based approach for secure communication against multiple eavesdroppers over complex-valued fading channels. Both scenarios of colluding and non-colluding eavesdroppers are studied. For the colluding strategy, eavesdroppers share their logits to collaboratively infer private attributes based on ensemble learning method, while for the non-colluding setup they act alone. The goal is to prevent eavesdroppers from inferring private (sensitive) information about the transmitted images, while delivering the images to a legitimate receiver with minimum distortion. 
By generalizing the ideas of privacy funnel and wiretap channel coding, the trade-off between the image recovery at the legitimate node and the information leakage to the eavesdroppers is characterized. To solve this secrecy funnel framework, we implement deep neural networks (DNNs) to realize a data-driven secure communication scheme, without relying on a specific data distribution. Simulations over CIFAR-10 dataset verifies the secrecy-utility trade-off. Adversarial accuracy of eavesdroppers are also studied over Rayleigh fading, Nakagami-m, and AWGN channels to verify the generalization of the proposed scheme. Our experiments show that employing the proposed secure neural encoding can decrease the adversarial accuracy by 28%.", "year": 2023, "venue": "Global Communications Conference", "authors": [ "Seyyed Amirhossein Ameli Kalkhoran", "Mehdi Letafati", "Ece Naz Erdemir", "B. Khalaj", "H. Behroozi", "Deniz Gunduz" ], "externalIds": { "ArXiv": "2308.02892", "DBLP": "journals/corr/abs-2308-02892", "DOI": "10.1109/GLOBECOM54140.2023.10436928", "CorpusId": 260683335 }, "url": "https://www.semanticscholar.org/paper/beb2bc3ec8fef54c1f7c14536215c45d083e0c6e", "referenceCount": 21, "citationCount": 8, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Design of Low-Density Parity-Check Code Pair for Joint Source-Channel Coding Systems Based on Graph Theory", "abstract": "In this article, a graph-theoretic method (taking advantage of constraints among sets associated with the corresponding parity-check matrices) is applied for the construction of a double low-density parity-check (D-LDPC) code (also known as LDPC code pair) in a joint source-channel coding (JSCC) system. Specifically, we pre-set the girth of the parity-check matrix for the LDPC code pair when jointly designing the two LDPC codes, which are constructed by following the set constraints. The constructed parity-check matrices for channel codes comprise an identity submatrix and an additional submatrix, whose column weights can be pre-set to be any positive integer numbers. Simulation results illustrate that the constructed D-LDPC codes exhibit significant performance improvement and enhanced flexible frame length (i.e., adaptability under various channel conditions) compared with the benchmark code pair.", "year": 2023, "venue": "Entropy", "authors": [ "Yijie Lv", "Jiguang He", "Weikai Xu", "L. Wang" ], "externalIds": { "PubMedCentral": "10453591", "DBLP": "journals/entropy/LvHXW23", "DOI": "10.3390/e25081189", "CorpusId": 260831048, "PubMed": "37628222" }, "url": "https://www.semanticscholar.org/paper/9621ace9aac4128c2d8556e67dce9a5484b0575a", "referenceCount": 25, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Predictive and Adaptive Deep Coding for Wireless Image Transmission in Semantic Communication", "abstract": "Semantic communication is a newly emerged communication paradigm that exploits deep learning (DL) models to realize communication processes like source coding and channel coding. Recent advances have demonstrated that DL-based joint source-channel coding (DeepJSCC) can achieve exciting data compression and noise-resiliency performances for wireless image transmission tasks, especially in environments with low channel signal-to-noises (SNRs). 
However, existing DeepJSCC-based semantic communication frameworks still cannot achieve adaptive code rates for different channel SNRs and image contents, which reduces its flexibility and bandwidth efficiency. In this paper, we propose a predictive and adaptive deep coding (PADC) framework for realizing flexible code rate optimization with a given target transmission quality requirement. PADC is realized by a variable code length enabled DeepJSCC (DeepJSCC-V) model for realizing flexible code length adjustment, an Oracle Network (OraNet) model for predicting peak-signal-to-noise (PSNR) value for an image transmission task according to its contents, channel signal to noise ratio (SNR) and the compression ratio (CR) value, and a CR optimizer aims at finding the minimal data-level or instance-level CR with a PSNR quality constraint. By using the above three modules, PADC can transmit the image data with minimal CR, which greatly increases bandwidth efficiency. Simulation results demonstrate that the proposed DeepJSCC-V model can achieve similar PSNR performances compared with the state-of-the-art Attention-based DeepJSCC (ADJSCC) model, and the proposed OraNet model is able to predict high-quality PSNR values with an average error lower than 0.5dB. Results also demonstrate that the proposed PADC can use nearly minimal bandwidth consumption for wireless image transmission tasks with different channel SNR and image contents, at the same time guaranteeing the PSNR constraint for each image data.", "year": 2023, "venue": "IEEE Transactions on Wireless Communications", "authors": [ "Wenyu Zhang", "Haijun Zhang", "Hui Ma", "Hua Shao", "Ning Wang", "Victor C. M. Leung" ], "externalIds": { "DBLP": "journals/twc/ZhangZMSWL23", "DOI": "10.1109/TWC.2023.3234408", "CorpusId": 255893933 }, "url": "https://www.semanticscholar.org/paper/46e2af3d7e8dd50675b0062fe9e4fce5cd5a0508", "referenceCount": 36, "citationCount": 36, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Role of Fidelity in Goal-Oriented Semantic Communication: A Rate Distortion Approach", "abstract": "We study a variant of a robust description source coding framework, which is a relevant model for goal-oriented semantic information transmission, via its corresponding characterization. Considering two individual single-letter separable distortion constraints and input and output data acting as the intrinsic and extrinsic message, respectively, we first derive a lower bound on the optimal rates of the problem, as well as necessary and sufficient conditions for this bound to be tight. Subsequently, we prove a general result that provides in parametric form the optimal solution of the characterization of this problem. Capitalizing on these results, we examine the structure of the solution for one case study of general binary alphabets under Hamming distortions and solve in closed form a special case. We also solve another general binary alphabet case where a Hamming and an erasure distortion coexist, as a means to highlight the importance of selecting the type of the distortion constraint in goal-oriented semantic communication. Furthermore, we develop a goal-oriented Blahut-Arimoto (BA) algorithm, which can be used for the computation of any finite alphabet intrinsic or extrinsic message under individual distortion criteria. 
Finally, we revisit the problem for multidimensional independent and identically distributed ( $\\mathop {\\mathrm {i.i.d.}}$ ) jointly Gaussian processes with individual mean-square error (MSE) distortion constraints, providing new insights that have previously been overlooked. This work reveals the cardinal role of context-dependent fidelity criteria in goal-oriented semantic communication.", "year": 2023, "venue": "IEEE Transactions on Communications", "authors": [ "Photios A. Stavrou", "M. Kountouris" ], "externalIds": { "DBLP": "journals/tcom/StavrouK23", "DOI": "10.1109/TCOMM.2023.3274122", "CorpusId": 258625220 }, "url": "https://www.semanticscholar.org/paper/190e92917eab77cb771f78543f0ed739eb432f83", "referenceCount": 55, "citationCount": 21, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Improved Random-Binning Exponent for Distributed Hypothesis Testing", "abstract": "Shimokawa, Han, and Amari proposed a \"quantization and binning\" scheme for distributed binary hypothesis testing. We propose a simple improvement on the receiver's guessing rule in this scheme. This attains a better exponent of the error probability of the second type.", "year": 2023, "venue": "arXiv.org", "authors": [ "Yuval Kochman", "Ligong Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2306-14499", "ArXiv": "2306.14499", "DOI": "10.48550/arXiv.2306.14499", "CorpusId": 259251821 }, "url": "https://www.semanticscholar.org/paper/5a86f1b049042afcf7f333e5bee0bb521a73937e", "referenceCount": 9, "citationCount": 3, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Reasoning Over the Air: A Reasoning-Based Implicit Semantic-Aware Communication Framework", "abstract": "Semantic-aware communication is a novel paradigm that draws inspiration from human communication focusing on the delivery of the meaning of messages. It has attracted significant interest recently due to its potential to improve the efficiency and reliability of communication and enhance users’ quality-of-experience (QoE). Most existing works focus on transmitting and delivering the explicit semantic meaning that can be directly identified from the source signal. This paper investigates the implicit semantic-aware communication in which the hidden information, e.g., hidden relations, concepts and implicit reasoning mechanisms of users, that cannot be directly observed from the source signal must be recognized and interpreted by the intended users. To this end, a novel implicit semantic-aware communication (iSAC) architecture is proposed for representing, communicating, and interpreting the implicit semantic meaning between source and destination users. A graph-inspired structure is first developed to represent the complete semantics, including both explicit and implicit, of a message. A projection-based semantic encoder is then proposed to convert the high-dimensional graphical representation of explicit semantics into a low-dimensional semantic constellation space for efficient physical channel transmission. To enable the destination user to learn and imitate the implicit semantic reasoning process of source user, a generative adversarial imitation learning-based solution, called G-RML, is proposed.
Different from existing communication solutions, the source user in G-RML does not focus only on sending as much of the useful messages as possible; but, instead, it tries to guide the destination user to learn a reasoning mechanism to map any observed explicit semantics to the corresponding implicit semantics that are most relevant to the semantic meaning. By applying G-RML, we prove that the destination user can accurately imitate the reasoning process of the source user and automatically generate a set of implicit reasoning paths following the same probability distribution as the expert paths. Compared to the existing solutions, our proposed G-RML requires much less communication and computational resources and scales well to the scenarios involving the communication of rich semantic meanings consisting of a large number of concepts and relations. Numerical results show that the proposed solution achieves up to 92% accuracy of implicit meaning interpretation.", "year": 2023, "venue": "IEEE Transactions on Wireless Communications", "authors": [ "Yong Xiao", "Yiwei Liao", "Yingyu Li", "Guangming Shi", "H. Poor", "W. Saad", "M. Debbah", "M. Bennis" ], "externalIds": { "DBLP": "journals/corr/abs-2306-11229", "ArXiv": "2306.11229", "DOI": "10.1109/TWC.2023.3312115", "CorpusId": 259202819 }, "url": "https://www.semanticscholar.org/paper/ae47608b6b7f45631aa8c229c616ad8fa8c8f0b5", "referenceCount": 45, "citationCount": 8, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Transformer-Aided Wireless Image Transmission With Channel Feedback", "abstract": "This paper presents a novel wireless image transmission paradigm that can exploit feedback from the receiver, called JSCCformer-f. We consider a block feedback channel model, where the transmitter receives noiseless/noisy channel output feedback after each block. The proposed scheme employs a single encoder to facilitate transmission over multiple blocks, refining the receiver’s estimation at each block. Specifically, the unified encoder of JSCCformer-f can leverage the semantic information from the source image, and acquire channel state information and the decoder’s current belief about the source image from the feedback signal to generate coded symbols at each block. Numerical experiments show that our JSCCformer-f scheme achieves state-of-the-art performance with robustness to noise in the feedback link. Additionally, JSCCformer-f can adapt to the channel condition directly through feedback without the need for separate channel estimation. We further extend the scope of the JSCCformer-f approach to include the broadcast channel, which enables the transmitter to generate broadcast codes in accordance with signal semantics and channel feedback from individual receivers.", "year": 2023, "venue": "IEEE Transactions on Wireless Communications", "authors": [ "Haotian Wu", "Yulin Shao", "Emre Ozfatura", "K. 
Mikolajczyk", "Deniz Gündüz" ], "externalIds": { "ArXiv": "2306.09101", "DBLP": "journals/twc/WuSOMG24", "DOI": "10.1109/TWC.2024.3386052", "CorpusId": 259165063 }, "url": "https://www.semanticscholar.org/paper/fd572a1985488dc42721251be4027f015c91bf69", "referenceCount": 57, "citationCount": 13, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Wireless Point Cloud Transmission", "abstract": "3D point cloud is a three-dimensional data format generated by LiDARs and depth sensors, and is being increasingly used in a large variety of applications. This paper presents a novel solution called SEmantic Point cloud Transmission (SEPT), for the transmission of point clouds over wireless channels with limited bandwidth. At the transmitter, SEPT encodes the point cloud via an iterative downsampling and feature extraction process. At the receiver, SEPT reconstructs the point cloud with latent reconstruction and offset-based upsampling. Extensive numerical experiments confirm that SEPT significantly outperforms the standard approach with octree-based compression followed by channel coding. Compared with a more advanced benchmark that utilizes state-of-the-art deep learning-based compression techniques, SEPT achieves comparable performance while eliminating the cliff and leveling effects. Thanks to its improved performance and robustness against channel variations, we believe that SEPT can be instrumental in collaborative sensing and inference applications among robots and vehicles, particularly in the low-latency and high-mobility scenarios.", "year": 2023, "venue": "arXiv.org", "authors": [ "Chenghong Bian", "Yulin Shao", "Deniz Gündüz" ], "externalIds": { "DBLP": "journals/corr/abs-2306-08730", "ArXiv": "2306.08730", "DOI": "10.48550/arXiv.2306.08730", "CorpusId": 259165071 }, "url": "https://www.semanticscholar.org/paper/c6e8c8fded1fa9c58520b61364693f2f5c080540", "referenceCount": 29, "citationCount": 10, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "DeepJSCC-1++: Robust and Bandwidth-Adaptive Wireless Image Transmission", "abstract": "This paper presents a novel vision transformer (ViT) based deep joint source channel coding (DeepJSCC) scheme, dubbed DeepJSCC-l++, which can adapt to different target bandwidth ratios as well as channel signal-to-noise ratios (SNRs) using a single model. To achieve this, we treat the bandwidth ratio and the SNR as channel state information available to the encoder and decoder, which are fed to the model as side information, and train the proposed DeepJSCC-l++ model with different bandwidth ratios and SNRs. The reconstruction losses corresponding to different bandwidth ratios are calculated, and a novel training methodology, which dynamically assigns different weights to the losses of different bandwidth ratios according to their individual reconstruction qualities, is introduced. Shifted window (Swin) transformer is adopted as the backbone for our DeepJSCC-l++ model, and it is shown through extensive simulations that the proposed DeepJSCC-l++ can adapt to different bandwidth ratios and channel SNRs with marginal performance loss compared to the separately trained models. We also observe the proposed schemes can outperform the digital baseline, which concatenates the BPG compression with capacity-achieving channel code. 
We believe this is an important step towards the implementation of DeepJSCC in practice as a single pre-trained model is sufficient to serve the user in a wide range of channel conditions.", "year": 2023, "venue": "Global Communications Conference", "authors": [ "Chenghong Bian", "Yulin Shao", "Deniz Gündüz" ], "externalIds": { "DBLP": "journals/corr/abs-2305-13161", "ArXiv": "2305.13161", "DOI": "10.1109/GLOBECOM54140.2023.10436878", "CorpusId": 258833652 }, "url": "https://www.semanticscholar.org/paper/8e9ac008e6f0949c5dd388c1f0c3299569288731", "referenceCount": 15, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Fusion-Based Multi-User Semantic Communications for Wireless Image Transmission Over Degraded Broadcast Channels", "abstract": "Degraded broadcast channels (DBC) are a typical multiuser communication scenario. There exist classic transmission methods, such as superposition coding with successive interference cancellation, to achieve the DBC capacity region. However, semantic communication methods over DBC still lack in-depth research. To address this, we design a semantic communications system for wireless image transmission over DBC in this paper. The proposed architecture supports a transmitter extracting semantic features for two users separately, and learns to dynamically fuse these semantic features into a joint latent representation for broadcasting. The key here is to design a flexible image semantic fusion (FISF) module to fuse the semantic features of two users, and to use a multi-layer perceptron (MLP) based neural network to adjust the weights of different user semantic features for flexible adaptability to different users' channels. Experiments present the semantic performance region based on the peak signal-to-noise ratio (PSNR) of both users, and show that the proposed system dominates the traditional methods.", "year": 2023, "venue": "Global Communications Conference", "authors": [ "Tong Wu", "Zhiyong Chen", "M. Tao", "Bin Xia", "Wenjun Zhang" ], "externalIds": { "DBLP": "journals/corr/abs-2305-09165", "ArXiv": "2305.09165", "DOI": "10.1109/GLOBECOM54140.2023.10437864", "CorpusId": 258715244 }, "url": "https://www.semanticscholar.org/paper/997bd077d5a7ab4046cd8b4da9848921f94c8541", "referenceCount": 18, "citationCount": 9, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Physical-Layer Adversarial Robustness for Deep Learning-Based Semantic Communications", "abstract": "End-to-end semantic communications (ESC) rely on deep neural networks (DNN) to boost communication efficiency by only transmitting the semantics of data, showing great potential for high-demand mobile applications. We argue that central to the success of ESC is the robust interpretation of conveyed semantics at the receiver side, especially for security-critical applications such as automatic driving and smart healthcare. However, robustifying semantic interpretation is challenging as ESC is extremely vulnerable to physical-layer adversarial attacks due to the openness of wireless channels and the fragileness of neural models. Toward ESC robustness in practice, we ask the following two questions: Q1: For attacks, is it possible to generate semantic-oriented physical-layer adversarial attacks that are imperceptible, input-agnostic and controllable?
Q2: Can we develop a defense strategy against such semantic distortions and previously proposed adversaries? To this end, we first present MobileSC, a novel semantic communication framework that considers the computation and memory efficiency in wireless environments. Equipped with this framework, we propose SemAdv, a physical-layer adversarial perturbation generator that aims to craft semantic adversaries over the air with the abovementioned criteria, thus answering the Q1. To better characterize the real-world effects for robust training and evaluation, we further introduce a novel adversarial training method $\\texttt {SemMixed}$ to harden the ESC against SemAdv attacks and existing strong threats, thus answering the Q2. Extensive experiments on three public benchmarks verify the effectiveness of our proposed methods against various physical adversarial attacks. We also show some interesting findings, e.g., our MobileSC can even be more robust than classical block-wise communication systems in the low SNR regime.", "year": 2023, "venue": "IEEE Journal on Selected Areas in Communications", "authors": [ "Gu Nan", "Zhichun Li", "Jinli Zhai", "Qimei Cui", "Gong Chen", "Xin Du", "Xuefei Zhang", "Xiaofeng Tao", "Zhu Han", "Tony Q. S. Quek" ], "externalIds": { "DBLP": "journals/jsac/NanLZCCDZTHQ23", "ArXiv": "2305.07220", "DOI": "10.1109/JSAC.2023.3288249", "CorpusId": 258676080 }, "url": "https://www.semanticscholar.org/paper/09c0fc14d057b2d2a30ca82f71f183c568f37aff", "referenceCount": 72, "citationCount": 16, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Improved Nonlinear Transform Source-Channel Coding to Catalyze Semantic Communications", "abstract": "Recent deep learning methods have led to increased interest in solving high-efficiency end-to-end transmission problems. These methods, we call nonlinear transform source-channel coding (NTSCC), extract the semantic latent features of source signal, and learn entropy model to guide the joint source-channel coding with variable rate to transmit latent features over wireless channels. In this article, we propose a comprehensive framework for improving NTSCC, thereby higher system coding gain, better model compatibility, more flexible adaptation strategy aligned with semantic guidance are all achieved. This new sophisticated NTSCC model is now ready to support large-size data interaction in emerging XR, which catalyzes the application of semantic communications. Specifically, we propose three useful improvement approaches. First, we introduce a contextual entropy model to better capture the spatial correlations among the semantic latent features, thereby more accurate rate allocation and contextual joint source-channel coding method are developed accordingly to enable higher coding gain. On that basis, we further propose a response network architecture to formulate compatible NTSCC, i.e., once-learned model supports various bandwidth ratios and channel states that benefits practical deployment greatly. Following this, we propose an online latent feature editing mechanism to enable more flexible coding rate allocation aligned with some specific semantic guidance. By comprehensively applying the above three improvement methods for NTSCC, a deployment-friendly semantic coded transmission system stands out finally. 
Our improved NTSCC system has been experimentally verified to achieve a better rate-distortion efficiency versus the state-of-the-art engineered VTM + 5G LDPC coded transmission system with lower processing latency.", "year": 2023, "venue": "IEEE Journal on Selected Topics in Signal Processing", "authors": [ "Sixian Wang", "Jincheng Dai", "Xiaoqi Qin", "Zhongwei Si", "K. Niu", "Ping Zhang" ], "externalIds": { "ArXiv": "2303.14637", "DBLP": "journals/corr/abs-2303-14637", "DOI": "10.1109/JSTSP.2023.3304140", "CorpusId": 257766714 }, "url": "https://www.semanticscholar.org/paper/fb7651c449f1100a8fe0ebf1aa367cc1250b8a8d", "referenceCount": 68, "citationCount": 12, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Multi-Realism Image Compression with a Conditional Generator", "abstract": "By optimizing the rate-distortion-realism trade-off, generative compression approaches produce detailed, realistic images, even at low bit rates, instead of the blurry reconstructions produced by rate-distortion optimized models. However, previous methods do not explicitly control how much detail is synthesized, which results in a common criticism of these methods: users might be worried that a misleading reconstruction far from the input image is generated. In this work, we alleviate these concerns by training a decoder that can bridge the two regimes and navigate the distortion-realism tradeoff. From a single compressed representation, the receiver can decide to either reconstruct a low mean squared error reconstruction that is close to the input, a realistic reconstruction with high perceptual quality, or anything in between. With our method, we set a new state-of-the-art in distortion-realism, pushing the frontier of achievable distortion-realism pairs, i.e., our method achieves better distortions at high realism and better realism at low distortion than ever before.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "E. Agustsson", "David C. Minnen", "G. Toderici", "Fabian Mentzer" ], "externalIds": { "DBLP": "journals/corr/abs-2212-13824", "ArXiv": "2212.13824", "DOI": "10.1109/CVPR52729.2023.02138", "CorpusId": 255186005 }, "url": "https://www.semanticscholar.org/paper/76a0cb56bed90ba3de90baea1a5f29fe91f6f1cb", "referenceCount": 48, "citationCount": 51, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "The Internet of Senses: Building on Semantic Communications and Edge Intelligence", "abstract": "The Internet of Senses (IoS) holds the promise of flawless telepresence-style communication for all human “receptors” and, therefore, blurs the difference of virtual and real environments. We commence by highlighting the compelling use cases empowered by the IoS and also the key network requirements. We then elaborate on how the emerging semantic communications and Artificial Intelligence (AI)/Machine Learning (ML) paradigms along with 6G technologies may satisfy the requirements of IoS use cases. On one hand, semantic communications can be applied for extracting meaningful and significant information and, hence efficiently exploit the resources and for harnessing former information at the receiver to satisfy IoS requirements. 
On the other hand, AI/ML facilitates frugal network resource management by making use of the enormous amount of data generated in IoS edge nodes and devices, as well as by optimizing the IoS performance via intelligent agents. However, the intelligent agents deployed at the edge are not completely aware of each others' decisions and the environments of each other, hence, they operate in a partially rather than fully observable environment. Therefore, we present a case study of Partially Observable Markov Decision Processes (POMDP) for improving the User Equipment (UE) throughput and energy consumption, as they are imperative for IoS use cases, using reinforcement learning for astutely activating and deactivating the component carriers in carrier aggregation. Finally, we outline the challenges and open issues of IoS implementations and employing semantic communications, edge intelligence, as well as learning under partial observability in the IoS context.", "year": 2022, "venue": "IEEE Network", "authors": [ "Roghayeh Joda", "Medhat H. M. Elsayed", "H. Abou-zeid", "R. Atawia", "A. B. Sediq", "G. Boudreau", "M. Erol-Kantarci", "L. Hanzo" ], "externalIds": { "DBLP": "journals/corr/abs-2212-10748", "ArXiv": "2212.10748", "DOI": "10.1109/MNET.107.2100627", "CorpusId": 254926618 }, "url": "https://www.semanticscholar.org/paper/3a1760fff844a9dfcfcdff8bc24de436274d5b60", "referenceCount": 19, "citationCount": 9, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "A Theory of Semantic Communication", "abstract": "Semantic communication is an emerging research area that has gained a wide range of attention recently. Despite this growing interest, there remains a notable absence of a comprehensive and widely-accepted framework for characterizing semantic communication. This paper introduces a new conceptualization of semantic communication and formulates two fundamental problems, which we term language exploitation and language design. Our contention is that the challenge of language design can be effectively situated within the broader framework of joint source-channel coding theory, underpinned by a comprehensive end-to-end distortion metric. To tackle the language exploitation problem, we put forth three approaches: semantic encoding, semantic decoding, and a synergistic combination of both in the form of combined semantic encoding and decoding. Furthermore, we establish the semantic distortion-cost region as a critical framework for assessing the language exploitation problem. For each of the three proposed approaches, the achievable distortion-cost region is characterized. 
Overall, this paper aims to shed light on the intricate dynamics of semantic communication, paving the way for a deeper understanding of this evolving field.", "year": 2022, "venue": "IEEE Transactions on Mobile Computing", "authors": [ "Yulin Shao", "Qingqing Cao", "Deniz Gündüz" ], "externalIds": { "DBLP": "journals/corr/abs-2212-01485", "ArXiv": "2212.01485", "DOI": "10.48550/arXiv.2212.01485", "CorpusId": 254247048 }, "url": "https://www.semanticscholar.org/paper/b0a93537d21f19006d483fe7cc58d4ac363f456e", "referenceCount": 39, "citationCount": 20, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Generative Joint Source-Channel Coding for Semantic Image Transmission", "abstract": "Recent works have shown that joint source-channel coding (JSCC) schemes using deep neural networks (DNNs), called DeepJSCC, provide promising results in wireless image transmission. However, these methods mostly focus on the distortion of the reconstructed signals with respect to the input image, rather than their perception by humans. However, focusing on traditional distortion metrics alone does not necessarily result in high perceptual quality, especially in extreme physical conditions, such as very low bandwidth compression ratio (BCR) and low signal-to-noise ratio (SNR) regimes. In this work, we propose two novel JSCC schemes that leverage the perceptual quality of deep generative models (DGMs) for wireless image transmission, namely InverseJSCC and GenerativeJSCC. While the former is an inverse problem approach to DeepJSCC, the latter is an end-to-end optimized JSCC scheme. In both, we optimize a weighted sum of mean squared error (MSE) and learned perceptual image patch similarity (LPIPS) losses, which capture more semantic similarities than other distortion metrics. InverseJSCC performs denoising on the distorted reconstructions of a DeepJSCC model by solving an inverse optimization problem using the pre-trained style-based generative adversarial network (StyleGAN). Our simulation results show that InverseJSCC significantly improves the state-of-the-art DeepJSCC in terms of perceptual quality in edge cases. In GenerativeJSCC, we carry out end-to-end training of an encoder and a StyleGAN-based decoder, and show that GenerativeJSCC significantly outperforms DeepJSCC both in terms of distortion and perceptual quality.", "year": 2022, "venue": "IEEE Journal on Selected Areas in Communications", "authors": [ "Ece Naz Erdemir", "Tze-Yang Tung", "P. Dragotti", "Deniz Gunduz" ], "externalIds": { "DBLP": "journals/corr/abs-2211-13772", "ArXiv": "2211.13772", "DOI": "10.1109/JSAC.2023.3288243", "CorpusId": 254017443 }, "url": "https://www.semanticscholar.org/paper/f52b9bc98ba8327ccf90802bb02d1a06acaabd59", "referenceCount": 46, "citationCount": 44, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "Distributed Deep Joint Source-Channel Coding over a Multiple Access Channel", "abstract": "We consider distributed image transmission over a noisy multiple access channel (MAC) using deep joint source-channel coding (DeepJSCC). It is known that Shannon's separation theorem holds when transmitting independent sources over a MAC in the asymptotic infinite block length regime. However, we are interested in the practical finite block length regime, in which case separate source and channel coding is known to be suboptimal. 
We introduce a novel joint image compression and transmission scheme, where the devices send their compressed image representations in a non-orthogonal manner. While non-orthogonal multiple access (NOMA) is known to achieve the capacity region, to the best of our knowledge, non-orthogonal joint source-channel coding (JSCC) scheme for practical systems has not been studied before. Through extensive experiments, we show significant improvements in terms of the quality of the reconstructed images compared to orthogonal transmission employing current DeepJSCC approaches particularly for low bandwidth ratios. We publicly share source code to facilitate further research and reproducibility.", "year": 2022, "venue": "ICC 2023 - IEEE International Conference on Communications", "authors": [ "Selim F. Yilmaz", "Can Karamanli", "Deniz Gunduz" ], "externalIds": { "DBLP": "journals/corr/abs-2211-09920", "ArXiv": "2211.09920", "DOI": "10.1109/ICC45041.2023.10279499", "CorpusId": 253707895 }, "url": "https://www.semanticscholar.org/paper/f4ab43023c9738854f9573fc819fcea593eddc61", "referenceCount": 33, "citationCount": 15, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science", "Mathematics" ] }, { "title": "Deep Joint Source-Channel Coding for Semantic Communications", "abstract": "Semantic communications is considered a promising technology that will increase the efficiency of next-generation communication systems, particularly human-machine and machine-type communications. In contrast to the source-agnostic approach of conventional wireless communication systems, semantic communications seek to ensure that only relevant information for the underlying task is communicated to the receiver. Considering most semantic communication applications have strict latency, bandwidth, and power constraints, a prominent approach is to model them as a joint source-channel coding (JSCC) problem. Although JSCC has been a long-standing open problem in communication and coding theory, remarkable performance gains have been made recently over existing separate source and channel coding systems, particularly in low-latency and low-power scenarios. Recent progress has been made thanks to the adoption of deep learning techniques for joint source-channel code design that outperform the concatenation of state-of-the-art compression and channel coding schemes, which are the result of decades-long research efforts. In this article, we present an adaptive deep learning based JSCC (DeepJSCC) architecture for semantic communications, introduce its design principles, highlight its benefits, and outline future research challenges that lie ahead.", "year": 2022, "venue": "IEEE Communications Magazine", "authors": [ "Jia-lin Xu", "Tze-Yang Tung", "Bo Ai", "Wei Chen", "Yuxuan Sun", "Deniz Gunduz" ], "externalIds": { "DBLP": "journals/cm/XuTACSG23", "ArXiv": "2211.08747", "DOI": "10.1109/MCOM.004.2200819", "CorpusId": 253553160 }, "url": "https://www.semanticscholar.org/paper/d43fc888bc6072e59ea973b48c35ad03e3bc3ffd", "referenceCount": 27, "citationCount": 31, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Deep Joint Source-Channel Coding for Underwater Image Transmission", "abstract": "Traditional methods for coding underwater acoustic communications are bound to be surpassed by methods optimizing for source-channel coding jointly.
However, the complexity of joint-optimization has thwarted successful breakthroughs in this area. We, therefore, present a novel approach, where we model the coding problem as the translation problem of the input sequence to another ‘language’, depending on the estimated channel conditions. We use Long Short-Term Memory (LSTM)-based sequence-to-sequence models to enable this and explain our approach in detail.", "year": 2022, "venue": "Workshop/Conference on Underwater Networks & Systems", "authors": [ "Khizar Anjum", "Z. Qi", "D. Pompili" ], "externalIds": { "DBLP": "conf/wuwnet/AnjumQP22", "DOI": "10.1145/3567600.3568138", "CorpusId": 255226779 }, "url": "https://www.semanticscholar.org/paper/b1a38addbfffbd41340e3f54f1d9df65e29de146", "referenceCount": 31, "citationCount": 9, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Joint Source-Channel Coding Over the Relay Channel", "abstract": "This paper presents a novel deep joint source-channel coding (DeepJSCC) scheme for image transmission over a half-duplex cooperative relay channel. Specifically, we apply DeepJSCC to two basic modes of cooperative communications, namely amplify-and-forward (AF) and decode-and-forward (DF). In DeepJSCC-AF, the relay simply amplifies and forwards its received signal. In DeepJSCC-DF, on the other hand, the relay first reconstructs the transmitted image and then re-encodes it before forwarding. Considering the excessive computation overhead of DeepJSCC-DF for recovering the image at the relay, we propose an alternative scheme, called DeepJSCC-PF, in which the relay processes and forwards its received signal without necessarily recovering the image. Simulation results show that the proposed DeepJSCC-AF, DF, and PF schemes are superior to the digital baselines with BPG compression with polar codes and provide a graceful performance degradation with deteriorating channel quality. Further investigation shows that the PSNR gain of DeepJSCC-DF/PF over DeepJSCC-AF improves as the channel condition between the source and relay improves. Moreover, the DeepJSCC-PF scheme achieves similar performance to DeepJSCC-DF with lower computational complexity.", "year": 2022, "venue": "2024 IEEE International Conference on Machine Learning for Communication and Networking (ICMLCN)", "authors": [ "Chenghong Bian", "Yulin Shao", "Haotian Wu", "Deniz Gündüz" ], "externalIds": { "ArXiv": "2211.06705", "DOI": "10.1109/ICMLCN59089.2024.10624800", "CorpusId": 253511162 }, "url": "https://www.semanticscholar.org/paper/352659a966f4464f96e671b9bbeec783c6bb66d9", "referenceCount": 18, "citationCount": 17, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Goal-Oriented Communications for the IoT and Application to Data Compression", "abstract": "Internet of Things (IoT) devices will play an important role in emerging applications, since their sensing, actuation, processing, and wireless communication capabilities stimulate data collection, transmission and decision processes of smart applications. However, new challenges arise from the widespread popularity of IoT devices, including the need for processing more complicated data structures and high dimensional data/signals. The unprecedented volume, heterogeneity, and velocity of IoT data calls for a communication paradigm shift from a search for accuracy or fidelity to semantics extraction and goal accomplishment. 
In this paper, we provide a partial but insightful overview of recent research efforts in this newly formed area of goal-oriented (GO) and semantic communications, focusing on the problem of GO data compression for IoT applications.", "year": 2022, "venue": "IEEE Internet of Things Magazine", "authors": [ "Chao Zhang", "Hang Zou", "S. Lasaulce", "W. Saad", "M. Kountouris", "M. Bennis" ], "externalIds": { "DBLP": "journals/iotm/ZhangZLSKB22", "ArXiv": "2211.05378", "DOI": "10.1109/IOTM.001.2200177", "CorpusId": 253446882 }, "url": "https://www.semanticscholar.org/paper/4406655a6e4647d8ee3a1aa787932f1b90bbda4a", "referenceCount": 19, "citationCount": 22, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "Toward Adaptive Semantic Communications: Efficient Data Transmission via Online Learned Nonlinear Transform Source-Channel Coding", "abstract": "The emerging field semantic communication is driving the research of end-to-end data transmission. By utilizing the powerful representation ability of deep learning models, learned data transmission schemes have exhibited superior performance than the established source and channel coding methods. While, so far, research efforts mainly concentrated on architecture and model improvements toward a static target domain. Despite their successes, such learned models are still suboptimal due to the limitations in model capacity and imperfect optimization and generalization, particularly when the testing data distribution or channel response is different from that adopted for model training, as is likely to be the case in real-world. To tackle this, in this paper, we propose a novel online learned joint source and channel coding approach that leverages the deep learning model’s overfitting property. Specifically, we update the off-the-shelf pre-trained models after deployment in a lightweight online fashion to adapt to the distribution shifts in source data and environment domain. We take the overfitting concept to the extreme, proposing a series of implementation-friendly methods to adapt the codec model or representations to an individual data or channel state instance, which can further lead to substantial gains in terms of the end-to-end rate-distortion performance. Accordingly, the streaming ingredients include both the semantic representations of source data and the online updated decoder model parameters. The system design is formulated as a joint optimization problem whose goal is to minimize the loss function, a tripartite trade-off among the data stream bandwidth cost, model stream bandwidth cost, and end-to-end distortion. The proposed methods enable the communication-efficient adaptation for all parameters in the network without sacrificing decoding speed. Extensive experiments, including user study, on continually changing target source data and wireless channel environments, demonstrate the effectiveness and efficiency of our approach, on which we outperform existing state-of-the-art engineered transmission scheme (VVC combined with 5G LDPC coded transmission).", "year": 2022, "venue": "IEEE Journal on Selected Areas in Communications", "authors": [ "Jincheng Dai", "Sixian Wang", "Ke Yang", "Kailin Tan", "Xiaoqi Qin", "Zhongwei Si", "K. 
Niu", "Ping Zhang" ], "externalIds": { "DBLP": "journals/jsac/DaiWYTQSNZ23", "ArXiv": "2211.04339", "DOI": "10.1109/JSAC.2023.3288246", "CorpusId": 258060292 }, "url": "https://www.semanticscholar.org/paper/744259871e341a1017e3f02bb28406a8eb70e6c2", "referenceCount": 73, "citationCount": 14, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Double Polar Codes for Joint Source and Channel Coding", "abstract": "In this paper, we design a joint source and channel coding (JSCC) framework combining the source polar coding and the channel polar coding. The source is first compressed using a polar code (PC), and source check decoding is employed to construct an error set containing the index of all source decoding errors. Then, the proposed JSCC system employs another PC or systematic PC (SPC) to protect the compressed source and the error set against noise, which is called double PC (D-PC) or systematic double PC (SD-PC), respectively. For a D-PC JSCC system, we prove a necessary condition for the optimal mapping between the source PC and the channel PC. On the receiver side, by introducing the joint factor graph representation of the D-PC and SD-PC, we propose two joint source and channel decoders: a joint belief propagation (J-BP) decoder, and a systematic joint belief propagation (SJ-BP) decoder. In addition, a biased extrinsic information transfer (B-EXIT) chart is developed for various decoders as a theoretical performance evaluation tool. Both B-EXIT and simulation results show that the performance of the proposed JSCC scheme has no error floor and outperforms the turbo-like BP decoder.", "year": 2022, "venue": "Electronics", "authors": [ "Yanfei Dong", "K. Niu" ], "externalIds": { "DOI": "10.3390/electronics11213557", "CorpusId": 253287578 }, "url": "https://www.semanticscholar.org/paper/77677941d68265cfc82285aa1388f9a71e70fbc7", "referenceCount": 0, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Space-Time Design for Deep Joint Source Channel Coding of Images over MIMO Channels", "abstract": "We propose novel deep joint source-channel coding (DeepJSCC) algorithms for wireless image transmission over multi-input multi-output (MIMO) Rayleigh fading channels, when channel state information (CSI) is available only at the receiver. We consider two different schemes; one exploiting the spatial diversity and the other exploiting the spatial multiplexing gain of the MIMO channel, respectively. For the former, we utilize an orthogonal space-time block code (OSTBC) to achieve full diversity and increase the robustness against channel variations. In the latter, we directly map the input to the antennas, where the additional degree of freedom can be used to send more information about the source signal. Simulation results show that the diversity scheme outperforms the multiplexing scheme for lower signal-to-noise ratio (SNR) values and a smaller number of receive antennas at the AP. When the number of transmit antennas is greater than two, however, the full-diversity scheme becomes less beneficial. 
We also show that both the diversity and multiplexing schemes can achieve comparable performance with the state-of-the-art BPG algorithm delivered at the instantaneous capacity of the MIMO channel, which serves as an upper bound on the performance of separation-based practical systems.", "year": 2022, "venue": "International Workshop on Signal Processing Advances in Wireless Communications", "authors": [ "Chenghong Bian", "Yulin Shao", "Haotian Wu", "Deniz Gündüz" ], "externalIds": { "ArXiv": "2210.16985", "DBLP": "journals/corr/abs-2210-16985", "DOI": "10.1109/SPAWC53906.2023.10304536", "CorpusId": 253237092 }, "url": "https://www.semanticscholar.org/paper/0a770b715d81ecae3f39bbf4f146b59a9454320b", "referenceCount": 29, "citationCount": 14, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Imitation Learning-Based Implicit Semantic-Aware Communication Networks: Multi-Layer Representation and Collaborative Reasoning", "abstract": "Semantic communication has recently attracted significant interest from both industry and academia due to its potential to transform the existing data-focused communication architecture towards a more generally intelligent and goal-oriented semantic-aware networking system. Despite its promising potential, semantic communications and semantic-aware networking are still in their infancy. Most existing works focus on transporting and delivering the explicit semantic information, e.g., labels or features of objects, that can be directly identified from the source signal. The original definition of semantics as well as recent results in cognitive neuroscience suggest that it is the implicit semantic information, in particular the hidden relations connecting different concepts and feature items that play the fundamental role in recognizing, communicating, and delivering the real semantic meanings of messages. Motivated by this observation, we propose a novel reasoning-based implicit semantic-aware communication network architecture that allows destination users to directly learn a reasoning mechanism that can automatically generate complex implicit semantic information based on a limited clue information sent by the source users. Our proposed architecture can be implemented in a multi-tier cloud/edge computing networks in which multiple tiers of cloud data center (CDC) and edge servers can collaborate and support efficient semantic encoding, decoding, and implicit semantic interpretation for multiple end-users. We introduce a new multi-layer representation of semantic information taking into consideration both the hierarchical structure of implicit semantics as well as the personalized inference preference of individual users. We model the semantic reasoning process as a reinforcement learning process and then propose an imitation-based semantic reasoning mechanism learning (iRML) solution to learning a reasoning policy that imitates the inference behavior of the source user. A federated graph convolutional network (GCN)-based collaborative reasoning solution is proposed to allow multiple edge servers to jointly construct a shared semantic interpretation model based on decentralized semantic message samples. Extensive experiments have been conducted based on real-world datasets to evaluate the performance of our proposed architecture. 
Numerical results confirm that iRML offers up to 25.8 dB improvement on the semantic symbol error rate, compared to the semantic-irrelevant communication solutions.", "year": 2022, "venue": "IEEE Journal on Selected Areas in Communications", "authors": [ "Yong Xiao", "Zijian Sun", "Guangming Shi", "D. Niyato" ], "externalIds": { "DBLP": "journals/corr/abs-2210-16118", "ArXiv": "2210.16118", "DOI": "10.1109/JSAC.2022.3229419", "CorpusId": 253224305 }, "url": "https://www.semanticscholar.org/paper/231e0876861b74ec842424bb1b8a0b13b2471f3c", "referenceCount": 57, "citationCount": 23, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Deep Joint Source-Channel and Encryption Coding: Secure Semantic Communications", "abstract": "Deep learning driven joint source-channel coding (JSCC) for wireless image or video transmission, also called DeepJSCC, has been a topic of interest recently with very promising results. The idea is to map similar source samples to nearby points in the channel input space such that, despite the noise introduced by the channel, the input can be recovered with minimal distortion. However, the inherent correlation between the source sample and channel input makes DeepJSCC vulnerable to eavesdropping attacks. In this paper, we propose the first DeepJSCC scheme for wireless image transmission that is secure against eavesdroppers, called DeepJSCEC. The proposed solution not only preserves the results demonstrated by DeepJSCC, it also provides security against chosen-plaintext attacks from the eavesdropper, without the need to make assumptions about the eavesdropper's channel condition or its intended use of the intercepted signal.", "year": 2022, "venue": "ICC 2023 - IEEE International Conference on Communications", "authors": [ "Tze-Yang Tung", "Deniz Gündüz" ], "externalIds": { "DBLP": "journals/corr/abs-2208-09245", "ArXiv": "2208.09245", "DOI": "10.1109/ICC45041.2023.10278612", "CorpusId": 251710544 }, "url": "https://www.semanticscholar.org/paper/607bb58864d47976ced35a5c46de4e1bbb6a3b85", "referenceCount": 57, "citationCount": 22, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Beyond Transmitting Bits: Context, Semantics, and Task-Oriented Communications", "abstract": "Communication systems to date primarily aim at reliably communicating bit sequences. Such an approach provides efficient engineering designs that are agnostic to the meanings of the messages or to the goal that the message exchange aims to achieve. Next generation systems, however, can be potentially enriched by folding message semantics and goals of communication into their design. Further, these systems can be made cognizant of the context in which communication exchange takes place, thereby providing avenues for novel design insights. This tutorial summarizes the efforts to date, starting from its early adaptations, semantic-aware and task-oriented communications, covering the foundations, algorithms and potential implementations. The focus is on approaches that utilize information theory to provide the foundations, as well as the significant role of learning in semantics and task-aware communications.", "year": 2022, "venue": "IEEE Journal on Selected Areas in Communications", "authors": [ "Deniz Gündüz", "Zhijin Qin", "Iñaki Estella Aguerri", "Harpreet S. Dhillon", "Zhaohui Yang", "A. Yener", "Kai‐Kit Wong", "C. 
Chae" ], "externalIds": { "DBLP": "journals/jsac/GunduzQADYYWC23a", "ArXiv": "2207.09353", "DOI": "10.1109/JSAC.2022.3223408", "CorpusId": 250644448 }, "url": "https://www.semanticscholar.org/paper/2dcfa0ab3d301f0358f46c280f0bf8614efdfd2c", "referenceCount": 331, "citationCount": 305, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Neural Distributed Image Compression with Cross-Attention Feature Alignment", "abstract": "We consider the problem of compressing an information source when a correlated one is available as side information only at the decoder side, which is a special case of the distributed source coding problem in information theory. In particular, we consider a pair of stereo images, which have overlapping fields of view, and are captured by a synchronized and calibrated pair of cameras as correlated image sources. In previously proposed methods, the encoder transforms the input image to a latent representation using a deep neural network, and compresses the quantized latent representation losslessly using entropy coding. The decoder decodes the entropy-coded quantized latent representation, and reconstructs the input image using this representation and the available side information. In the proposed method, the decoder employs a cross-attention module to align the feature maps obtained from the received latent representation of the input image and a latent representation of the side information. We argue that aligning the correlated patches in the feature maps allows better utilization of the side information. We empirically demonstrate the competitiveness of the proposed algorithm on KITTI and Cityscape datasets of stereo image pairs. Our experimental results show that the proposed architecture is able to exploit the decoder-only side information in a more efficient manner compared to previous works.", "year": 2022, "venue": "IEEE Workshop/Winter Conference on Applications of Computer Vision", "authors": [ "N. Mital", "Ezgi Özyilkan", "Ali Garjani", "Deniz Gündüz" ], "externalIds": { "DBLP": "conf/wacv/MitalOGG23", "ArXiv": "2207.08489", "DOI": "10.1109/WACV56688.2023.00253", "CorpusId": 250627561 }, "url": "https://www.semanticscholar.org/paper/50c34794f6cc8e85150e9e41fedbb967d2ffbb50", "referenceCount": 41, "citationCount": 18, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "AirNet: Neural Network Transmission over the Air", "abstract": "State-of-the-art performance for many emerging edge applications is achieved by deep neural networks (DNNs). Often, the employed DNNs are location- and time-dependent, and the parameters of a specific DNN must be delivered from an edge server to the edge device rapidly and efficiently to carry out time-sensitive inference tasks. This can be considered as a joint source-channel coding (JSCC) problem, in which the goal is not to recover the DNN coefficients with the minimal distortion, but in a manner that provides the highest accuracy in the downstream task. For this purpose we introduce AirNet, a novel training and analog transmission method to deliver DNNs over the air. We first train the DNN with noise injection to counter the wireless channel noise. 
We also employ pruning to identify the most significant DNN parameters that can be delivered within the available channel bandwidth, knowledge distillation, and nonlinear bandwidth expansion to provide better error protection for the most important network parameters. We show that AirNet achieves significantly higher test accuracy compared to the separation-based alternative, and exhibits graceful degradation with channel quality.", "year": 2022, "venue": "International Symposium on Information Theory", "authors": [ "Mikolaj Jankowski", "Deniz Gündüz", "K. Mikolajczyk" ], "externalIds": { "DBLP": "journals/corr/abs-2105-11166", "DOI": "10.1109/ISIT50566.2022.9834372", "CorpusId": 235166423 }, "url": "https://www.semanticscholar.org/paper/e3dc034a6f30ba17d75cd124307dae2bb9a1625c", "referenceCount": 53, "citationCount": 20, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Robust Deep Learning Enabled Semantic Communication System for Text", "abstract": "With the advent of the 6G era, the concept of semantic communication has attracted increasing attention. Compared with conventional communication systems, semantic communication systems are not only affected by physical noise existing in the wireless communication environment, e.g., additional white Gaussian noise, but also by semantic noise due to the source and the nature of deep learning-based systems. In this paper, we elaborate on the mechanism of semantic noise. In particular, we categorize semantic noise into two categories: literal semantic noise and adversarial semantic noise. The former is caused by written errors or expression ambiguity, while the latter is caused by perturbations or attacks added to the embedding layer via the semantic channel. To prevent semantic noise from influencing semantic communication systems, we present a robust deep learning enabled semantic communication system (R-DeepSC) that leverages a calibrated self-attention mechanism and adversarial training to tackle semantic noise. Compared with baseline models that only consider physical noise for text transmission, the proposed R-DeepSC achieves remarkable performance in dealing with semantic noise under different signal-to-noise ratios.", "year": 2022, "venue": "Global Communications Conference", "authors": [ "Xianglan Peng", "Zhijin Qin", "Danlan Huang", "Xiaoming Tao", "Jianhua Lu", "Guangyi Liu", "Chengkang Pan" ], "externalIds": { "ArXiv": "2206.02596", "DBLP": "conf/globecom/PengQHTLLP22", "DOI": "10.1109/GLOBECOM48099.2022.10000901", "CorpusId": 249395585 }, "url": "https://www.semanticscholar.org/paper/8a91f82335f9d69b5d34f3bc18458c83aa529bf7", "referenceCount": 22, "citationCount": 42, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Wireless Deep Video Semantic Transmission", "abstract": "In this paper, we design a new class of high-efficiency deep joint source-channel coding methods to achieve end-to-end video transmission over wireless channels. The proposed methods exploit nonlinear transform and conditional coding architecture to adaptively extract semantic features across video frames, and transmit semantic feature domain representations over wireless channels via deep joint source-channel coding. Our framework is collected under the name deep video semantic transmission (DVST). 
In particular, benefiting from the strong temporal prior provided by the feature domain context, the learned nonlinear transform function becomes temporally adaptive, resulting in a richer and more accurate entropy model guiding the transmission of current frame. Accordingly, a novel rate adaptive transmission mechanism is developed to customize deep joint source-channel coding for video sources. It learns to allocate the limited channel bandwidth within and among video frames to maximize the overall transmission performance. The whole DVST design is formulated as an optimization problem whose goal is to minimize the end-to-end transmission rate-distortion performance under perceptual quality metrics or machine vision task performance metrics. Across standard video source test sequences and various communication scenarios, experiments show that our DVST can generally surpass traditional wireless video coded transmission schemes. The proposed DVST framework can well support future semantic communications due to its video content-aware and machine vision task integration abilities.", "year": 2022, "venue": "IEEE Journal on Selected Areas in Communications", "authors": [ "Sixian Wang", "Jincheng Dai", "Zijian Liang", "K. Niu", "Zhongwei Si", "Chao Dong", "Xiaoqi Qin", "Ping Zhang" ], "externalIds": { "DBLP": "journals/jsac/WangDLNSDQZ23", "ArXiv": "2205.13129", "DOI": "10.1109/JSAC.2022.3221977", "CorpusId": 249097416 }, "url": "https://www.semanticscholar.org/paper/1f795b57a911a59a34b68ebcddb29bb8238da717", "referenceCount": 54, "citationCount": 104, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Semantic-Preserved Communication System for Highly Efficient Speech Transmission", "abstract": "Deep learning (DL) based semantic communication methods have been explored for the efficient transmission of images, text, and speech in recent years. In contrast to traditional wireless communication methods that focus on the transmission of abstract symbols, semantic communication approaches attempt to achieve better transmission efficiency by only sending the semantic-related information of the source data. In this paper, we consider semantic-oriented speech transmission which transmits only the semantic-relevant information over the channel for the speech recognition task, and a compact additional set of semantic-irrelevant information for the speech reconstruction task. We propose a novel end-to-end DL-based transceiver which extracts and encodes the semantic information from the input speech spectrums at the transmitter and outputs the corresponding transcriptions from the decoded semantic information at the receiver. In particular, we employ a soft alignment module and a redundancy removal module to extract only the text-related semantic features while dropping semantically redundant content, greatly reducing the amount of semantic redundancy compared to existing methods. We also propose a semantic correction module to further correct the predicted transcription with semantic knowledge by leveraging a pretrained language model. For the speech to speech transmission, we further include a CTC alignment module that extracts a small number of additional semantic-irrelevant but speech-related information, such as duration, pitch, power and speaker identification of the speech for the better reconstruction of the original speech signals at the receiver. 
We also introduce a two-stage training scheme which speeds up the training of the proposed DL model. The simulation results confirm that our proposed method outperforms current methods in terms of the accuracy of the predicted text for the speech to text transmission and the quality of the recovered speech signals for the speech to speech transmission, and significantly improves transmission efficiency. More specifically, the proposed method only sends 16% of the amount of the transmitted symbols required by the existing methods while achieving about a 10% reduction in WER for the speech to text transmission. For the speech to speech transmission, it results in an even more remarkable improvement in terms of transmission efficiency with only 0.2% of the amount of the transmitted symbols required by the existing method while preserving the comparable quality of the reconstructed speech signals.", "year": 2022, "venue": "IEEE Journal on Selected Areas in Communications", "authors": [ "Tian Han", "Qianqian Yang", "Zhiguo Shi", "Shibo He", "Zhaoyang Zhang" ], "externalIds": { "ArXiv": "2205.12727", "DBLP": "journals/jsac/HanYSHZ23", "DOI": "10.1109/JSAC.2022.3221952", "CorpusId": 249062818 }, "url": "https://www.semanticscholar.org/paper/9fbf0aaadc8d1910fa1884a703328d8e04d7eb08", "referenceCount": 43, "citationCount": 89, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Channel-Adaptive Wireless Image Transmission With OFDM", "abstract": "We present a learning-based channel-adaptive joint source and channel coding (CA-JSCC) scheme for wireless image transmission over multipath fading channels. The proposed method is an end-to-end autoencoder architecture with a dual-attention mechanism employing orthogonal frequency division multiplexing (OFDM) transmission. Unlike the previous works, our approach is adaptive to channel-gain and noise-power variations by exploiting the estimated channel state information (CSI). Specifically, with the proposed dual-attention mechanism, our model can learn to map the features and allocate transmission-power resources judiciously to the available subchannels based on the estimated CSI. Extensive numerical experiments verify that CA-JSCC achieves state-of-the-art performance among existing JSCC schemes. In addition, CA-JSCC is robust to varying channel conditions and can better exploit the limited channel resources by transmitting critical features over better subchannels.", "year": 2022, "venue": "IEEE Wireless Communications Letters", "authors": [ "Haotian Wu", "Yulin Shao", "K. 
Mikolajczyk", "Deniz Gündüz" ], "externalIds": { "DBLP": "journals/wcl/WuSMG22", "ArXiv": "2205.02417", "DOI": "10.1109/LWC.2022.3204837", "CorpusId": 248524878 }, "url": "https://www.semanticscholar.org/paper/740ac1c73ef51adf1062954c661f06dc5aa745c6", "referenceCount": 23, "citationCount": 35, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Holographic communication in 5G networks", "abstract": null, "year": 2022, "venue": "Ericsson Technology Review", "authors": [ "Ali El Essaili", "Sara Thorson", "Alvin Jude", "Jörg Christian Ewert", "Natalya Tyudina", "Hector Caltenco", "Lukasz Litwic", "Bo Burman" ], "externalIds": { "DOI": "10.23919/etr.2022.9904689", "CorpusId": 253570545 }, "url": "https://www.semanticscholar.org/paper/6bb862038491f36f0b0e82053d1e09adab929044", "referenceCount": 0, "citationCount": 11, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Deep Learning-Enabled Semantic Communication Systems With Task-Unaware Transmitter and Dynamic Data", "abstract": "Existing deep learning-enabled semantic communication systems often rely on shared background knowledge between the transmitter and receiver that includes empirical data and their associated semantic information. In practice, the semantic information is defined by the pragmatic task of the receiver and cannot be known to the transmitter. The actual observable data at the transmitter can also have non-identical distribution with the empirical data in the shared background knowledge library. To address these practical issues, this paper proposes a new neural network-based semantic communication system for image transmission, where the task is unaware at the transmitter and the data environment is dynamic. The system consists of two main parts, namely the semantic coding (SC) network and the data adaptation (DA) network. The SC network learns how to extract and transmit the semantic information using a receiver-leading training process. By using the domain adaptation technique from transfer learning, the DA network learns how to convert the data observed into a similar form of the empirical data that the SC network can process without re-training. Numerical experiments show that the proposed method can be adaptive to observable datasets while keeping high performance in terms of both data recovery and task execution.", "year": 2022, "venue": "IEEE Journal on Selected Areas in Communications", "authors": [ "Hongwei Zhang", "Shuo Shao", "M. Tao", "X. Bi", "K. Letaief" ], "externalIds": { "ArXiv": "2205.00271", "DBLP": "journals/jsac/ZhangSTBL23", "DOI": "10.1109/JSAC.2022.3221991", "CorpusId": 248496668 }, "url": "https://www.semanticscholar.org/paper/748f2671da17abb91fefdbd1952ec2605818e6e5", "referenceCount": 67, "citationCount": 88, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Deep Joint Source-Channel Coding for CSI Feedback: An End-to-End Approach", "abstract": "The increased throughput brought by MIMO technology relies on the knowledge of channel state information (CSI) acquired in the base station (BS). To make the CSI feedback overhead affordable for the evolution of MIMO technology (e.g., massive MIMO and ultra-massive MIMO), deep learning (DL) is introduced to deal with the CSI compression task. 
In traditional communication systems, the compressed CSI bits is treated equally and expected to be transmitted accurately over the noisy channel. While the errors occur due to the limited bandwidth or low signal-to-noise ratios (SNRs), the reconstruction performance of the CSI degrades drastically. As a branch of semantic communications, deep joint source-channel coding (DJSCC) scheme performs better than the separate source-channel coding (SSCC) scheme—the cornerstone of traditional communication systems—in the limited bandwidth and low SNRs. In this paper, we propose a DJSCC based framework for the CSI feedback task. In particular, the proposed method can simultaneously learn from the CSI source and the wireless channel. Instead of truncating CSI via Fourier transform in the delay domain in existing methods, we apply non-linear transform networks to compress the CSI. Furthermore, we adopt an SNR adaption mechanism to deal with wireless channel variations. The extensive experiments demonstrate the validity, adaptability, and generality of the proposed framework.", "year": 2022, "venue": "IEEE Journal on Selected Areas in Communications", "authors": [ "Jia-lin Xu", "Bo Ai", "Ning Wang", "Wei Chen" ], "externalIds": { "DBLP": "journals/jsac/XuAWC23", "ArXiv": "2203.16005", "DOI": "10.1109/JSAC.2022.3221963", "CorpusId": 247794182 }, "url": "https://www.semanticscholar.org/paper/328e5747ecb9ff33a7a085574fbfcf854386f0c5", "referenceCount": 60, "citationCount": 26, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "ChannelGAN: Deep Learning-Based Channel Modeling and Generating", "abstract": "The increasing complexity on channel modeling and the cost on collecting plenty of high-quality wireless channel data have become the main bottlenecks of developing deep learning (DL) based wireless communications. In this letter, a DL-based channel modeling and generating approach namely ChannelGAN is proposed. Specifically, the ChannelGAN is designed on a small set of 3rd generation partnerships project (3GPP) link-level multiple-input multiple-output (MIMO) channel. Moreover, two evaluation mechanisms including i) power comparison from the perspective of delay and antenna domain and ii) cross validation are implemented where the power comparison proves the consistency between the modeled fake channel and real channel, and the cross validation verifies the effectiveness and availability of the generated fake channel for supporting related DL-based channel state information (CSI) feedback.", "year": 2022, "venue": "IEEE Wireless Communications Letters", "authors": [ "Han Xiao", "Wenqiang Tian", "Wendong Liu", "J. Shen" ], "externalIds": { "DBLP": "journals/wcl/XiaoTLS22", "DOI": "10.1109/lwc.2021.3140102", "CorpusId": 245727729 }, "url": "https://www.semanticscholar.org/paper/3372d86f3efa259bed9267a61a81ad84eebc5556", "referenceCount": 0, "citationCount": 35, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Rate-Distortion Theory for Strategic Semantic Communication", "abstract": "This paper analyzes the fundamental limit of the strategic semantic communication problem in which a transmitter obtains a limited number of indirect observations of an intrinsic semantic information source and can then influence the receiver’s decoding by sending a limited number of messages over an imperfect channel. 
The transmitter and the receiver can have different distortion measures and can make rational decisions about their encoding and decoding strategies, respectively. The decoder can also have some side information (e.g., background knowledge and/or information obtained from previous communications) about the semantic source to assist its interpretation of the semantic information. We focus particularly on the case that the transmitter can commit to an encoding strategy and study the impact of the strategic decision making on the rate distortion of semantic communication. Three equilibrium solution concepts including the optimal Stackelberg equilibrium, robust Stackelberg equilibrium, as well as Nash equilibrium are studied and compared. The optimal encoding and decoding strategy profiles under various equilibrium solutions are derived. We prove that committing to an encoding strategy cannot always bring benefit to the encoder. We provide a feasible condition under which committing to an encoding strategy can always reduce the distortion of semantic communication. We consider an example with a dictionary-based semantic information source to verify our observation.", "year": 2022, "venue": "Information Theory Workshop", "authors": [ "Yong Xiao", "Xu Zhang", "Yingyu Li", "Guangming Shi" ], "externalIds": { "ArXiv": "2202.03711", "DBLP": "journals/corr/abs-2202-03711", "DOI": "10.1109/ITW54588.2022.9965825", "CorpusId": 246652290 }, "url": "https://www.semanticscholar.org/paper/28b24265c19d4ddcffc3432b51afc0f47d51f4b5", "referenceCount": 26, "citationCount": 16, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Concatenated Spatially Coupled LDPC Codes With Sliding Window Decoding for Joint Source-Channel Coding", "abstract": "In this paper, a method for joint source-channel coding (JSCC) based on concatenated spatially coupled low-density parity-check (SC-LDPC) codes is investigated. A construction consisting of two SC-LDPC codes is proposed: one for source coding and the other for channel coding, with a joint belief propagation-based decoder. Also, a novel windowed decoding (WD) scheme is presented with significantly reduced latency and complexity requirements. The asymptotic behavior for various graph node degrees is analyzed using a protograph-based Extrinsic Information Transfer (EXIT) chart analysis for both LDPC block codes with block decoding and for SC-LDPC codes with the WD scheme, showing robust performance for concatenated SC-LDPC codes. Simulation results show a notable performance improvement compared to existing state-of-the-art JSCC schemes based on LDPC codes with comparable latency and complexity constraints.", "year": 2022, "venue": "IEEE Transactions on Communications", "authors": [ "A. Golmohammadi", "David G. M. Mitchell" ], "externalIds": { "DBLP": "journals/tcom/GolmohammadiM22", "DOI": "10.1109/tcomm.2021.3126750", "CorpusId": 243931212 }, "url": "https://www.semanticscholar.org/paper/cd4cb4595bbca0bcb0d271a31333f6b26f1c5882", "referenceCount": 35, "citationCount": 16, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On Sub-optimality of Random Binning for Distributed Hypothesis Testing", "abstract": "We investigate the quantize and binning scheme, known as the Shimokawa-Han-Amari (SHA) scheme, for the distributed hypothesis testing. We develop tools to evaluate the critical rate attainable by the SHA scheme. 
For a product of binary symmetric double sources, we present a sequential scheme that improves upon the SHA scheme.", "year": 2022, "venue": "International Symposium on Information Theory", "authors": [ "Shun Watanabe" ], "externalIds": { "ArXiv": "2201.13005", "DBLP": "conf/isit/Watanabe22", "DOI": "10.1109/ISIT50566.2022.9834275", "CorpusId": 246431102 }, "url": "https://www.semanticscholar.org/paper/b0204fd3ce103f877321d4efee41393a51f95504", "referenceCount": 31, "citationCount": 7, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "An Indirect Rate-Distortion Characterization for Semantic Sources: General Model and the Case of Gaussian Observation", "abstract": "A new source model, which consists of an intrinsic state part and an extrinsic observation part, is proposed and its information-theoretic characterization, namely its rate-distortion function, is defined and analyzed. Such a source model is motivated by the recent surge of interest in the semantic aspect of information: the intrinsic state corresponds to the semantic feature of the source, which in general is not observable but can only be inferred from the extrinsic observation. There are two distortion measures, one between the intrinsic state and its reproduction, and the other between the extrinsic observation and its reproduction. Under a given code rate, the tradeoff between these two distortion measures is characterized by the rate-distortion function, which is solved via the indirect rate-distortion theory and is termed the semantic rate-distortion function of the source. As an application of the general model and its analysis, the case of Gaussian extrinsic observation is studied, assuming a linear relationship between the intrinsic state and the extrinsic observation, under a quadratic distortion structure. The semantic rate-distortion function is shown to be the solution of a convex programming problem with respect to an error covariance matrix, and a reverse water-filling type of solution is provided when the model further satisfies a diagonalizability condition.", "year": 2022, "venue": "IEEE Transactions on Communications", "authors": [ "Jiakun Liu", "Shuo Shao", "Wenyi Zhang", "H. Poor" ], "externalIds": { "DBLP": "journals/tcom/LiuSZP22", "ArXiv": "2201.12477", "DOI": "10.1109/TCOMM.2022.3194978", "CorpusId": 246431034 }, "url": "https://www.semanticscholar.org/paper/adecfd81cf7b159804bf9d466cd0508ae90bf9d7", "referenceCount": 39, "citationCount": 28, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Resource Allocation for Text Semantic Communications", "abstract": "Semantic communications have shown its great potential to improve the transmission reliability, especially in the low signal-to-noise regime. However, resource allocation for semantic communications still remains unexplored, which is a critical issue in guaranteeing the semantic transmission reliability and the communication efficiency. To fill this gap, we investigate the spectral efficiency in the semantic domain and rethink the semantic-aware resource allocation issue. Specifically, taking text semantic communication as an example, the semantic spectral efficiency (S-SE) is defined for the first time, and is used to optimize resource allocation in terms of channel assignment and the number of transmitted semantic symbols. 
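The semantic rate-distortion entry above reports a reverse water-filling type of solution in the Gaussian case. As a reference point only, the sketch below implements classical reverse water-filling for independent Gaussian components under squared-error distortion; it is the textbook routine rather than the paper's semantic variant, and the variances and target distortion are made-up inputs.

```python
import numpy as np

def reverse_waterfill(variances, target_D, tol=1e-10):
    """Classical reverse water-filling: allocate per-component distortion
    D_i = min(theta, sigma_i^2) so that sum(D_i) = target_D, and return
    the rate R = 0.5 * sum(log(sigma_i^2 / D_i)) in nats."""
    sig2 = np.asarray(variances, dtype=float)
    assert 0 < target_D <= sig2.sum(), "distortion must lie in (0, sum of variances]"
    lo, hi = 0.0, sig2.max()
    while hi - lo > tol:                          # bisection on the water level theta
        theta = 0.5 * (lo + hi)
        if np.minimum(theta, sig2).sum() < target_D:
            lo = theta                            # water level too low: not enough distortion spent
        else:
            hi = theta
    D_i = np.minimum(theta, sig2)
    rate = 0.5 * np.sum(np.log(sig2 / D_i))
    return rate, D_i

# Example: three source components with unequal variances (illustrative numbers).
R, D_alloc = reverse_waterfill([4.0, 1.0, 0.25], target_D=1.5)
print(f"rate = {R:.3f} nats, per-component distortion = {D_alloc}")
```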
Additionally, for fair comparison of semantic and conventional communication systems, a transform method is developed to convert the conventional bit-based spectral efficiency to the S-SE. Simulation results demonstrate the validity and feasibility of the proposed resource allocation method, as well as the superiority of semantic communications in terms of the S-SE.", "year": 2022, "venue": "IEEE Wireless Communications Letters", "authors": [ "Leibing Yan", "Zhijin Qin", "Rui Zhang", "Yongzhao Li", "Geoffrey Y. Li" ], "externalIds": { "DBLP": "journals/wcl/YanQZLL22", "ArXiv": "2201.06023", "DOI": "10.1109/LWC.2022.3170849", "CorpusId": 248392419 }, "url": "https://www.semanticscholar.org/paper/0c4ecac6f1f736a39b368c319e9c8ffe6b7dfbf6", "referenceCount": 19, "citationCount": 113, "influentialCitationCount": 24, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Nonlinear Transform Source-Channel Coding for Semantic Communications", "abstract": "In this paper, we propose a class of high-efficiency deep joint source-channel coding methods that can closely adapt to the source distribution under the nonlinear transform, it can be collected under the name nonlinear transform source-channel coding (NTSCC). In the considered model, the transmitter first learns a nonlinear analysis transform to map the source data into latent space, then transmits the latent representation to the receiver via deep joint source-channel coding. Our model incorporates the nonlinear transform as a strong prior to effectively extract the source semantic features and provide side information for source-channel coding. Unlike existing conventional deep joint source-channel coding methods, the proposed NTSCC essentially learns both the source latent representation and an entropy model as the prior on the latent representation. Accordingly, novel adaptive rate transmission and hyperprior-aided codec refinement mechanisms are developed to upgrade deep joint source-channel coding. The whole system design is formulated as an optimization problem whose goal is to minimize the end-to-end transmission rate-distortion performance under established perceptual quality metrics. Across test image sources with various resolutions, we find that the proposed NTSCC transmission method generally outperforms both the analog transmission using the standard deep joint source-channel coding and the classical separation-based digital transmission. Notably, the proposed NTSCC method can potentially support future semantic communications due to its content-aware ability and perceptual optimization goal.", "year": 2021, "venue": "IEEE Journal on Selected Areas in Communications", "authors": [ "Jincheng Dai", "Sixian Wang", "Kailin Tan", "Zhongwei Si", "Xiaoqi Qin", "K. 
Niu", "Ping Zhang" ], "externalIds": { "ArXiv": "2112.10961", "DBLP": "journals/jsac/DaiWTSQ0022", "DOI": "10.1109/JSAC.2022.3180802", "CorpusId": 245353647 }, "url": "https://www.semanticscholar.org/paper/decd3df462fae688b5b56cdf392f0460ad8b460b", "referenceCount": 47, "citationCount": 130, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Design of Code Pair for Protograph-LDPC Codes-Based JSCC System With Joint Shuffled Scheduling Decoding Algorithm", "abstract": "Although there are many studies on code optimization of the joint source-channel coding (JSCC) system based on double protograph low-density parity-check codes with the joint belief propagation (JBP) algorithm, but it is still unknown whether the source code and channel code (as a code pair) can perform well when the joint shuffled scheduling decoding (JSSD) algorithm is adopted. In this letter, two decoding threshold analysis algorithms, including joint shuffled protograph extrinsic information transfer (PEXIT) and source shuffled PEXIT algorithm, are proposed to calculate joint/source decoding thresholds for this system with the JSSD algorithm. With the proposed algorithms, it is found that the optimized code pairs for this system with the JBP algorithm may not perform well with the JSSD algorithm, implying that code pair with the JSSD algorithm needs to be redesigned. Then, a two-stage optimized framework is proposed to design the code pair for this system with the JSSD algorithm. Simulations and decoding threshold analysis both show that the proposed code pair for this system with the JSSD algorithm can obtain lower error floor and better waterfall performance than the existing code pairs.", "year": 2021, "venue": "IEEE Communications Letters", "authors": [ "Zhiping Xu", "Lin Wang", "Shaohua Hong", "Guanrong Chen" ], "externalIds": { "DBLP": "journals/icl/XuWHC21", "DOI": "10.1109/lcomm.2021.3112717", "CorpusId": 239097154 }, "url": "https://www.semanticscholar.org/paper/fe6369a6a63e235d6cd3e87495e2b930014aaa25", "referenceCount": 0, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DeepWiVe: Deep-Learning-Aided Wireless Video Transmission", "abstract": "We present DeepWiVe, the first-ever end-to-end joint source-channel coding (JSCC) video transmission scheme that leverages the power of deep neural networks (DNNs) to directly map video signals to channel symbols, combining video compression, channel coding, and modulation steps into a single neural transform. Our DNN decoder predicts residuals without distortion feedback, which improves the video quality by accounting for occlusion/disocclusion and camera movements. We simultaneously train different bandwidth allocation networks for the frames to allow variable bandwidth transmission. Then, we train a bandwidth allocation network using reinforcement learning (RL) that optimizes the allocation of limited available channel bandwidth among video frames to maximize the overall visual quality. Our results show that DeepWiVe can overcome the cliff-effect, which is prevalent in conventional separation-based digital communication schemes, and achieve graceful degradation with the mismatch between the estimated and actual channel qualities. 
DeepWiVe outperforms H.264 video compression followed by low-density parity check (LDPC) codes in all channel conditions by up to 0.0485 in terms of the multi-scale structural similarity index measure (MS-SSIM), and H.265+ LDPC by up to 0.0069 on average. We also illustrate the importance of optimizing bandwidth allocation in JSCC video transmission by showing that our optimal bandwidth allocation policy is superior to uniform allocation as well as a heuristic policy benchmark.", "year": 2021, "venue": "IEEE Journal on Selected Areas in Communications", "authors": [ "Tze-Yang Tung", "Deniz Gündüz" ], "externalIds": { "DBLP": "journals/jsac/TungG22", "ArXiv": "2111.13034", "DOI": "10.1109/JSAC.2022.3191354", "CorpusId": 244709579 }, "url": "https://www.semanticscholar.org/paper/f9b54803e40a2722ab7a9e2c231870d7b37d08a4", "referenceCount": 66, "citationCount": 86, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "Deep Joint Source-Channel Coding for Wireless Image Transmission with Adaptive Rate Control", "abstract": "We present a novel adaptive deep joint source-channel coding (JSCC) scheme for wireless image transmission. The proposed scheme supports multiple rates using a single deep neural network (DNN) model and learns to dynamically control the rate based on the channel condition and image contents. Specifically, a policy network is introduced to exploit the tradeoff space between the rate and signal quality. To train the policy network, the Gumbel-Softmax trick is adopted to make the policy network differentiable and hence the whole JSCC scheme can be trained end-to-end. To the best of our knowledge, this is the first deep JSCC scheme that can automatically adjust its rate using a single network model. Experiments show that our scheme successfully learns a reasonable policy that decreases channel bandwidth utilization for high SNR scenarios or simple image contents. For an arbitrary target rate, our rate-adaptive scheme using a single model achieves similar performance compared to an optimized model specifically trained for that fixed target rate. To reproduce our results, we make the source code publicly available at https://github.com/mingyuyng/Dynamic_JSCC.", "year": 2021, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Mingyu Yang", "Hun-Seok Kim" ], "externalIds": { "ArXiv": "2110.04456", "DBLP": "conf/icassp/YangK22", "DOI": "10.1109/icassp43922.2022.9746335", "CorpusId": 238583132 }, "url": "https://www.semanticscholar.org/paper/fdb39ff3c97221371ff1b6e260a5ea9ca56bd112", "referenceCount": 23, "citationCount": 73, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Joint Source and Channel Coding Using Double Polar Codes", "abstract": "In this letter, a double polar code (D-Polar) for joint source and channel coding (JSCC) is proposed, in which the source compress is implemented by a polar code, the channel error correction is performed by a systematic polar code. Furthermore, a turbo-like belief propagation (TL-BP) decoder consisted of a channel BP decoder and a source BP decoder is proposed for joint source and channel decoding in the receiver. In this TL-BP decoder, the soft information is exchanged between the channel BP decoder and the source BP decoder so as to improve the efficiency of channel decoding in utilizing the source information residual in the compressed bits. 
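The adaptive-rate JSCC entry above makes a discrete bandwidth choice trainable via the Gumbel-Softmax trick. One plausible way to wire that up is sketched below: a small policy network scores a fixed grid of candidate rates, and a straight-through Gumbel-Softmax sample selects a transmission mask over the latent. The rate grid, dimensions, and the `RatePolicy` module are illustrative assumptions, not the cited design.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

LATENT_DIM = 128
RATE_CHOICES = [0.25, 0.5, 0.75, 1.0]          # candidate fractions of the latent to transmit

# One binary mask per candidate rate: keep only the first ratio * LATENT_DIM symbols.
MASK_BANK = torch.stack([
    (torch.arange(LATENT_DIM) < int(r * LATENT_DIM)).float() for r in RATE_CHOICES
])                                              # shape (n_choices, LATENT_DIM)

class RatePolicy(nn.Module):
    """Select a bandwidth mask from (pooled features, SNR) with straight-through
    Gumbel-Softmax, so the discrete choice remains trainable end to end."""
    def __init__(self, feat_dim=64, n_choices=len(RATE_CHOICES)):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(feat_dim + 1, 32), nn.ReLU(),
                                 nn.Linear(32, n_choices))

    def forward(self, feats, snr_db, tau=1.0):
        logits = self.net(torch.cat([feats, snr_db.unsqueeze(-1)], dim=-1))
        onehot = F.gumbel_softmax(logits, tau=tau, hard=True)   # (batch, n_choices)
        return onehot @ MASK_BANK                               # (batch, LATENT_DIM) selected mask

policy = RatePolicy()
feats, snr = torch.randn(8, 64), torch.full((8,), 10.0)
z = torch.randn(8, LATENT_DIM)                  # latent from some JSCC encoder (placeholder)
z_tx = z * policy(feats, snr)                   # zero out untransmitted symbols
```

Mixing precomputed masks with the (nearly) one-hot sample keeps the selection differentiable, so such a policy can in principle be trained jointly with the JSCC encoder and decoder.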
Simulation results show that the performance of the proposed D-Polar JSCC scheme with TL-BP decoder is significantly improved compared with the existing source-channel joint polarization scheme.", "year": 2021, "venue": "IEEE Communications Letters", "authors": [ "Yanfei Dong", "K. Niu", "Jincheng Dai", "Sen Wang", "Yifei Yuan" ], "externalIds": { "DBLP": "journals/icl/DongNDWY21", "MAG": "3170982283", "DOI": "10.1109/LCOMM.2021.3088941", "CorpusId": 236709501 }, "url": "https://www.semanticscholar.org/paper/45f8b04d1ba35555cd32f88f72558790ecd948b9", "referenceCount": 0, "citationCount": 17, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Computer Science" ] }, { "title": "Joint Source-Channel Polar Coding for Biased Bernoulli Sources at Short Blocklengths", "abstract": "We study the design of joint source-channel polar coding for biased Bernoulli sources in this paper, focusing on short blocklengths. We first apply the CRC-aided successive cancellation decoding scheme to source polar coding and demonstrate its superior performance at short blocklengths. Then we design a joint decoding scheme for concatenated source-channel polar codes, using soft information from the source decoder and source CRC bits to aid channel decoding. Simulations show that the proposed scheme outperforms separate source-channel polar coding and can even break through the finite blocklength bound of separate source-channel coding in some cases.", "year": 2021, "venue": "International Symposium on Turbo Codes and Iterative Information Processing", "authors": [ "Mengfan Zheng", "Jiaqi Gu", "Mengyao Ma", "Cong Ling" ], "externalIds": { "DBLP": "conf/istc/ZhengGM021", "DOI": "10.1109/ISTC49272.2021.9594160", "CorpusId": 244043624 }, "url": "https://www.semanticscholar.org/paper/e7ef7e717283d1b3b231d3ad36ea57f86bbdc812", "referenceCount": 0, "citationCount": 7, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Effective Communications: A Joint Learning and Communication Framework for Multi-Agent Reinforcement Learning Over Noisy Channels", "abstract": "We propose a novel formulation of the “effectiveness problem” in communications, put forth by Shannon and Weaver in their seminal work “The Mathematical Theory of Communication”, by considering multiple agents communicating over a noisy channel in order to achieve better coordination and cooperation in a multi-agent reinforcement learning (MARL) framework. Specifically, we consider a multi-agent partially observable Markov decision process (MA-POMDP), in which the agents, in addition to interacting with the environment, can also communicate with each other over a noisy communication channel. The noisy communication channel is considered explicitly as part of the dynamics of the environment, and the message each agent sends is part of the action that the agent can take. As a result, the agents learn not only to collaborate with each other but also to communicate “effectively” over a noisy channel. This framework generalizes both the traditional communication problem, where the main goal is to convey a message reliably over a noisy channel, and the “learning to communicate” framework that has received recent attention in the MARL literature, where the underlying communication channels are assumed to be error-free. 
We show via examples that the joint policy learned using the proposed framework is superior to that where the communication is considered separately from the underlying MA-POMDP. This is a very powerful framework, which has many real world applications, from autonomous vehicle planning to drone swarm control, and opens up the rich toolbox of deep reinforcement learning for the design of multi-user communication systems.", "year": 2021, "venue": "IEEE Journal on Selected Areas in Communications", "authors": [ "Tze-Yang Tung", "S. Kobus", "Joan S. Pujol Roig", "Deniz Gündüz" ], "externalIds": { "DBLP": "journals/jsac/TungKRG21", "DOI": "10.1109/JSAC.2021.3087248", "CorpusId": 232607226 }, "url": "https://www.semanticscholar.org/paper/29a8ab93b8388faa9213eb3f1b6ff2df57008970", "referenceCount": 50, "citationCount": 66, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "6G: The Personal Tactile Internet—And Open Questions for Information Theory", "abstract": "The initial vision of cellular communications was to deliver ubiquitous voice communications to anyone anywhere. In a simplified view, 1G delivered voice services for business customers, and only 2G for consumers. Next, this also initiated the appetite for cellular data, for which 3G was designed. However, Blackberry delivered business smartphones, and 4G made smartphones a consumer device. The promise of 5G is to start the Tactile Internet, to control real and virtual objects in real-time via cellular. However, the hype around 5G is, again, focusing on business customers, in particular in the context of campus networks. Consequently, 6G must provide an infrastructure to enable remote-controlled mobile robotic solutions for everyone—the Personal Tactile Internet. Which role can information and communication theory play in this context, and what are the big challenges ahead?", "year": 2021, "venue": "IEEE BITS the Information Theory Magazine", "authors": [ "G. Fettweis", "H. Boche" ], "externalIds": { "DOI": "10.1109/mbits.2021.3118662", "CorpusId": 242754604 }, "url": "https://www.semanticscholar.org/paper/6c080a309fc902f8cdf8ba00cdf2e56811aa9c56", "referenceCount": 62, "citationCount": 90, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "A Survey on Federated Learning for Resource-Constrained IoT Devices", "abstract": "Federated learning (FL) is a distributed machine learning strategy that generates a global model by learning from multiple decentralized edge clients. FL enables on-device training, keeping the client’s local data private, and further, updating the global model based on the local model updates. While FL methods offer several advantages, including scalability and data privacy, they assume there are available computational resources at each edge-device/client. However, the Internet-of-Things (IoT)-enabled devices, e.g., robots, drone swarms, and low-cost computing devices (e.g., Raspberry Pi), may have limited processing ability, low bandwidth and power, or limited storage capacity. In this survey article, we propose to answer this question: how to train distributed machine learning models for resource-constrained IoT devices? To this end, we first explore the existing studies on FL, relative assumptions for distributed implementation using IoT devices, and explore their drawbacks. We then discuss the implementation challenges and issues when applying FL to an IoT environment. 
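The federated learning entries above revolve around FedAvg-style aggregation of locally trained models under communication constraints. For concreteness, here is a minimal weighted parameter-averaging step in PyTorch, assuming ideal error-free uplinks; the linear model, client count, and dataset sizes are toy placeholders, and compressing or subsampling these updates is exactly the resource-constrained setting the surveyed work addresses.

```python
from typing import Dict, List
import torch

def fedavg(client_states: List[Dict[str, torch.Tensor]],
           client_sizes: List[int]) -> Dict[str, torch.Tensor]:
    """Federated averaging: combine client model states, weighting each client
    by the size of its local dataset (uplinks assumed error-free)."""
    total = float(sum(client_sizes))
    return {
        name: torch.stack([n / total * state[name].float()
                           for n, state in zip(client_sizes, client_states)]).sum(dim=0)
        for name in client_states[0]
    }

# Toy usage: three "clients" that share the same linear model architecture.
clients = [torch.nn.Linear(10, 2) for _ in range(3)]
sizes = [120, 80, 200]                       # local dataset sizes (made up)
global_state = fedavg([c.state_dict() for c in clients], sizes)

server_model = torch.nn.Linear(10, 2)
server_model.load_state_dict(global_state)   # aggregated global model for the next round
```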
We highlight an overview of FL and provide a comprehensive survey of the problem statements and emerging challenges, particularly during applying FL within heterogeneous IoT environments. Finally, we point out the future research directions for scientists and researchers who are interested in working at the intersection of FL and resource-constrained IoT environments.", "year": 2021, "venue": "IEEE Internet of Things Journal", "authors": [ "Ahmed Imteaj", "Urmish Thakker", "Shiqiang Wang", "Jian Li", "M. Amini" ], "externalIds": { "MAG": "3182125009", "DBLP": "journals/iotj/ImteajTWLA22", "DOI": "10.1109/jiot.2021.3095077", "CorpusId": 237755045 }, "url": "https://www.semanticscholar.org/paper/195b88ba2ab7bc56ed24ea5b5d3f28622d2c6166", "referenceCount": 0, "citationCount": 333, "influentialCitationCount": 11, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Txt2Vid: Ultra-Low Bitrate Compression of Talking-Head Videos via Text", "abstract": "Video represents the majority of internet traffic today, driving a continual race between the generation of higher quality content, transmission of larger file sizes, and the development of network infrastructure. In addition, the recent COVID-19 pandemic fueled a surge in the use of video conferencing tools. Since videos take up considerable bandwidth ( $\\sim 100$ Kbps to a few Mbps), improved video compression can have a substantial impact on network performance for live and pre-recorded content, providing broader access to multimedia content worldwide. We present a novel video compression pipeline, called Txt2Vid, which dramatically reduces data transmission rates by compressing webcam videos (“talking-head videos”) to a text transcript. The text is transmitted and decoded into a realistic reconstruction of the original video using recent advances in deep learning based voice cloning and lip syncing models. Our generative pipeline achieves two to three orders of magnitude reduction in the bitrate as compared to the standard audio-video codecs (encoders-decoders), while maintaining equivalent Quality-of-Experience based on a subjective evaluation by users ( $n=242$ ) in an online study. The Txt2Vid framework opens up the potential for creating novel applications such as enabling audio-video communication during poor internet connectivity, or in remote terrains with limited bandwidth. The code for this work is available at https://github.com/tpulkit/txt2vid.git.", "year": 2021, "venue": "IEEE Journal on Selected Areas in Communications", "authors": [ "Pulkit Tandon", "Shubham Chandak", "Pat Pataranutaporn", "Yimeng Liu", "Anesu M. Mapuranga", "P. Maes", "T. Weissman", "Misha Sra" ], "externalIds": { "ArXiv": "2106.14014", "DBLP": "journals/jsac/TandonCPLMMWS23", "DOI": "10.1109/JSAC.2022.3221953", "CorpusId": 235658854 }, "url": "https://www.semanticscholar.org/paper/559f884a9f15e70d0d4bedc2abdc08eade6c4a87", "referenceCount": 75, "citationCount": 17, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Towards goal-oriented semantic signal processing: Applications and future challenges", "abstract": null, "year": 2021, "venue": "Digit. Signal Process.", "authors": [ "M. Kalfa", "Mehmetcan Gok", "Arda Atalik", "Busra Tegin", "T. 
Duman", "Orhan Arikan" ], "externalIds": { "MAG": "3167080497", "DBLP": "journals/dsp/KalfaGATDA21", "ArXiv": "2109.11885", "DOI": "10.1016/j.dsp.2021.103134", "CorpusId": 236228752 }, "url": "https://www.semanticscholar.org/paper/e2f1ee87615553cc50355927292e2acd370165bd", "referenceCount": 189, "citationCount": 46, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "Communicating Correlated Sources Over MAC and Interference Channels II: Joint Source-Channel Coding", "abstract": "We present the second part of our work on communicating correlated sources over multiple access (MAC) and interference channels (IC). Specifically, we undertake a Shannon-theoretic study of the above scenarios and focus on characterizing sufficient conditions for lossless recoverability of the sources at the decoder(s). We enhance the fixed block-length (B-L) coding technique by incorporating the technique of inducing source correlation onto channel inputs, originally discovered by Cover, El Gamal and Salehi. In contrast to the first part, performance analysis of a joint source-channel decoder poses new challenges. We enhance the earlier developed suite of coding and analytical tools to overcome these challenges and derive (simplified) single-letter characterizations for a new set of sufficient conditions for both scenarios. For both the MAC and IC problems, the derived sufficient conditions are (i) subsumed in the current known tightest, and (ii) strictly weaker for identified examples. Lastly, we propose simple ‘plug-in’ approaches that can further weaken the derived sufficient conditions. Our findings enable us to subsume Dueck’s findings (1981) and go even further for the example considered therein.", "year": 2021, "venue": "IEEE Transactions on Information Theory", "authors": [ "Arun Padakandla" ], "externalIds": { "DBLP": "journals/tit/Padakandla21", "MAG": "3144240116", "DOI": "10.1109/TIT.2021.3069756", "CorpusId": 204874334 }, "url": "https://www.semanticscholar.org/paper/6877d44dca028c599f2db2657965a8ea693b3547", "referenceCount": 20, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "HoloCast+: Hybrid Digital-Analog Transmission for Graceful Point Cloud Delivery With Graph Fourier Transform", "abstract": "Point cloud is an emerging data format useful for various applications such has holographic display, autonomous vehicle, and augmented reality. Conventionally, communications of point cloud data have relied on digital compression and digital modulation for three-dimensional (3D) data streaming. However, such digital-based delivery schemes have fundamental issues called cliff and leveling effects, where the 3D reconstruction quality is a step function in terms of wireless channel quality. We propose a novel scheme of point cloud delivery, called HoloCast$+$, to overcome cliff and leveling effects. Specifically, our method utilizes hybrid digital-analog coding, integrating digital compression and analog coding based on graph Fourier transform (GFT), to gracefully improve 3D reconstruction quality with the improvement of channel quality. 
We demonstrate that HoloCast$+$ offers better 3D reconstruction quality in terms of the symmetric mean square error (sMSE) by up to 18.3 dB and 10.5 dB, respectively, compared to conventional digital-based and analog-based delivery methods in wireless fading environments.", "year": 2021, "venue": "IEEE transactions on multimedia", "authors": [ "T. Fujihashi", "T. Koike-Akino", "Takashi Watanabe", "P. Orlik" ], "externalIds": { "MAG": "3160358343", "DBLP": "journals/tmm/FujihashiKWO22", "DOI": "10.1109/TMM.2021.3077772", "CorpusId": 234797881 }, "url": "https://www.semanticscholar.org/paper/44b0e56d90625b2099678ac649530932bd3aa447", "referenceCount": 53, "citationCount": 21, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Distributed Learning in Wireless Networks: Recent Progress and Future Challenges", "abstract": "The next-generation of wireless networks will enable many machine learning (ML) tools and applications to efficiently analyze various types of data collected by edge devices for inference, autonomy, and decision making purposes. However, due to resource constraints, delay limitations, and privacy challenges, edge devices cannot offload their entire collected datasets to a cloud server for centrally training their ML models or inference purposes. To overcome these challenges, distributed learning and inference techniques have been proposed as a means to enable edge devices to collaboratively train ML models without raw data exchanges, thus reducing the communication overhead and latency as well as improving data privacy. However, deploying distributed learning over wireless networks faces several challenges including the uncertain wireless environment (e.g., dynamic channel and interference), limited wireless resources (e.g., transmit power and radio spectrum), and hardware resources (e.g., computational power). This paper provides a comprehensive study of how distributed learning can be efficiently and effectively deployed over wireless edge networks. We present a detailed overview of several emerging distributed learning paradigms, including federated learning, federated distillation, distributed inference, and multi-agent reinforcement learning. For each learning framework, we first introduce the motivation for deploying it over wireless networks. Then, we present a detailed literature review on the use of communication techniques for its efficient deployment. We then introduce an illustrative example to show how to optimize wireless networks to improve its performance. Finally, we introduce future research opportunities. In a nutshell, this paper provides a holistic set of guidelines on how to deploy a broad range of distributed learning frameworks over real-world wireless communication networks.", "year": 2021, "venue": "IEEE Journal on Selected Areas in Communications", "authors": [ "Mingzhe Chen", "Deniz Gündüz", "Kaibin Huang", "W. Saad", "M. Bennis", "Aneta Vulgarakis Feljan", "H. 
Poor" ], "externalIds": { "ArXiv": "2104.02151", "DBLP": "journals/jsac/ChenGHSBFP21a", "DOI": "10.1109/jsac.2021.3118346", "CorpusId": 233033386 }, "url": "https://www.semanticscholar.org/paper/654eaff6b4d8890b7b54dabcfa63822c90da42d5", "referenceCount": 212, "citationCount": 326, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Survey of Quantization Methods for Efficient Neural Network Inference", "abstract": "As soon as abstract mathematical computations were adapted to computation on digital computers, the problem of efficient representation, manipulation, and communication of the numerical values in those computations arose. Strongly related to the problem of numerical representation is the problem of quantization: in what manner should a set of continuous real-valued numbers be distributed over a fixed discrete set of numbers to minimize the number of bits required and also to maximize the accuracy of the attendant computations? This perennial problem of quantization is particularly relevant whenever memory and/or computational resources are severely restricted, and it has come to the forefront in recent years due to the remarkable performance of Neural Network models in computer vision, natural language processing, and related areas. Moving from floating-point representations to low-precision fixed integer values represented in four bits or less holds the potential to reduce the memory footprint and latency by a factor of 16x; and, in fact, reductions of 4x to 8x are often realized in practice in these applications. Thus, it is not surprising that quantization has emerged recently as an important and very active sub-area of research in the efficient implementation of computations associated with Neural Networks. In this article, we survey approaches to the problem of quantizing the numerical values in deep Neural Network computations, covering the advantages/disadvantages of current methods. With this survey and its organization, we hope to have presented a useful snapshot of the current research in quantization for Neural Networks and to have given an intelligent organization to ease the evaluation of future research in this area.", "year": 2021, "venue": "Low-Power Computer Vision", "authors": [ "A. Gholami", "Sehoon Kim", "Zhen Dong", "Z. Yao", "Michael W. Mahoney", "K. Keutzer" ], "externalIds": { "DBLP": "journals/corr/abs-2103-13630", "ArXiv": "2103.13630", "DOI": "10.1201/9781003162810-13", "CorpusId": 232352683 }, "url": "https://www.semanticscholar.org/paper/04e283adccf66742130bde4a4dedcda8f549dd7e", "referenceCount": 296, "citationCount": 811, "influentialCitationCount": 54, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Semantic Communications in Networked Systems: A Data Significance Perspective", "abstract": "We present our vision for a departure from the established way of architecting and assessing communication networks, by incorporating the semantics of information, defined not necessarily as the meaning of the messages, but as their significance, possibly within a real-time constraint, relative to the purpose of the data exchange. 
We argue that research efforts must focus on laying the theoretical foundations of a redesign of the entire process of information generation, transmission, and usage for networked systems in unison by developing advanced semantic metrics for communications and control systems; an optimal sampling theory combining signal sparsity and timeliness, for real-time prediction/reconstruction/control under communication constraints and delays; temporally effective compressed sensing techniques for decision making and inference directly in the compressed domain; and semantic-aware data generation, channel coding, packetization, feedback, and multiple and random access schemes that reduce the volume of data and the energy consumption, increasing the number of supportable devices. This paradigm shift targets jointly optimal information gathering, information dissemination, and decision making policies in networked systems.", "year": 2021, "venue": "IEEE Network", "authors": [ "E. Uysal", "Onur Kaya", "A. Ephremides", "J. Gross", "M. Codreanu", "P. Popovski", "M. Assaad", "G. Liva", "A. Munari", "T. Soleymani", "B. Soret", "K. Johansson" ], "externalIds": { "DBLP": "journals/network/UysalKEGCPALMSS22", "ArXiv": "2103.05391", "DOI": "10.1109/MNET.106.2100636", "CorpusId": 247447260 }, "url": "https://www.semanticscholar.org/paper/7d1a9c693af4f099c3c7b8e2a8c92c5525105dd2", "referenceCount": 28, "citationCount": 112, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Semantic Communication Systems for Speech Transmission", "abstract": "Semantic communications could improve the transmission efficiency significantly by exploring the semantic information. In this paper, we make an effort to recover the transmitted speech signals in the semantic communication systems, which minimizes the error at the semantic level rather than the bit or symbol level. Particularly, we design a deep learning (DL)-enabled semantic communication system for speech signals, named DeepSC-S. In order to improve the recovery accuracy of speech signals, especially for the essential information, DeepSC-S is developed based on an attention mechanism by utilizing a squeeze-and-excitation (SE) network. The motivation behind the attention mechanism is to identify the essential speech information by providing higher weights to them when training the neural network. Moreover, in order to facilitate the proposed DeepSC-S for dynamic channel environments, we find a general model to cope with various channel conditions without retraining. Furthermore, we investigate DeepSC-S in telephone systems as well as multimedia transmission systems to verify the model adaptation in practice. The simulation results demonstrate that our proposed DeepSC-S outperforms the traditional communications in both cases in terms of the speech signals metrics, such as signal-to-distortion ratio and perceptual evaluation of speech distortion.
Besides, DeepSC-S is more robust to channel variations, especially in the low signal-to-noise (SNR) regime.", "year": 2021, "venue": "IEEE Journal on Selected Areas in Communications", "authors": [ "Zhenzi Weng", "Zhijin Qin" ], "externalIds": { "DBLP": "journals/jsac/WengQ21", "ArXiv": "2102.12605", "MAG": "3166791908", "DOI": "10.1109/JSAC.2021.3087240", "CorpusId": 232046408 }, "url": "https://www.semanticscholar.org/paper/108d62c4d1f67304cd7b25412967dd0e5b724245", "referenceCount": 42, "citationCount": 328, "influentialCitationCount": 17, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "From Semantic Communication to Semantic-Aware Networking: Model, Architecture, and Open Problems", "abstract": "Existing communication systems are mainly built based on Shannon's information theory, which deliberately ignores the semantic aspects of communication. The recent iteration of wireless technology, 5G and beyond, promises to support a plethora of services enabled by carefully tailored network capabilities based on contents, requirements, as well as semantics. This has sparked significant interest in semantic communication, a novel paradigm that involves the meaning of messages in communication. In this article, we first review classic semantic communication frameworks and then summarize key challenges that hinder its popularity. We observe that some semantic communication processes such as semantic detection, knowledge modeling, and coordination can be resource-consuming and inefficient, especially for communication between a single source and a destination. We therefore propose a novel architecture based on federated edge intelligence for supporting resource-efficient semantic-aware networking. Our architecture allows each user to offload computationally intensive semantic encoding and decoding tasks to edge servers and protect its proprietary model-related information by coordinating via intermediate results. Our simulation result shows that the proposed architecture can reduce resource consumption and significantly improve communication efficiency.", "year": 2020, "venue": "IEEE Communications Magazine", "authors": [ "Guangming Shi", "Yong Xiao", "Yingyu Li", "Xuemei Xie" ], "externalIds": { "DBLP": "journals/corr/abs-2012-15405", "ArXiv": "2012.15405", "DOI": "10.1109/MCOM.001.2001239", "CorpusId": 229924090 }, "url": "https://www.semanticscholar.org/paper/3f82b424213b3c9428193aefaec243c215a1cfe0", "referenceCount": 22, "citationCount": 196, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Wireless Image Transmission Using Deep Source Channel Coding With Attention Modules", "abstract": "Recent research on joint source channel coding (JSCC) for wireless communications has achieved great success owing to the employment of deep learning (DL). However, the existing work on DL based JSCC usually trains the designed network to operate under a specific signal-to-noise ratio (SNR) regime, without taking into account that the SNR level during the deployment stage may differ from that during the training stage. A number of networks are required to cover the scenario with a broad range of SNRs, which is computationally inefficient (in the training stage) and requires large storage. To overcome these drawbacks, our paper proposes a novel method called Attention DL based JSCC (ADJSCC) that can successfully operate with different SNR levels during transmission.
This design is inspired by the resource assignment strategy in traditional JSCC, which dynamically adjusts the compression ratio in source coding and the channel coding rate according to the channel SNR. This is achieved by resorting to attention mechanisms because these are able to allocate computing resources to more critical tasks. Instead of applying the resource allocation strategy in traditional JSCC, the ADJSCC uses the channel-wise soft attention to scale features according to SNR conditions. We compare the ADJSCC method with the state-of-the-art DL based JSCC method through extensive experiments to demonstrate its adaptability, robustness and versatility. Compared with the existing methods, the proposed method takes less storage and is more robust in the presence of channel mismatch.", "year": 2020, "venue": "IEEE transactions on circuits and systems for video technology (Print)", "authors": [ "Jia-lin Xu", "B. Ai", "Wei Chen", "Ang Yang", "Peng Sun", "Miguel L. Rodrigues" ], "externalIds": { "ArXiv": "2012.00533", "DBLP": "journals/corr/abs-2012-00533", "MAG": "3107617916", "DOI": "10.1109/TCSVT.2021.3082521", "CorpusId": 227239173 }, "url": "https://www.semanticscholar.org/paper/1d3900c44eb9840205927dcfbde2d370a574c0b9", "referenceCount": 58, "citationCount": 158, "influentialCitationCount": 19, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Towards Self-learning Edge Intelligence in 6G", "abstract": "Edge intelligence, also called edge-native artificial intelligence (AI), is an emerging technological framework focusing on seamless integration of AI, communication networks, and mobile edge computing. It has been considered to be one of the key missing components in the existing 5G network and is widely recognized to be one of the most sought-after functions for tomorrow's wireless 6G cellular systems. In this article, we identify the key requirements and challenges of edge-native AI in 6G. A self-learning architecture based on self-supervised Generative Adversarial Nets (GANs) is introduced to demonstrate the potential performance improvement that can be achieved by automatic data learning and synthesizing at the edge of the network. We evaluate the performance of our proposed self-learning architecture in a university campus shuttle system connected via a 5G network. Our result shows that the proposed architecture has the potential to identify and classify unknown services that emerge in edge computing networks. Future trends and key research problems for self-learning-enabled 6G edge intelligence are also discussed.", "year": 2020, "venue": "arXiv.org", "authors": [ "Yong Xiao", "Guangming Shi", "Yingyu Li", "W. Saad", "H. V. Poor" ], "externalIds": { "DBLP": "journals/corr/abs-2010-00176", "ArXiv": "2010.00176", "MAG": "3089550924", "CorpusId": 222090032 }, "url": "https://www.semanticscholar.org/paper/3d98f5aad887a50d950a76ab8700c1a489fcb59d", "referenceCount": 18, "citationCount": 100, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Communicate to Learn at the Edge", "abstract": "Bringing the success of modern machine learning (ML) techniques to mobile devices can enable many new services and businesses, but also poses significant technical and research challenges. Two factors that are critical for the success of ML algorithms are massive amounts of data and processing power, both of which are plentiful, but highly distributed at the network edge.
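The ADJSCC entry above describes channel-wise soft attention that rescales encoder features according to the channel SNR. Below is a minimal sketch of that idea: pooled feature statistics concatenated with the SNR drive an MLP that outputs per-channel gates. Layer sizes and the `SNRAttention` name are illustrative assumptions rather than the published module.

```python
import torch
import torch.nn as nn

class SNRAttention(nn.Module):
    """Channel-wise soft attention conditioned on the channel SNR: pooled feature statistics
    and the SNR (in dB) are mapped to per-channel scaling factors in (0, 1)."""
    def __init__(self, channels):
        super().__init__()
        self.mlp = nn.Sequential(nn.Linear(channels + 1, channels // 2), nn.ReLU(),
                                 nn.Linear(channels // 2, channels), nn.Sigmoid())

    def forward(self, feat, snr_db):
        # feat: (batch, channels, H, W); snr_db: (batch,)
        pooled = feat.mean(dim=(2, 3))                       # global average pooling
        ctx = torch.cat([pooled, snr_db.unsqueeze(-1)], dim=-1)
        scale = self.mlp(ctx).unsqueeze(-1).unsqueeze(-1)    # (batch, channels, 1, 1)
        return feat * scale                                  # SNR-aware feature re-weighting

feat = torch.randn(4, 32, 16, 16)                            # encoder feature map (placeholder)
snr = torch.tensor([0.0, 5.0, 10.0, 20.0])                   # one SNR value per sample
out = SNRAttention(32)(feat, snr)                            # same shape, rescaled per channel
```

A single model with this kind of conditioning can cover a range of SNRs, which is the storage and robustness argument made in the abstract above.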
Moreover, edge devices are connected through bandwidth- and power-limited wireless links that suffer from noise, time variations, and interference. Information and coding theory have laid the foundations of reliable and efficient communications in the presence of channel imperfections, whose application in modern wireless networks has been a tremendous success. However, there is a clear disconnect between the current coding and communication schemes, and the ML algorithms deployed at the network edge. In this article, we challenge the current approach that treats these problems separately, and argue for a joint communication and learning paradigm for both the training and inference stages of edge learning.", "year": 2020, "venue": "IEEE Communications Magazine", "authors": [ "Deniz Gündüz", "David Burth Kurka", "Mikolaj Jankowski", "M. Amiri", "Emre Ozfatura", "Sreejith Sreekumar" ], "externalIds": { "DBLP": "journals/corr/abs-2009-13269", "ArXiv": "2009.13269", "MAG": "3088158085", "DOI": "10.1109/MCOM.001.2000394", "CorpusId": 221970332 }, "url": "https://www.semanticscholar.org/paper/6b8f1c25e47efd019210086d20318488e00c8897", "referenceCount": 19, "citationCount": 61, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Bandwidth-Agile Image Transmission With Deep Joint Source-Channel Coding", "abstract": "We propose deep learning based communication methods for adaptive-bandwidth transmission of images over wireless channels. We consider the scenario in which images are transmitted progressively in layers over time or frequency, and such layers can be aggregated by receivers in order to increase the quality of their reconstructions. We investigate two scenarios, one in which the layers are sent sequentially, and incrementally contribute to the refinement of a reconstruction, and another in which the layers are independent and can be retrieved in any order. Those scenarios correspond to the well known problems of successive refinement and multiple descriptions, respectively, in the context of joint source-channel coding (JSCC). We propose DeepJSCC- $l$ , an innovative solution that uses convolutional autoencoders, and present three architectures with different complexity trade-offs. To the best of our knowledge, this is the first practical multiple-description JSCC scheme developed and tested for practical information sources and channels. Numerical results show that DeepJSCC- $l$ can learn to transmit the source progressively with negligible losses in the end-to-end performance compared with a single transmission. 
Moreover, DeepJSCC- $l$ has comparable performance with state of the art digital progressive transmission schemes in the challenging low signal-to-noise ratio (SNR) and small bandwidth regimes, with the additional advantage of graceful degradation with channel SNR.", "year": 2020, "venue": "IEEE Transactions on Wireless Communications", "authors": [ "David Burth Kurka", "Deniz Gündüz" ], "externalIds": { "DBLP": "journals/corr/abs-2009-12480", "ArXiv": "2009.12480", "MAG": "3089229692", "DOI": "10.1109/TWC.2021.3090048", "CorpusId": 221970035 }, "url": "https://www.semanticscholar.org/paper/12f5c8e7e84c8a766f771eb64cc5d3aa3438230c", "referenceCount": 47, "citationCount": 88, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "On the Capacity Enlargement of Gaussian Broadcast Channels With Passive Noisy Feedback", "abstract": "It is well known that the capacity region of an average transmit power constrained Gaussian Broadcast Channel (GBC) with independent noise realizations at the receivers is enlarged by the presence of causal noiseless feedback. When the noise variances at the receivers are identical, even passive feedback via independent memoryless Gaussian links can lead to a capacity region enlargement. The last fact remains true even when the feedback noise variance is very high, and available only from one of the receivers. While such capacity enlargements are feasible for several other feedback models in the Gaussian BC setting, it is also known that feedback does not change the capacity region for physically degraded broadcast channels. In this paper, we consider a two user GBC with independent noise realizations at the receivers, where the feedback links from the receivers are corrupted by independent additive Gaussian noise processes. We investigate the set of four noise variances, two forward and two feedback, for which no capacity enlargement is possible. A sharp characterization of this region is derived, i.e., any quadruple outside the presented region will lead to a capacity enlargement, whereas quadruples inside will leave the capacity region unchanged. Our results lead to the conclusion that when the forward noise variances are different, too noisy a feedback from one of the receivers alone is not always beneficial for enlarging the capacity region, be it from the stronger user or the weaker one, in sharp contrast to the case of equal forward noise variances.", "year": 2020, "venue": "IEEE Transactions on Information Theory", "authors": [ "Aditya Narayan Ravi", "S. R. Pillai", "V. Prabhakaran", "Michèle A. Wigger" ], "externalIds": { "DBLP": "journals/corr/abs-2009-08765", "MAG": "3087569883", "ArXiv": "2009.08765", "DOI": "10.1109/tit.2021.3096639", "CorpusId": 221802457 }, "url": "https://www.semanticscholar.org/paper/8ef8a9e3de1c96bb7e40458f41cd212eab3a7d55", "referenceCount": 20, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "6G Wireless Systems: Vision, Requirements, Challenges, Insights, and Opportunities", "abstract": "Mobile communications have been undergoing a generational change every ten years or so. However, the time difference between the so-called “G’s” is also decreasing. While fifth-generation (5G) systems are becoming a commercial reality, there is already significant interest in systems beyond 5G, which we refer to as the sixth generation (6G) of wireless systems. 
In contrast to the already published papers on the topic, we take a top-down approach to 6G. More precisely, we present a holistic discussion of 6G systems beginning with lifestyle and societal changes driving the need for next-generation networks. This is followed by a discussion into the technical requirements needed to enable 6G applications, based on which we dissect key challenges and possibilities for practically realizable system solutions across all layers of the Open Systems Interconnection stack (i.e., from applications to the physical layer). Since many of the 6G applications will need access to an order-of-magnitude more spectrum, utilization of frequencies between 100 GHz and 1 THz becomes of paramount importance. As such, the 6G ecosystem will feature a diverse range of frequency bands, ranging from below 6 GHz up to 1 THz. We comprehensively characterize the limitations that must be overcome to realize working systems in these bands and provide a unique perspective on the physical and higher layer challenges relating to the design of next-generation core networks, new modulation and coding methods, novel multiple-access techniques, antenna arrays, wave propagation, radio frequency transceiver design, and real-time signal processing. We rigorously discuss the fundamental changes required in the core networks of the future, such as the redesign or significant reduction of the transport architecture that serves as a major source of latency for time-sensitive applications. This is in sharp contrast to the present hierarchical network architectures that are not suitable to realize many of the anticipated 6G services. While evaluating the strengths and weaknesses of key candidate 6G technologies, we differentiate what may be practically achievable over the next decade, relative to what is possible in theory. Keeping this in mind, we present concrete research challenges for each of the discussed system aspects, providing inspiration for what follows.", "year": 2020, "venue": "Proceedings of the IEEE", "authors": [ "H. Tataria", "M. Shafi", "A. Molisch", "M. Dohler", "H. Sjöland", "F. Tufvesson" ], "externalIds": { "DBLP": "journals/corr/abs-2008-03213", "ArXiv": "2008.03213", "MAG": "3048081097", "DOI": "10.1109/JPROC.2021.3061701", "CorpusId": 221083129 }, "url": "https://www.semanticscholar.org/paper/02453b90a9beff1a7fc932670dc1a7e3b2768c98", "referenceCount": 264, "citationCount": 718, "influentialCitationCount": 42, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Wireless Image Retrieval at the Edge", "abstract": "We study the image retrieval problem at the wireless edge, where an edge device captures an image, which is then used to retrieve similar images from an edge server. These can be images of the same person or a vehicle taken from other cameras at different times and locations. Our goal is to maximize the accuracy of the retrieval task under power and bandwidth constraints over the wireless link. Due to the stringent delay constraint of the underlying application, sending the whole image at a sufficient quality is not possible. We propose two alternative schemes based on digital and analog communications, respectively. In the digital approach, we first propose a deep neural network (DNN) aided retrieval-oriented image compression scheme, whose output bit sequence is transmitted over the channel using conventional channel codes. 
In the analog joint source and channel coding (JSCC) approach, the feature vectors are directly mapped into channel symbols. We evaluate both schemes on image based re-identification (re-ID) tasks under different channel conditions, including both static and fading channels. We show that the JSCC scheme significantly increases the end-to-end accuracy, speeds up the encoding process, and provides graceful degradation with channel conditions. The proposed architecture is evaluated through extensive simulations on different datasets and channel conditions, as well as through ablation studies.", "year": 2020, "venue": "IEEE Journal on Selected Areas in Communications", "authors": [ "Mikolaj Jankowski", "Deniz Gündüz", "K. Mikolajczyk" ], "externalIds": { "ArXiv": "2007.10915", "MAG": "3102125291", "DBLP": "journals/corr/abs-2007-10915", "DOI": "10.1109/JSAC.2020.3036955", "CorpusId": 220665570 }, "url": "https://www.semanticscholar.org/paper/df1b8813ff342394183a39af82164e44ea7bde13", "referenceCount": 49, "citationCount": 172, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Deep Learning Enabled Semantic Communication Systems", "abstract": "Recently, deep learned enabled end-to-end communication systems have been developed to merge all physical layer blocks in the traditional communication systems, which make joint transceiver optimization possible. Powered by deep learning, natural language processing has achieved great success in analyzing and understanding a large amount of language texts. Inspired by research results in both areas, we aim to provide a new view on communication systems from the semantic level. Particularly, we propose a deep learning based semantic communication system, named DeepSC, for text transmission. Based on the Transformer, the DeepSC aims at maximizing the system capacity and minimizing the semantic errors by recovering the meaning of sentences, rather than bit- or symbol-errors in traditional communications. Moreover, transfer learning is used to ensure the DeepSC applicable to different communication environments and to accelerate the model training process. To justify the performance of semantic communications accurately, we also initialize a new metric, named sentence similarity. Compared with the traditional communication system without considering semantic information exchange, the proposed DeepSC is more robust to channel variation and is able to achieve better performance, especially in the low signal-to-noise (SNR) regime, as demonstrated by the extensive simulation results.", "year": 2020, "venue": "IEEE Transactions on Signal Processing", "authors": [ "Huiqiang Xie", "Zhijin Qin", "Geoffrey Y. Li", "B. Juang" ], "externalIds": { "MAG": "3036851434", "DBLP": "journals/tsp/XieQLJ21", "ArXiv": "2006.10685", "DOI": "10.1109/TSP.2021.3071210", "CorpusId": 219792180 }, "url": "https://www.semanticscholar.org/paper/f9314fd99be5f2b1b3efcfab87197d578160d553", "referenceCount": 40, "citationCount": 673, "influentialCitationCount": 102, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Adversarial Networks for Secure Wireless Communications", "abstract": "We propose a data-driven secure wireless communication scheme, in which the goal is to transmit a signal to a legitimate receiver with minimal distortion, while keeping some information about the signal private from an eavesdropping adversary. 
When the data distribution is known, the optimal trade-off between the reconstruction quality at the legitimate receiver and the leakage to the adversary can be characterised in the information theoretic asymptotic limit. In this paper, we assume that we do not know the data distribution, but instead have access to a dataset, and we are interested in the finite blocklength regime rather than the asymptotic limits. We propose a data-driven adversarially trained deep joint source-channel coding architecture, and demonstrate through experiments with CIFAR-10 dataset that it is possible to transmit to the legitimate receiver with minimal end-to-end distortion while concealing information on the image class from the adversary.", "year": 2020, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Thomas Marchioro", "N. Laurenti", "Deniz Gündüz" ], "externalIds": { "MAG": "3033181284", "DBLP": "conf/icassp/MarchioroLG20", "DOI": "10.1109/ICASSP40776.2020.9053216", "CorpusId": 216518032 }, "url": "https://www.semanticscholar.org/paper/9fb959f74b7590a7050963902ef76c8d053e7411", "referenceCount": 15, "citationCount": 18, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Analog Coding in Emerging Memory Systems", "abstract": null, "year": 2020, "venue": "Scientific Reports", "authors": [ "Ryan Zarcone", "Jesse Engel", "Jesse Engel", "Jesse Engel", "S. Eryilmaz", "W. Wan", "Sangbum Kim", "M. BrightSky", "C. Lam", "H. Lung", "B. Olshausen", "H.-S. Philip Wong" ], "externalIds": { "MAG": "3018710918", "PubMedCentral": "7176644", "DOI": "10.1038/s41598-020-63723-z", "CorpusId": 216049540, "PubMed": "32322007" }, "url": "https://www.semanticscholar.org/paper/18f45b5bf08cccce84aca4131ea7bfadb5880c8f", "referenceCount": 65, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Distributed Deep Convolutional Compression for Massive MIMO CSI Feedback", "abstract": "Massive multiple-input multiple-output (MIMO) systems require downlink channel state information (CSI) at the base station (BS) to achieve spatial diversity and multiplexing gains. In a frequency division duplex (FDD) multiuser massive MIMO network, each user needs to compress and feedback its downlink CSI to the BS. The CSI overhead scales with the number of antennas, users and subcarriers, and becomes a major bottleneck for the overall spectral efficiency. In this paper, we propose a deep learning (DL)-based CSI compression scheme, called DeepCMC, composed of convolutional layers followed by quantization and entropy coding blocks. In comparison with previous DL-based CSI reduction structures, DeepCMC proposes a novel fully-convolutional neural network (NN) architecture, with residual layers at the decoder, and incorporates quantization and entropy coding blocks into its design. DeepCMC is trained to minimize a weighted rate-distortion cost, which enables a trade-off between the CSI quality and its feedback overhead. Simulation results demonstrate that DeepCMC outperforms the state of the art CSI compression schemes in terms of the reconstruction quality of CSI for the same compression rate. We also propose a distributed version of DeepCMC for a multi-user MIMO scenario to encode and reconstruct the CSI from multiple users in a distributed manner. 
Distributed DeepCMC not only utilizes the inherent CSI structures of a single MIMO user for compression, but also benefits from the correlations among the channel matrices of nearby users to further improve the performance in comparison with DeepCMC. We also propose a reduced-complexity training method for distributed DeepCMC, allowing to scale it to multiple users, and suggest a cluster-based distributed DeepCMC approach for practical implementation.", "year": 2020, "venue": "IEEE Transactions on Wireless Communications", "authors": [ "Mahdi Boloursaz Mashhadi", "Qianqian Yang", "Deniz Gündüz" ], "externalIds": { "DBLP": "journals/twc/MashhadiYG21", "ArXiv": "2003.04684", "MAG": "3010790189", "DOI": "10.1109/TWC.2020.3043502", "CorpusId": 233227249 }, "url": "https://www.semanticscholar.org/paper/aec177b6be6bca1db0fc17ea13f73224ecbcbc56", "referenceCount": 34, "citationCount": 66, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science", "Mathematics" ] }, { "title": "Joint Source-Channel Coding of Images with (not very) Deep Learning", "abstract": "—Almost all wireless communication systems today are designed based on essentially the same digital approach, that separately optimizes the compression and channel coding stages. Using machine learning techniques, we investigate whether end-to-end transmission can be learned from scratch, thus using joint source-channel coding (JSCC) rather than the separation approach. This paper reviews and advances recent developments on our proposed technique, deep-JSCC , an autoencoder-based solution for generating robust and compact codes directly from images pixels, being comparable or even superior in performance to state-of-the-art (SoA) separation-based schemes (BPG+LDPC). Additionally, we show that deep-JSCC can be expanded to exploit a series of important features, such as graceful degradation, versatility to different channels and domains, variable transmission rate through successive refinement, and its capability to exploit channel output feedback.", "year": 2020, "venue": "", "authors": [ "Deniz Gündüz" ], "externalIds": { "MAG": "3119819694", "DOI": "10.3929/ETHZ-B-000402967", "CorpusId": 231932078 }, "url": "https://www.semanticscholar.org/paper/c5c23966e59268f215a88db9beac7124373f83ef", "referenceCount": 29, "citationCount": 25, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DeepJSCC-f: Deep Joint Source-Channel Coding of Images With Feedback", "abstract": "We consider wireless transmission of images in the presence of channel output feedback. From a Shannon theoretic perspective feedback does not improve the asymptotic end-to-end performance, and separate source coding followed by capacity-achieving channel coding, which ignores the feedback signal, achieves the optimal performance. It is well known that separation is not optimal in the practical finite blocklength regime; however, there are no known practical joint source-channel coding (JSCC) schemes that can exploit the feedback signal and surpass the performance of separation-based schemes. Inspired by the recent success of deep learning methods for JSCC, we investigate how noiseless or noisy channel output feedback can be incorporated into the transmission system to improve the reconstruction quality at the receiver. 
We introduce an autoencoder-based JSCC scheme, which we call DeepJSCC- $f$ , that exploits the channel output feedback, and provides considerable improvements in terms of the end-to-end reconstruction quality for fixed-length transmission, or in terms of the average delay for variable-length transmission. To the best of our knowledge, this is the first practical JSCC scheme that can fully exploit channel output feedback, demonstrating yet another setting in which modern machine learning techniques can enable the design of new and efficient communication methods that surpass the performance of traditional structured coding-based designs.", "year": 2019, "venue": "IEEE Journal on Selected Areas in Information Theory", "authors": [ "David Burth Kurka", "Deniz Gündüz" ], "externalIds": { "MAG": "2991154148", "DBLP": "journals/jsait/KurkaG20", "ArXiv": "1911.11174", "DOI": "10.1109/JSAIT.2020.2987203", "CorpusId": 218846365 }, "url": "https://www.semanticscholar.org/paper/3bdb100ddc701f20c428c66a257dbf791bd9fddf", "referenceCount": 55, "citationCount": 230, "influentialCitationCount": 20, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Distributed Hypothesis Testing With Variable-Length Coding", "abstract": "The problem of distributed testing against independence with variable-length coding is considered when the average and not the maximum communication load is constrained as in previous works. The paper characterizes the optimum type-II error exponent of a single-sensor single-decision center system given a maximum type-I error probability when communication is either over a noise-free rate- $R$ link or over a noisy discrete memoryless channel (DMC) with stop-feedback. Specifically, let $\\epsilon $ denote the maximum allowed type-I error probability. Then the optimum exponent of the system with a rate- $R$ link under a constraint on the average communication load coincides with the optimum exponent of such a system with a rate $R/(1-\\epsilon)$ link under a maximum communication load constraint. A strong converse thus does not hold under an average communication load constraint. A similar observation also holds for testing against independence over DMCs. With variable-length coding and stop-feedback and under an average communication load constraint, the optimum type-II error exponent over a DMC of capacity $C$ equals the optimum exponent under fixed-length coding and a maximum communication load constraint when communication is over a DMC of capacity $C/(1-\\epsilon)$ .", "year": 2019, "venue": "IEEE Journal on Selected Areas in Information Theory", "authors": [ "Sadaf Salehkalaibar", "Michèle A. 
Wigger" ], "externalIds": { "MAG": "2980980649", "DBLP": "journals/corr/abs-2005-08610", "ArXiv": "1910.08261", "DOI": "10.1109/JSAIT.2020.3039839", "CorpusId": 204788705 }, "url": "https://www.semanticscholar.org/paper/9387d3713e90e79f3858051a887de4a2d42998e4", "referenceCount": 36, "citationCount": 12, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "CSI Feedback Based on Deep Learning for Massive MIMO Systems", "abstract": "Aiming at the problem of high complexity and low feedback accuracy of existing channel state information (CSI) feedback algorithms for frequency-division duplexing (FDD) massive multiple-input multiple-output (MIMO) systems, this paper proposes a CSI compression feedback algorithm based on deep learning (DL), which is suitable for single-user and multi-user scenarios in massive MIMO systems. This algorithm considers the spatial correlation of massive MIMO channel and uses bidirectional long short-term memory (Bi-LSTM) and bidirectional convolutional long short-term memory (Bi-ConvLSTM) network to decompress and recover the CSI for single-user and multi-user, respectively. The proposed DL-based CSI feedback network is trained offline by massive MIMO channel data and could learn the structural characteristics of the massive MIMO channel by fully exploiting the channel information in the training samples. The simulation results show that compared with several classical CSI compression feedback algorithms, the proposed CSI compression feedback algorithm has lower computational complexity, higher feedback accuracy, and better system performance in massive MIMO systems.", "year": 2019, "venue": "IEEE Access", "authors": [ "Yong Liao", "Haimei Yao", "Yuanxiao Hua", "Chunguo Li" ], "externalIds": { "DBLP": "journals/access/LiaoYHL19", "MAG": "2953889685", "DOI": "10.1109/ACCESS.2019.2924673", "CorpusId": 198145693 }, "url": "https://www.semanticscholar.org/paper/816fef9b6e91afba2c01bdf6eaeb276621ece199", "referenceCount": 20, "citationCount": 41, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Reliability Function of Variable-Length Lossy Joint Source-Channel Coding With Feedback", "abstract": "We consider transmission of discrete memoryless sources (DMSes) across discrete memoryless channels (DMCs) using variable-length lossy source-channel codes with feedback. The reliability function (optimum error exponent) is shown to be equal to $\\max \\{0, B(1-R(D)/C)\\}$ , where $R(D)$ is the rate-distortion function of the source, $B$ is the maximum relative entropy between output distributions of the DMC, and $C$ is the Shannon capacity of the channel. We show that in this asymptotic regime, separate source-channel coding is, in fact, optimal.", "year": 2019, "venue": "IEEE Transactions on Information Theory", "authors": [ "Lan V. Truong", "V. 
Tan" ], "externalIds": { "DBLP": "journals/tit/TruongT19a", "MAG": "2946562560", "DOI": "10.1109/TIT.2019.2911527", "CorpusId": 54835300 }, "url": "https://www.semanticscholar.org/paper/74a2dc0f7742f11db8af27bb833a449f28e32815", "referenceCount": 24, "citationCount": 4, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "HoloCast: Graph Signal Processing for Graceful Point Cloud Delivery", "abstract": "In conventional point cloud delivery, a sender uses octree-based digital video compression to stream three-dimensional (3D) points and the corresponding color attributes over band-limited links, e.g., wireless channels, for 3D scene reconstructions. However, the digital-based delivery schemes have an issue called cliff effect, where the 3D reconstruction quality is a step function in terms of wireless channel quality. We propose a novel scheme of point cloud delivery, called HoloCast, to gracefully improve the reconstruction quality with the improvement of wireless channel quality. HoloCast regards the 3D points and color components as graph signals and directly transmits linear-transformed signals based on graph Fourier transform (GFT), without digital quantization and entropy coding operations. One of main contributions in HoloCast is that the use of GFT can deal with non-ordered and non-uniformly distributed multidimensional signals such as holographic data unlike conventional delivery schemes. Performance results with point cloud data show that HoloCast yields better 3D reconstruction quality compared to digital-based methods in noisy wireless environment.", "year": 2019, "venue": "ICC 2019 - 2019 IEEE International Conference on Communications (ICC)", "authors": [ "T. Fujihashi", "T. Koike-Akino", "Takashi Watanabe", "P. Orlik" ], "externalIds": { "MAG": "2958816915", "DBLP": "journals/corr/abs-1903-03247", "ArXiv": "1903.03247", "DOI": "10.1109/ICC.2019.8761819", "CorpusId": 72940923 }, "url": "https://www.semanticscholar.org/paper/f1136d2a3b1a8fa0e4f691b5cb4a01e154b8bd4a", "referenceCount": 23, "citationCount": 15, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "A Lower Bound on the Expected Distortion of Joint Source-Channel Coding", "abstract": "We consider the classic joint source-channel coding problem of transmitting a memoryless source over a memoryless channel. The focus of this work is on the long-standing open problem of finding the rate of convergence of the smallest attainable expected distortion to its asymptotic value, as a function of the blocklength $n$ . Our main result is that in general the convergence rate is not faster than $n^{-1/2}$ . 
In particular, we show that for the problem of transmitting i.i.d uniform bits over a binary symmetric channels with Hamming distortion, the smallest attainable distortion (bit error rate) is at least $\\Omega (n^{-1/2})$ above the asymptotic value, if the “bandwidth expansion ratio” is above 1.", "year": 2019, "venue": "IEEE Transactions on Information Theory", "authors": [ "Yuval Kochman", "Or Ordentlich", "Yury Polyanskiy" ], "externalIds": { "DBLP": "journals/tit/KochmanOP20", "MAG": "3013605664", "ArXiv": "1902.07979", "DOI": "10.1109/TIT.2020.2983148", "CorpusId": 67789494 }, "url": "https://www.semanticscholar.org/paper/287dbe67f18593f90fdee63282565656031f80f7", "referenceCount": 42, "citationCount": 9, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Integrated Design of JSCC Scheme Based on Double Protograph LDPC Codes System", "abstract": "In this letter, an integrated design of joint source-channel coding scheme based on the double protograph low-density parity-check (DP-LDPC) codes is proposed. As degree-2 variable nodes (VNs) structure plays an important role in the performance of both water-fall and error-floor regions, the maximum number of degree-2 VNs for DP-LDPC codes is determined from a global perspective. By comparing different allocation schemes for degree-2 VNs, new design principles are proposed. Several DP-LDPC codes with different numbers of degree-2 VNs are proposed. The simulated results reveal the superiority of the optimized DP-LDPC codes, which are in line with the decoding threshold analysis by a joint protograph extrinsic information transfer algorithm.", "year": 2019, "venue": "IEEE Communications Letters", "authors": [ "Qiwang Chen", "Lin Wang", "Shaohua Hong", "Yifan Chen" ], "externalIds": { "MAG": "2906955189", "DBLP": "journals/icl/ChenWHC19", "DOI": "10.1109/LCOMM.2018.2890243", "CorpusId": 59311486 }, "url": "https://www.semanticscholar.org/paper/5f2226bcca40800d782ca19a0cdc5b59f41dd244", "referenceCount": 14, "citationCount": 22, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Error-Resilient Analog Image Storage and Compression with Analog-Valued RRAM Arrays: An Adaptive Joint Source-Channel Coding Approach", "abstract": "We demonstrate by experiment an image storage and compression task by directly storing analog image data onto an analog-valued RRAM array. A joint source-channel coding algorithm is developed with a neural network to encode and retrieve natural images. The encoder and decoder adapt jointly to the statistics of the images and the statistics of the RRAM array in order to minimize distortion. This adaptive joint source-channel coding method is resilient to RRAM array non-idealities such as cycle-to-cycle and device-to-device variations, time-dependent variability, and non-functional storage cells, while achieving a reasonable reconstruction performance of ∼ 20 dB using only 0.1 devices/pixel for the analog image.", "year": 2018, "venue": "International Electron Devices Meeting", "authors": [ "Xin Zheng", "Ryan Zarcone", "Dylan M. Paiton", "Joon Sohn", "W. Wan", "B. Olshausen", "H. P. 
Wong" ], "externalIds": { "MAG": "2913334599", "DOI": "10.1109/IEDM.2018.8614612", "CorpusId": 58676060 }, "url": "https://www.semanticscholar.org/paper/a13333f8311006cbfe29088bc16f93ccb4e8a161", "referenceCount": 1, "citationCount": 21, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Asymptotic Task-Based Quantization With Application to Massive MIMO", "abstract": "Quantizers take part in nearly every digital signal processing system that operates on physical signals. They are commonly designed to accurately represent the underlying signal, regardless of the specific task to be performed on the quantized data. In systems working with high-dimensional signals, such as massive multiple-input multiple-output (MIMO) systems, it is beneficial to utilize low-resolution quantizers, due to cost, power, and memory constraints. In this paper, we study quantization of high-dimensional inputs, aiming at improving performance under resolution constraints by accounting for the system task in the quantizers design. We focus on the task of recovering a desired signal statistically related to the high-dimensional input, and analyze two quantization approaches. We, first, consider vector quantization, which is typically computationally infeasible, and characterize the optimal performance achievable with this approach. Next, we focus on practical systems that utilize hardware-limited scalar uniform analog-to-digital converters (ADCs), and design a task-based quantizer under this model. The resulting system accounts for the task by linearly combining the observed signal into a lower dimension prior to quantization. We then apply our proposed technique to channel estimation in massive MIMO networks. Our results demonstrate that a system utilizing low-resolution scalar ADCs can approach the optimal channel estimation performance by properly accounting for the task in the system design.", "year": 2018, "venue": "IEEE Transactions on Signal Processing", "authors": [ "Nir Shlezinger", "Yonina C. Eldar", "M. Rodrigues" ], "externalIds": { "MAG": "2950689817", "ArXiv": "1811.10077", "DBLP": "journals/tsp/ShlezingerER19", "DOI": "10.1109/TSP.2019.2923149", "CorpusId": 69612306 }, "url": "https://www.semanticscholar.org/paper/92ca482eb4bf8ce8f344b7fab430c530406143fa", "referenceCount": 55, "citationCount": 45, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Psychology", "Engineering" ] }, { "title": "Neural Joint Source-Channel Coding", "abstract": "For reliable transmission across a noisy communication channel, classical results from information theory show that it is asymptotically optimal to separate out the source and channel coding processes. However, this decomposition can fall short in the finite bit-length regime, as it requires non-trivial tuning of hand-crafted codes and assumes infinite computational power for decoding. In this work, we propose to jointly learn the encoding and decoding processes using a new discrete variational autoencoder model. By adding noise into the latent codes to simulate the channel during training, we learn to both compress and error-correct given a fixed bit-length and computational budget. We obtain codes that are not only competitive against several separation schemes, but also learn useful robust representations of the data for downstream tasks such as classification. 
Finally, inference amortization yields an extremely fast neural decoder, almost an order of magnitude faster compared to standard decoding methods based on iterative belief propagation.", "year": 2018, "venue": "International Conference on Machine Learning", "authors": [ "Kristy Choi", "Kedar Tatwawadi", "Aditya Grover", "T. Weissman", "Stefano Ermon" ], "externalIds": { "MAG": "2936697624", "DBLP": "conf/icml/ChoiTGWE19", "CorpusId": 59553574 }, "url": "https://www.semanticscholar.org/paper/75c4710de21e72ca3d9e153f7b93f8f12799933b", "referenceCount": 78, "citationCount": 100, "influentialCitationCount": 8, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] } ] }, "Reducing and Exploiting Data Augmentation Noise through Meta Reweighting Contrastive Learning for Text Classification": { "paper_title": "Reducing and Exploiting Data Augmentation Noise through Meta Reweighting Contrastive Learning for Text Classification", "arxiv_id": "2409.17474v1", "keyword": "deep learning", "authors": [ "Guanyi Mou", "Yichuan Li", "Kyumin Lee" ], "references": [ { "title": "A Survey on Data Augmentation for Text Classification", "abstract": "Data augmentation, the artificial creation of training data for machine learning by transformations, is a widely studied research field across machine learning disciplines. While it is useful for increasing a model's generalization capabilities, it can also address many other challenges and problems, from overcoming a limited amount of training data to regularizing the objective, to limiting the amount of data used to protect privacy. Based on a precise description of the goals and applications of data augmentation and a taxonomy for existing works, this survey is concerned with data augmentation methods for textual classification and aims at providing a concise and comprehensive overview for researchers and practitioners. Derived from the taxonomy, we divide more than 100 methods into 12 different groupings and give state-of-the-art references expounding which methods are highly promising by relating them to each other. Finally, research perspectives that may constitute a building block for future work are provided.", "year": 2021, "venue": "ACM Computing Surveys", "authors": [ "Markus Bayer", "M. Kaufhold", "Christian Reuter" ], "externalIds": { "DBLP": "journals/corr/abs-2107-03158", "ArXiv": "2107.03158", "DOI": "10.1145/3544558", "CorpusId": 235755489 }, "url": "https://www.semanticscholar.org/paper/2f4a0e9ece489f7b35fcf42358fda7d77b4ccf21", "referenceCount": 191, "citationCount": 257, "influentialCitationCount": 18, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Reweighting Augmented Samples by Minimizing the Maximal Expected Loss", "abstract": "Data augmentation is an effective technique to improve the generalization of deep neural networks. However, previous data augmentation methods usually treat the augmented samples equally without considering their individual impacts on the model. To address this, for the augmented samples from the same training example, we propose to assign different weights to them. We construct the maximal expected loss which is the supremum over any reweighted loss on augmented samples. Inspired by adversarial training, we minimize this maximal expected loss (MMEL) and obtain a simple and interpretable closed-form solution: more attention should be paid to augmented samples with large loss values (i.e., harder examples). 
Minimizing this maximal expected loss enables the model to perform well under any reweighting strategy. The proposed method can generally be applied on top of any data augmentation methods. Experiments are conducted on both natural language understanding tasks with token-level data augmentation, and image classification tasks with commonly-used image augmentation techniques like random crop and horizontal flip. Empirical results show that the proposed method improves the generalization performance of the model.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Mingyang Yi", "Lu Hou", "Lifeng Shang", "Xin Jiang", "Qun Liu", "Zhi-Ming Ma" ], "externalIds": { "DBLP": "journals/corr/abs-2103-08933", "ArXiv": "2103.08933", "CorpusId": 232240244 }, "url": "https://www.semanticscholar.org/paper/601e490881aa6baac15640b0dfcc0e7ad2731475", "referenceCount": 50, "citationCount": 18, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Meta Back-translation", "abstract": "Back-translation is an effective strategy to improve the performance of Neural Machine Translation~(NMT) by generating pseudo-parallel data. However, several recent works have found that better translation quality of the pseudo-parallel data does not necessarily lead to better final translation models, while lower-quality but more diverse data often yields stronger results. In this paper, we propose a novel method to generate pseudo-parallel data from a pre-trained back-translation model. Our method is a meta-learning algorithm which adapts a pre-trained back-translation model so that the pseudo-parallel data it generates would train a forward-translation model to do well on a validation set. In our evaluations in both the standard datasets WMT En-De'14 and WMT En-Fr'14, as well as a multilingual translation setting, our method leads to significant improvements over strong baselines. Our code will be made available.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Hieu Pham", "Xinyi Wang", "Yiming Yang", "Graham Neubig" ], "externalIds": { "DBLP": "conf/iclr/PhamWYN21", "ArXiv": "2102.07847", "CorpusId": 231933756 }, "url": "https://www.semanticscholar.org/paper/fcdac45272543b4f8b8eaa59d66044d1b7018494", "referenceCount": 37, "citationCount": 22, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MetaAugment: Sample-Aware Data Augmentation Policy Learning", "abstract": "Automated data augmentation has shown superior performance in image recognition. Existing works search for dataset-level augmentation policies without considering individual sample variations, which are likely to be sub-optimal. On the other hand, learning different policies for different samples naively could greatly increase the computing cost. In this paper, we learn a sample-aware data augmentation policy efficiently by formulating it as a sample reweighting problem. Specifically, an augmentation policy network takes a transformation and the corresponding augmented image as inputs, and outputs a weight to adjust the augmented image loss computed by a task network. At training stage, the task network minimizes the weighted losses of augmented training images, while the policy network minimizes the loss of the task network on a validation set via meta-learning. 
We theoretically prove the convergence of the training procedure and further derive the exact convergence rate. Superior performance is achieved on widely-used benchmarks including CIFAR-10/100, Omniglot, and ImageNet.", "year": 2020, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Fengwei Zhou", "Jiawei Li", "Chuanlong Xie", "Fei Chen", "Lanqing Hong", "Rui Sun", "Zhenguo Li" ], "externalIds": { "DBLP": "journals/corr/abs-2012-12076", "ArXiv": "2012.12076", "DOI": "10.1609/aaai.v35i12.17324", "CorpusId": 229349119 }, "url": "https://www.semanticscholar.org/paper/951f0cd7a4b43623cb4ad7a4a6215ae0113eb0ab", "referenceCount": 38, "citationCount": 22, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "HateGAN: Adversarial Generative-Based Data Augmentation for Hate Speech Detection", "abstract": "Academia and industry have developed machine learning and natural language processing models to detect online hate speech automatically. However, most of these existing methods adopt a supervised approach that heavily depends on labeled datasets for training. This results in the methods’ poor detection performance of the hate speech class as the training datasets are highly imbalanced. In this paper, we propose HateGAN, a deep generative reinforcement learning model, which addresses the challenge of imbalance class by augmenting the dataset with hateful tweets. We conduct extensive experiments to augment two commonly-used hate speech detection datasets with the HateGAN generated tweets. Our experiment results show that HateGAN improves the detection performance of the hate speech class regardless of the classifiers and datasets used in the detection task. Specifically, we observe an average 5% improvement for the hate class F1 scores across all state-of-the-art hate speech classifiers. We also conduct case studies to empirically examine the HateGAN generated hate speeches and show that the generated tweets are diverse, coherent, and relevant to hate speech detection.", "year": 2020, "venue": "International Conference on Computational Linguistics", "authors": [ "Rui Cao", "R. Lee" ], "externalIds": { "MAG": "3118142489", "ACL": "2020.coling-main.557", "DBLP": "conf/coling/CaoL20", "DOI": "10.18653/V1/2020.COLING-MAIN.557", "CorpusId": 227230383 }, "url": "https://www.semanticscholar.org/paper/7d96eaaa71a9556ab3b0c04c691af0b27b769d03", "referenceCount": 44, "citationCount": 32, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Supervised Contrastive Learning for Pre-trained Language Model Fine-tuning", "abstract": "State-of-the-art natural language understanding classification models follow two-stages: pre-training a large language model on an auxiliary task, and then fine-tuning the model on a task-specific labeled dataset using cross-entropy loss. Cross-entropy loss has several shortcomings that can lead to sub-optimal generalization and instability. Driven by the intuition that good generalization requires capturing the similarity between examples in one class and contrasting them with examples in other classes, we propose a supervised contrastive learning (SCL) objective for the fine-tuning stage. 
Combined with cross-entropy, the SCL loss we propose obtains improvements over a strong RoBERTa-Large baseline on multiple datasets of the GLUE benchmark in both the high-data and low-data regimes, and it does not require any specialized architecture, data augmentation of any kind, memory banks, or additional unsupervised data. We also demonstrate that the new objective leads to models that are more robust to different levels of noise in the training data, and can generalize better to related tasks with limited labeled task data.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Beliz Gunel", "Jingfei Du", "Alexis Conneau", "Ves Stoyanov" ], "externalIds": { "MAG": "3096565276", "ArXiv": "2011.01403", "DBLP": "conf/iclr/GunelDCS21", "CorpusId": 226237047 }, "url": "https://www.semanticscholar.org/paper/96c22a88ec3b9d3799daa41098555ab665c24ea8", "referenceCount": 66, "citationCount": 435, "influentialCitationCount": 46, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "CoDA: Contrast-enhanced and Diversity-promoting Data Augmentation for Natural Language Understanding", "abstract": "Data augmentation has been demonstrated as an effective strategy for improving model generalization and data efficiency. However, due to the discrete nature of natural language, designing label-preserving transformations for text data tends to be more challenging. In this paper, we propose a novel data augmentation framework dubbed CoDA, which synthesizes diverse and informative augmented examples by integrating multiple transformations organically. Moreover, a contrastive regularization objective is introduced to capture the global relationship among all the data samples. A momentum encoder along with a memory bank is further leveraged to better estimate the contrastive loss. To verify the effectiveness of the proposed framework, we apply CoDA to Transformer-based models on a wide range of natural language understanding tasks. On the GLUE benchmark, CoDA gives rise to an average improvement of 2.2% while applied to the RoBERTa-large model. More importantly, it consistently exhibits stronger results relative to several competitive data augmentation and adversarial training base-lines (including the low-resource settings). Extensive experiments show that the proposed contrastive objective can be flexibly combined with various data augmentation approaches to further boost their performance, highlighting the wide applicability of the CoDA framework.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Yanru Qu", "Dinghan Shen", "Yelong Shen", "Sandra Sajeev", "Jiawei Han", "Weizhu Chen" ], "externalIds": { "DBLP": "journals/corr/abs-2010-08670", "MAG": "3092806700", "ArXiv": "2010.08670", "CorpusId": 224726548 }, "url": "https://www.semanticscholar.org/paper/77f08ec1fc1a26d5e2c493be06a305d1480ad1c0", "referenceCount": 54, "citationCount": 59, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Data Augmentation for Meta-Learning", "abstract": "Conventional image classifiers are trained by randomly sampling mini-batches of images. To achieve state-of-the-art performance, sophisticated data augmentation schemes are used to expand the amount of training data available for sampling. In contrast, meta-learning algorithms sample not only images, but classes as well. 
We investigate how data augmentation can be used not only to expand the number of images available per class, but also to generate entirely new classes. We systematically dissect the meta-learning pipeline and investigate the distinct ways in which data augmentation can be integrated at both the image and class levels. Our proposed meta-specific data augmentation significantly improves the performance of meta-learners on few-shot classification benchmarks.", "year": 2020, "venue": "International Conference on Machine Learning", "authors": [ "Renkun Ni", "Micah Goldblum", "Amr Sharaf", "Kezhi Kong", "T. Goldstein" ], "externalIds": { "DBLP": "journals/corr/abs-2010-07092", "MAG": "3092742756", "ArXiv": "2010.07092", "CorpusId": 222341683 }, "url": "https://www.semanticscholar.org/paper/03542a713c3b92bc09b3a9ccda20c84846910544", "referenceCount": 34, "citationCount": 67, "influentialCitationCount": 13, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Contrastive Learning with Hard Negative Samples", "abstract": "How can you sample good negative examples for contrastive learning? We argue that, as with metric learning, contrastive learning of representations benefits from hard negative samples (i.e., points that are difficult to distinguish from an anchor point). The key challenge toward using hard negatives is that contrastive methods must remain unsupervised, making it infeasible to adopt existing negative sampling strategies that use true similarity information. In response, we develop a new family of unsupervised sampling methods for selecting hard negative samples where the user can control the hardness. A limiting case of this sampling results in a representation that tightly clusters each class, and pushes different classes as far apart as possible. The proposed method improves downstream performance across multiple modalities, requires only few additional lines of code to implement, and introduces no computational overhead.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Joshua Robinson", "Ching-Yao Chuang", "S. Sra", "S. Jegelka" ], "externalIds": { "MAG": "3091940082", "DBLP": "journals/corr/abs-2010-04592", "ArXiv": "2010.04592", "CorpusId": 222272463 }, "url": "https://www.semanticscholar.org/paper/7097137596f6755675f6aafcdd80969a747322ae", "referenceCount": 63, "citationCount": 636, "influentialCitationCount": 48, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Simple but Tough-to-Beat Data Augmentation Approach for Natural Language Understanding and Generation", "abstract": "Adversarial training has been shown effective at endowing the learned representations with stronger generalization ability. However, it typically requires expensive computation to determine the direction of the injected perturbations. In this paper, we introduce a set of simple yet effective data augmentation strategies dubbed cutoff, where part of the information within an input sentence is erased to yield its restricted views (during the fine-tuning stage). Notably, this process relies merely on stochastic sampling and thus adds little computational overhead. A Jensen-Shannon Divergence consistency loss is further utilized to incorporate these augmented samples into the training objective in a principled manner. To verify the effectiveness of the proposed strategies, we apply cutoff to both natural language understanding and generation problems. 
On the GLUE benchmark, it is demonstrated that cutoff, in spite of its simplicity, performs on par or better than several competitive adversarial-based approaches. We further extend cutoff to machine translation and observe significant gains in BLEU scores (based upon the Transformer Base model). Moreover, cutoff consistently outperforms adversarial training and achieves state-of-the-art results on the IWSLT2014 German-English dataset.", "year": 2020, "venue": "arXiv.org", "authors": [ "Dinghan Shen", "Ming Zheng", "Yelong Shen", "Yanru Qu", "Weizhu Chen" ], "externalIds": { "MAG": "3089659770", "DBLP": "journals/corr/abs-2009-13818", "ArXiv": "2009.13818", "CorpusId": 221995719 }, "url": "https://www.semanticscholar.org/paper/5a11bd4e678fcb05cb8f5d30c45877fb58bdd3b3", "referenceCount": 46, "citationCount": 121, "influentialCitationCount": 18, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "OnlineAugment: Online Data Augmentation with Less Domain Knowledge", "abstract": null, "year": 2020, "venue": "European Conference on Computer Vision", "authors": [ "Zhiqiang Tang", "Yunhe Gao", "Leonid Karlinsky", "P. Sattigeri", "R. Feris", "Dimitris N. Metaxas" ], "externalIds": { "MAG": "3043297518", "DBLP": "conf/eccv/0001GKSFM20", "ArXiv": "2007.09271", "DOI": "10.1007/978-3-030-58571-6_19", "CorpusId": 220647484 }, "url": "https://www.semanticscholar.org/paper/484ab770c3e96554247388c724b9ed83cf65adca", "referenceCount": 45, "citationCount": 50, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Meta-Learning Requires Meta-Augmentation", "abstract": "Meta-learning algorithms aim to learn two components: a model that predicts targets for a task, and a base learner that quickly updates that model when given examples from a new task. This additional level of learning can be powerful, but it also creates another potential source for overfitting, since we can now overfit in either the model or the base learner. We describe both of these forms of metalearning overfitting, and demonstrate that they appear experimentally in common meta-learning benchmarks. We then use an information-theoretic framework to discuss meta-augmentation, a way to add randomness that discourages the base learner and model from learning trivial solutions that do not generalize to new tasks. We demonstrate that meta-augmentation produces large complementary benefits to recently proposed meta-regularization techniques.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Janarthanan Rajendran", "A. Irpan", "Eric Jang" ], "externalIds": { "DBLP": "conf/nips/RajendranIJ20", "ArXiv": "2007.05549", "MAG": "3040863728", "CorpusId": 220496701 }, "url": "https://www.semanticscholar.org/paper/759af360f450ef76d903a907e8a39b10845cdeaf", "referenceCount": 41, "citationCount": 86, "influentialCitationCount": 14, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Towards Hate Speech Detection at Large via Deep Generative Modeling", "abstract": "Hate speech detection is a critical problem in social media, being often accused for enabling the spread of hatred and igniting violence. Hate speech detection requires overwhelming computing resources for online monitoring as well as thousands of human experts for daily screening of suspected posts or tweets. 
Boyd-Graber", "Philipp Koehn" ], "externalIds": { "DBLP": "reference/db/Hiemstra18", "DOI": "10.1007/978-0-387-39940-9_923", "CorpusId": 10924669 }, "url": "https://www.semanticscholar.org/paper/70d6dfdc40c4681ba5d51d60116db0311b5126ce", "referenceCount": 27, "citationCount": 429, "influentialCitationCount": 32, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "How to write plain english: Let’s start with the formula", "abstract": null, "year": 1979, "venue": "University of Canterbury, 1979.", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "A Hybrid Quantum-Classical AI-Based Detection Strategy for Generative Adversarial Network-Based Deepfake Attacks on an Autonomous Vehicle Traffic Sign Classification System": { "paper_title": "A Hybrid Quantum-Classical AI-Based Detection Strategy for Generative Adversarial Network-Based Deepfake Attacks on an Autonomous Vehicle Traffic Sign Classification System", "arxiv_id": "2409.17311v1", "keyword": "deep learning", "authors": [ "M Sabbir Salek", "Shaozhi Li", "Mashrur Chowdhury" ], "references": [ { "title": "Quantum-inspired activation functions in the convolutional neural network", "abstract": "Driven by the significant advantages offered by quantum computing, research in quantum machine learning has increased in recent years. While quantum speed-up has been demonstrated in some applications of quantum machine learning, a comprehensive understanding of its underlying mechanisms for improved performance remains elusive. Our study fills this gap by examining the expressibility of quantum circuits integrated within a convolutional neural network (CNN). Through numerical training on the MNIST dataset, our hybrid quantum-classical CNN model exhibited superior feature selection capabilities and significantly reduced the required training steps compared to the classical CNN. To understand the root of this enhanced performance, we conducted an analytical investigation of the functional expressibility of quantum circuits and derived a quantum activation function. We demonstrated that this quantum activation is more efficient in selecting important features and discarding unimportant information of input images. These findings not only deepen our comprehension of quantum-enhanced machine-learning models but also advance the classical machine-learning technique by introducing the quantum-inspired activation function.", "year": 2024, "venue": "", "authors": [ "Shaozhi Li", "Sabbir Salek", "Yao Wang", "Mashrur Chowdhury" ], "externalIds": { "ArXiv": "2404.05901", "CorpusId": 269009987 }, "url": "https://www.semanticscholar.org/paper/2366c2470d028d9ac316283a14c59f0b1027ea6b", "referenceCount": 78, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Physics", "Mathematics" ] }, { "title": "Identity-Driven DeepFake Detection", "abstract": "DeepFake detection has so far been dominated by ``artifact-driven'' methods and the detection performance significantly degrades when either the type of image artifacts is unknown or the artifacts are simply too hard to find. In this work, we present an alternative approach: Identity-Driven DeepFake Detection. Our approach takes as input the suspect image/video as well as the target identity information (a reference image or video). 
We output a decision on whether the identity in the suspect image/video is the same as the target identity. Our motivation is to prevent the most common and harmful DeepFakes that spread false information of a targeted person. The identity-based approach is fundamentally different in that it does not attempt to detect image artifacts. Instead, it focuses on whether the identity in the suspect image/video is true. To facilitate research on identity-based detection, we present a new large scale dataset ``Vox-DeepFake\", in which each suspect content is associated with multiple reference images collected from videos of a target identity. We also present a simple identity-based detection algorithm called the OuterFace, which may serve as a baseline for further research. Even trained without fake videos, the OuterFace algorithm achieves superior detection accuracy and generalizes well to different DeepFake methods, and is robust with respect to video degradation techniques -- a performance not achievable with existing detection algorithms.", "year": 2020, "venue": "arXiv.org", "authors": [ "Xiaoyi Dong", "Jianmin Bao", "Dongdong Chen", "Weiming Zhang", "Nenghai Yu", "Dong Chen", "Fang Wen", "B. Guo" ], "externalIds": { "MAG": "3112657560", "DBLP": "journals/corr/abs-2012-03930", "ArXiv": "2012.03930", "CorpusId": 227344299 }, "url": "https://www.semanticscholar.org/paper/bb334069f6f76a029195932e7325718de525bb41", "referenceCount": 39, "citationCount": 26, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Face Forgery Detection by 3D Decomposition", "abstract": "Detecting digital face manipulation has attracted extensive attention due to fake media’s potential harms to the public. However, recent advances have been able to reduce the forgery signals to a low magnitude. Decomposition, which reversibly decomposes an image into several constituent elements, is a promising way to highlight the hidden forgery details. In this paper, we consider a face image as the production of the intervention of the underlying 3D geometry and the lighting environment, and decompose it in a computer graphics view. Specifically, by disentangling the face image into 3D shape, common texture, identity texture, ambient light, and direct light, we find the devil lies in the direct light and the identity texture. Based on this observation, we propose to utilize facial detail, which is the combination of direct light and identity texture, as the clue to detect the subtle forgery patterns. Besides, we highlight the manipulated region with a supervised attention mechanism and introduce a two-stream structure to exploit both face image and facial detail together as a multi-modality task. Extensive experiments indicate the effectiveness of the extra features extracted from the facial detail, and our method achieves the state-of-the-art performance.", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Xiangyu Zhu", "Hao Wang", "Hongyan Fei", "Zhen Lei", "S. 
Li" ], "externalIds": { "DBLP": "journals/corr/abs-2011-09737", "MAG": "3101179759", "ArXiv": "2011.09737", "DOI": "10.1109/CVPR46437.2021.00295", "CorpusId": 227054386 }, "url": "https://www.semanticscholar.org/paper/0e95ab55fbccc38710599fa604ab980005e1894d", "referenceCount": 70, "citationCount": 82, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Spatio-temporal Features for Generalized Detection of Deepfake Videos", "abstract": "For deepfake detection, video-level detectors have not been explored as extensively as image-level detectors, which do not exploit temporal data. In this paper, we empirically show that existing approaches on image and sequence classifiers generalize poorly to new manipulation techniques. To this end, we propose spatio-temporal features, modeled by 3D CNNs, to extend the generalization capabilities to detect new sorts of deepfake videos. We show that spatial features learn distinct deepfake-method-specific attributes, while spatio-temporal features capture shared attributes between deepfake methods. We provide an in-depth analysis of how the sequential and spatio-temporal video encoders are utilizing temporal information using DFDC dataset arXiv:2006.07397. Thus, we unravel that our approach captures local spatio-temporal relations and inconsistencies in the deepfake videos while existing sequence encoders are indifferent to it. Through large scale experiments conducted on the FaceForensics++ arXiv:1901.08971 and Deeper Forensics arXiv:2001.03024 datasets, we show that our approach outperforms existing methods in terms of generalization capabilities.", "year": 2020, "venue": "arXiv.org", "authors": [ "Ipek Ganiyusufoglu", "L. Ngô", "N. Savov", "Sezer Karaoglu", "T. Gevers" ], "externalIds": { "MAG": "3093810527", "ArXiv": "2010.11844", "DBLP": "journals/corr/abs-2010-11844", "CorpusId": 225039780 }, "url": "https://www.semanticscholar.org/paper/c6b9e7f0d35690a30a41b090ddba33fd5fe4b8c6", "referenceCount": 35, "citationCount": 31, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Eyebrow Recognition for Identifying Deepfake Videos", "abstract": "Deepfake imagery that contains altered faces has become a threat to online content. Current anti-deepfake approaches usually do so by detecting image anomalies, such as visible artifacts or inconsistencies. However, with deepfake advances, these visual artifacts are becoming harder to detect. In this paper, we show that one can use biometric eyebrow matching as a tool to detect manipulated faces. Our method could provide an 0.88 AUC and 20.7% EER for deepfake detection when applied to the highest quality deepfake dataset, Celeb-DF.", "year": 2020, "venue": "Biometrics and Electronic Signatures", "authors": [ "H. Nguyen", "R. 
Derakhshani" ], "externalIds": { "MAG": "3093346400", "DBLP": "conf/biosig/NguyenD20", "CorpusId": 222112154 }, "url": "https://www.semanticscholar.org/paper/ae6442ff7f96a69148e37f2e0b0da48a424a32fd", "referenceCount": 18, "citationCount": 25, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DeepfakeStack: A Deep Ensemble-based Learning Technique for Deepfake Detection", "abstract": "Recent advances in technology have made the deep learning (DL) models available for use in a wide variety of novel applications; for example, generative adversarial network (GAN) models are capable of producing hyper-realistic images, speech, and even videos, such as the so-called “Deepfake” produced by GANs with manipulated audio and/or video clips, which are so realistic as to be indistinguishable from the real ones in human perception. Aside from innovative and legitimate applications, there are numerous nefarious or unlawful ways to use such counterfeit contents in propaganda, political campaigns, cybercrimes, extortion, etc. To meet the challenges posed by Deepfake multimedia, we propose a deep ensemble learning technique called DeepfakeStack for detecting such manipulated videos. The proposed technique combines a series of DL based state-of-art classification models and creates an improved composite classifier. Based on our experiments, it is shown that DeepfakeStack outperforms other classifiers by achieving an accuracy of 99.65% and AUROC of 1.0 score in detecting Deepfake. Therefore, our method provides a solid basis for building a Realtime Deepfake detector.", "year": 2020, "venue": "International Conference on Cyber Security and Cloud Computing", "authors": [ "M. Rana", "A. Sung" ], "externalIds": { "DBLP": "conf/cscloud/RanaS20", "MAG": "3080632971", "DOI": "10.1109/CSCloud-EdgeCom49738.2020.00021", "CorpusId": 221283584 }, "url": "https://www.semanticscholar.org/paper/3076cb5a2e336832d8adf8a75dcf4399075b642f", "referenceCount": 27, "citationCount": 63, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DeepFake Video Detection: A Time-Distributed Approach", "abstract": null, "year": 2020, "venue": "SN Computer Science", "authors": [ "Amritpal Singh", "Amanpreet Singh Saimbhi", "Navjot Singh", "Mamta Mittal" ], "externalIds": { "DBLP": "journals/sncs/SinghSSM20", "MAG": "3035925304", "DOI": "10.1007/s42979-020-00225-9", "CorpusId": 220795007 }, "url": "https://www.semanticscholar.org/paper/36e487a7025d97cfe492a0dd82ce6bf152619fb6", "referenceCount": 40, "citationCount": 29, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Forensics and Analysis of Deepfake Videos", "abstract": "The spread of smartphones with high quality digital cameras in combination with easy access to a myriad of software apps for recording, editing and sharing videos and digital images in combination with deep learning AI platforms has spawned a new phenomenon of faking videos known as Deepfake. We design and implement a deep-fake detection model with mouth features (DFT-MF), using deep learning approach to detect Deepfake videos by isolating, analyzing and verifying lip/mouth movement. 
Experiments conducted against datasets that contain both fake and real videos showed favorable classification performance for DFT-MF model especially when compared with other work in this area.", "year": 2020, "venue": "International Conference on Information, Communications and Signal Processing", "authors": [ "Mousa Tayseer Jafar", "M. Ababneh", "Mohammad Al-Zoube", "Ammar Elhassan" ], "externalIds": { "MAG": "3023887666", "DOI": "10.1109/ICICS49469.2020.239493", "CorpusId": 216588334 }, "url": "https://www.semanticscholar.org/paper/1192554856d9eca4f5eabe99ca1f9e6fdb340d2e", "referenceCount": 22, "citationCount": 61, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Mapillary Traffic Sign Dataset for Detection and Classification on a Global Scale", "abstract": null, "year": 2019, "venue": "European Conference on Computer Vision", "authors": [ "C. Ertler", "Jerneja Mislej", "Tobias Ollmann", "L. Porzi", "Gerhard Neuhold", "Yubin Kuang" ], "externalIds": { "ArXiv": "1909.04422", "MAG": "3108907096", "DBLP": "conf/eccv/ErtlerMOPNK20", "DOI": "10.1007/978-3-030-58592-1_5", "CorpusId": 218538477 }, "url": "https://www.semanticscholar.org/paper/4286427a685ddea72aec0afa6f8bdbd714765cd4", "referenceCount": 33, "citationCount": 62, "influentialCitationCount": 11, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Emergence of Deepfake Technology: A Review", "abstract": "/", "year": 2019, "venue": "Technology Innovation Management Review", "authors": [ "M. Westerlund" ], "externalIds": { "MAG": "2989851933", "DOI": "10.22215/timreview/1282", "CorpusId": 214014129 }, "url": "https://www.semanticscholar.org/paper/17734113f254a64b3bae312713edba3b1e34fb56", "referenceCount": 29, "citationCount": 499, "influentialCitationCount": 24, "isOpenAccess": true, "fieldsOfStudy": [ "Sociology" ] }, { "title": "MesoNet: a Compact Facial Video Forgery Detection Network", "abstract": "This paper presents a method to automatically and efficiently detect face tampering in videos, and particularly focuses on two recent techniques used to generate hyper-realistic forged videos: Deepfake and Face2Face. Traditional image forensics techniques are usually not well suited to videos due to the compression that strongly degrades the data. Thus, this paper follows a deep learning approach and presents two networks, both with a low number of layers to focus on the mesoscopic properties of images. We evaluate those fast networks on both an existing dataset and a dataset we have constituted from online videos. The tests demonstrate a very successful detection rate with more than 98% for Deepfake and 95% for Face2Face.", "year": 2018, "venue": "International Workshop on Information Forensics and Security", "authors": [ "Darius Afchar", "Vincent Nozick", "J. Yamagishi", "I. Echizen" ], "externalIds": { "DBLP": "conf/wifs/AfcharNYE18", "ArXiv": "1809.00888", "MAG": "2952515199", "DOI": "10.1109/WIFS.2018.8630761", "CorpusId": 52157475 }, "url": "https://www.semanticscholar.org/paper/4258fd74a8bbb03b0997e76ba8ab6684b161f9d3", "referenceCount": 29, "citationCount": 1048, "influentialCitationCount": 194, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Autonomous vehicle perception: The technology of today and tomorrow", "abstract": null, "year": 2018, "venue": "", "authors": [ "Jessica Van Brummelen", "Marie O'Brien", "D. Gruyer", "H. 
Najjaran" ], "externalIds": { "MAG": "2792919579", "DOI": "10.1016/J.TRC.2018.02.012", "CorpusId": 115843016 }, "url": "https://www.semanticscholar.org/paper/05507c5f462c60b4802e30810e2c68f1e0d34b37", "referenceCount": 88, "citationCount": 560, "influentialCitationCount": 20, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Improved Training of Wasserstein GANs", "abstract": "Generative Adversarial Networks (GANs) are powerful generative models, but suffer from training instability. The recently proposed Wasserstein GAN (WGAN) makes progress toward stable training of GANs, but sometimes can still generate only low-quality samples or fail to converge. We find that these problems are often due to the use of weight clipping in WGAN to enforce a Lipschitz constraint on the critic, which can lead to undesired behavior. We propose an alternative to clipping weights: penalize the norm of gradient of the critic with respect to its input. Our proposed method performs better than standard WGAN and enables stable training of a wide variety of GAN architectures with almost no hyperparameter tuning, including 101-layer ResNets and language models over discrete data. We also achieve high quality generations on CIFAR-10 and LSUN bedrooms.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Ishaan Gulrajani", "Faruk Ahmed", "Martín Arjovsky", "Vincent Dumoulin", "Aaron C. Courville" ], "externalIds": { "ArXiv": "1704.00028", "MAG": "2605135824", "DBLP": "conf/nips/GulrajaniAADC17", "CorpusId": 10894094 }, "url": "https://www.semanticscholar.org/paper/edf73ab12595c6709f646f542a0d2b33eb20a3f4", "referenceCount": 37, "citationCount": 8758, "influentialCitationCount": 1421, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Face2Face: Real-Time Face Capture and Reenactment of RGB Videos", "abstract": "We present a novel approach for real-time facial reenactment of a monocular target video sequence (e.g., Youtube video). The source sequence is also a monocular video stream, captured live with a commodity webcam. Our goal is to animate the facial expressions of the target video by a source actor and re-render the manipulated output video in a photo-realistic fashion. To this end, we first address the under-constrained problem of facial identity recovery from monocular video by non-rigid model-based bundling. At run time, we track facial expressions of both source and target video using a dense photometric consistency measure. Reenactment is then achieved by fast and efficient deformation transfer between source and target. The mouth interior that best matches the re-targeted expression is retrieved from the target sequence and warped to produce an accurate fit. Finally, we convincingly re-render the synthesized target face on top of the corresponding video stream such that it seamlessly blends with the real-world illumination. We demonstrate our method in a live setup, where Youtube videos are reenacted in real time.", "year": 2016, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Justus Thies", "M. Zollhöfer", "M. Stamminger", "C. Theobalt", "M. 
Nießner" ], "externalIds": { "MAG": "3046121507", "DBLP": "conf/cvpr/ThiesZSTN16", "ArXiv": "2007.14808", "DOI": "10.1145/3292039", "CorpusId": 52858569 }, "url": "https://www.semanticscholar.org/paper/ba11b4feb04a472cb5e5962697ed6faa653dc647", "referenceCount": 43, "citationCount": 1761, "influentialCitationCount": 207, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Residual Learning for Image Recognition", "abstract": "Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers - 8× deeper than VGG nets [40] but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions1, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation.", "year": 2015, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Kaiming He", "X. Zhang", "Shaoqing Ren", "Jian Sun" ], "externalIds": { "DBLP": "conf/cvpr/HeZRS16", "MAG": "2949650786", "ArXiv": "1512.03385", "DOI": "10.1109/cvpr.2016.90", "CorpusId": 206594692 }, "url": "https://www.semanticscholar.org/paper/2c03df8b48bf3fa39054345bafabfeff15bfd11d", "referenceCount": 53, "citationCount": 172713, "influentialCitationCount": 28229, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks", "abstract": "In recent years, supervised learning with convolutional networks (CNNs) has seen huge adoption in computer vision applications. Comparatively, unsupervised learning with CNNs has received less attention. In this work we hope to help bridge the gap between the success of CNNs for supervised learning and unsupervised learning. We introduce a class of CNNs called deep convolutional generative adversarial networks (DCGANs), that have certain architectural constraints, and demonstrate that they are a strong candidate for unsupervised learning. Training on various image datasets, we show convincing evidence that our deep convolutional adversarial pair learns a hierarchy of representations from object parts to scenes in both the generator and discriminator. 
Additionally, we use the learned features for novel tasks - demonstrating their applicability as general image representations.", "year": 2015, "venue": "International Conference on Learning Representations", "authors": [ "Alec Radford", "Luke Metz", "Soumith Chintala" ], "externalIds": { "MAG": "2949811265", "ArXiv": "1511.06434", "DBLP": "journals/corr/RadfordMC15", "CorpusId": 11758569 }, "url": "https://www.semanticscholar.org/paper/8388f1be26329fa45e5807e968a641ce170ea078", "referenceCount": 45, "citationCount": 13240, "influentialCitationCount": 1665, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning", "abstract": "Deep learning tools have gained tremendous attention in applied machine learning. However such tools for regression and classification do not capture model uncertainty. In comparison, Bayesian models offer a mathematically grounded framework to reason about model uncertainty, but usually come with a prohibitive computational cost. In this paper we develop a new theoretical framework casting dropout training in deep neural networks (NNs) as approximate Bayesian inference in deep Gaussian processes. A direct result of this theory gives us tools to model uncertainty with dropout NNs -- extracting information from existing models that has been thrown away so far. This mitigates the problem of representing uncertainty in deep learning without sacrificing either computational complexity or test accuracy. We perform an extensive study of the properties of dropout's uncertainty. Various network architectures and non-linearities are assessed on tasks of regression and classification, using MNIST as an example. We show a considerable improvement in predictive log-likelihood and RMSE compared to existing state-of-the-art methods, and finish by using dropout's uncertainty in deep reinforcement learning.", "year": 2015, "venue": "International Conference on Machine Learning", "authors": [ "Y. Gal", "Zoubin Ghahramani" ], "externalIds": { "MAG": "2964059111", "DBLP": "conf/icml/GalG16", "ArXiv": "1506.02142", "CorpusId": 160705 }, "url": "https://www.semanticscholar.org/paper/f35de4f9b1a7c4d3fa96a0d2ab1bf8937671f6b6", "referenceCount": 56, "citationCount": 8177, "influentialCitationCount": 1281, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Vision-Based Traffic Sign Detection and Analysis for Intelligent Driver Assistance Systems: Perspectives and Survey", "abstract": "In this paper, we provide a survey of the traffic sign detection literature, detailing detection systems for traffic sign recognition (TSR) for driver assistance. We separately describe the contributions of recent works to the various stages inherent in traffic sign detection: segmentation, feature extraction, and final sign detection. While TSR is a well-established research area, we highlight open research issues in the literature, including a dearth of use of publicly available image databases and the over-representation of European traffic signs. Furthermore, we discuss future directions of TSR research, including the integration of context and localization. We also introduce a new public database containing U.S. traffic signs.", "year": 2012, "venue": "IEEE transactions on intelligent transportation systems (Print)", "authors": [ "Andreas Møgelmose", "M. Trivedi", "T. 
Moeslund" ], "externalIds": { "MAG": "2126628495", "DBLP": "journals/tits/MogelmoseTM12", "DOI": "10.1109/TITS.2012.2209421", "CorpusId": 206739910 }, "url": "https://www.semanticscholar.org/paper/c8b5f38617a202816486acd2a1e79556ff7f1f70", "referenceCount": 76, "citationCount": 624, "influentialCitationCount": 37, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "Authentication theory and hypothesis testing", "abstract": "By interpreting message authentication as a hypothesis testing problem, this paper provides a generalized treatment of information-theoretic lower bounds on an opponent's probability of cheating in one-way message authentication. We consider the authentication of an arbitrary sequence of messages, using the same secret key shared between sender and receiver. The adversary tries to deceive the receiver by forging one of the messages in the sequence. The classical two types of cheating are considered, impersonation and substitution attacks, and lower bounds on the cheating probability for any authentication system are derived for three types of goals the adversary might wish to achieve. These goals are: (1) that the fraudulent message should be accepted by the receiver, or, in addition, (2) that the adversary wishes to know or (3) wants to even choose the value of the plaintext message obtained by the legitimate receiver after decoding with the secret key.", "year": 2000, "venue": "IEEE Transactions on Information Theory", "authors": [ "U. Maurer" ], "externalIds": { "MAG": "2169128714", "DBLP": "journals/tit/Maurer00", "DOI": "10.1109/18.850674", "CorpusId": 16934257 }, "url": "https://www.semanticscholar.org/paper/6d18995074d9e3b7b13ed6dd64147d8b41d63bb1", "referenceCount": 19, "citationCount": 181, "influentialCitationCount": 13, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Cybersecurity of Autonomous Vehicles: A Systematic Literature Review of Adversarial Attacks and Defense Models", "abstract": "Autonomous driving (AD) has developed tremendously in parallel with the ongoing development and improvement of deep learning (DL) technology. However, the uptake of artificial intelligence (AI) in AD as the core enabling technology raises serious cybersecurity issues. An enhanced attack surface has been spurred on by the rising digitization of vehicles and the integration of AI features. The performance of the autonomous vehicle (AV)-based applications is constrained by the DL models' susceptibility to adversarial attacks despite their great potential. Hence, AI-enabled AVs face numerous security threats, which prevent the large-scale adoption of AVs. Therefore, it becomes crucial to evolve existing cybersecurity practices to deal with risks associated with the increased uptake of AI. Furthermore, putting defense models into practice against adversarial attacks has grown in importance as a field of study amongst researchers. 
Therefore, this study seeks to provide an overview of the most recent adversarial defensive and attack models developed in the domain of AD.", "year": 2023, "venue": "IEEE Open Journal of Vehicular Technology", "authors": [ "Mansi Girdhar", "Junho Hong", "John Moore" ], "externalIds": { "DOI": "10.1109/OJVT.2023.3265363", "CorpusId": 258035165 }, "url": "https://www.semanticscholar.org/paper/1ef8c59d615f8416f7039428d4416bd5eafb3748", "referenceCount": 249, "citationCount": 21, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Quantum Transfer Learning Approach for Deepfake Detection", "abstract": "Deepfake image manipulation has achieved great attention in the previous year’s owing to brings solemn challenges from the public self-confidence. Forgery detection in face imaging has made considerable developments in detecting manipulated images. However, there is still a need for an efficient deepfake detection approach in complex background environments. This paper applies the state-of-the-art quantum transfer learning approach for classifying deepfake images from original face images. The proposed model comprises classical pre-trained ResNet-18 and quantum neural network layers that provide efficient features extraction to learn the different patterns of the deepfake face images. The proposed model is validated on a real-world deepfake dataset created using commercial software. An accuracy of 96.1 % was obtained.", "year": 2022, "venue": "Sparklinglight Transactions on Artificial Intelligence and Quantum Computing", "authors": [ "Bishwas Mishra", "Abhishek Samanta" ], "externalIds": { "DOI": "10.55011/staiqc.2022.2103", "CorpusId": 249906467 }, "url": "https://www.semanticscholar.org/paper/30f92885554b72a6a659078ecb045d1ed23dcc40", "referenceCount": 0, "citationCount": 3, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Deepfake Detection: A Systematic Literature Review", "abstract": "Over the last few decades, rapid progress in AI, machine learning, and deep learning has resulted in new techniques and various tools for manipulating multimedia. Though the technology has been mostly used in legitimate applications such as for entertainment and education, etc., malicious users have also exploited them for unlawful or nefarious purposes. For example, high-quality and realistic fake videos, images, or audios have been created to spread misinformation and propaganda, foment political discord and hate, or even harass and blackmail people. The manipulated, high-quality and realistic videos have become known recently as Deepfake. Various approaches have since been described in the literature to deal with the problems raised by Deepfake. To provide an updated overview of the research works in Deepfake detection, we conduct a systematic literature review (SLR) in this paper, summarizing 112 relevant articles from 2018 to 2020 that presented a variety of methodologies. We analyze them by grouping them into four different categories: deep learning-based techniques, classical machine learning-based methods, statistical techniques, and blockchain-based techniques. We also evaluate the performance of the detection capability of the various methods with respect to different datasets and conclude that the deep learning-based methods outperform other methods in Deepfake detection.", "year": 2022, "venue": "IEEE Access", "authors": [ "M. Rana", "M. N. Nobi", "B. Murali", "A. 
Sung" ], "externalIds": { "DBLP": "journals/access/RanaNMS22", "DOI": "10.1109/access.2022.3154404", "CorpusId": 247116742 }, "url": "https://www.semanticscholar.org/paper/31e284d01686e257dabb978eb8ca10ceefca5be6", "referenceCount": 0, "citationCount": 119, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deepfake: An Overview", "abstract": null, "year": 2021, "venue": "", "authors": [ "Anupama Chadha", "Vaibhav Kumar", "Sonu Kashyap", "Mayank Gupta" ], "externalIds": { "MAG": "3164861235", "DOI": "10.1007/978-981-16-0733-2_39", "CorpusId": 236666289 }, "url": "https://www.semanticscholar.org/paper/1781552eab89e5cf1be32b3471482ade18320096", "referenceCount": 26, "citationCount": 25, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Detection of Deepfakes Using Visual Artifacts and Neural Network Classifier", "abstract": null, "year": 2020, "venue": "", "authors": [ "M. A. S. Habeeba", "A. Lijiya", "A. Chacko" ], "externalIds": { "MAG": "3045320999", "DOI": "10.1007/978-981-15-4692-1_31", "CorpusId": 224877668 }, "url": "https://www.semanticscholar.org/paper/8d27f72e0ce5384e43f7e1e13905d55573c79bb7", "referenceCount": 14, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Created Fake", "abstract": null, "year": 2018, "venue": "Ictu Oculi: Exposing AI", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Unpaired Image-To-Image Translation Using Cycle-Consistent Adversarial Networks", "abstract": null, "year": 2017, "venue": "Proceedings of the IEEE International Conference on Computer Vision", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "the baseline classical convolutional NNs in most cases while requiring less than one-third of the memory required by the shallowest classical convolutional NN considered in this study", "abstract": null, "year": null, "venue": "Keywords: Deepfake detection, Adversarial attack, Autonomous vehicle, Traffic sign classification", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Informed deep hierarchical classification: a non-standard analysis inspired approach": { "paper_title": "Informed deep hierarchical classification: a non-standard analysis inspired approach", "arxiv_id": "2409.16956v1", "keyword": "deep learning", "authors": [ "Lorenzo Fiaschi", "Marco Cococcioni" ], "references": [ { "title": "A new cutting plane method for lexicographic multi-objective integer linear programming", "abstract": null, "year": 2023, "venue": "Communications in nonlinear science & numerical simulation", "authors": [ "M. Cococcioni", "Alessandro Cudazzo", "Lorenzo Fiaschi", "Massimo Pappalardo", "Yaroslav D. 
Sergeyev" ], "externalIds": { "DBLP": "journals/cnsns/CococcioniCFPS24", "DOI": "10.1016/j.cnsns.2023.107674", "CorpusId": 265072231 }, "url": "https://www.semanticscholar.org/paper/7dacb02f573be74a015e4a4a13a4fd9c91c983db", "referenceCount": 37, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Non-Archimedean Interior Point Method and Its Application to the Lexicographic Multi-Objective Quadratic Programming", "abstract": "This work presents a generalized implementation of the infeasible primal-dual interior point method (IPM) achieved by the use of non-Archimedean values, i.e., infinite and infinitesimal numbers. The extended version, called here the non-Archimedean IPM (NA-IPM), is proved to converge in polynomial time to a global optimum and to be able to manage infeasibility and unboundedness transparently, i.e., without considering them as corner cases: by means of a mild embedding (addition of two variables and one constraint), the NA-IPM implicitly and transparently manages their possible presence. Moreover, the new algorithm is able to solve a wider variety of linear and quadratic optimization problems than its standard counterpart. Among them, the lexicographic multi-objective one deserves particular attention, since the NA-IPM overcomes the issues that standard techniques (such as scalarization or preemptive approach) have. To support the theoretical properties of the NA-IPM, the manuscript also shows four linear and quadratic non-Archimedean programming test cases where the effectiveness of the algorithm is verified. This also stresses that the NA-IPM is not just a mere symbolic or theoretical algorithm but actually a concrete numerical tool, paving the way for its use in real-world problems in the near future.", "year": 2021, "venue": "Mathematics", "authors": [ "Lorenzo Fiaschi", "M. Cococcioni" ], "externalIds": { "ArXiv": "2110.15658", "DOI": "10.3390/math10234536", "CorpusId": 254240723 }, "url": "https://www.semanticscholar.org/paper/dfbf25ed68364fc35cbbb748a99798dd146c2a58", "referenceCount": 49, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "An improved deep learning model for hierarchical classification of protein families", "abstract": "Although genes carry information, proteins are the main role player in providing all the functionalities of a living organism. Massive amounts of different proteins involve in every function that occurs in a cell. These amino acid sequences can be hierarchically classified into a set of families and subfamilies depending on their evolutionary relatedness and similarities in their structure or function. Protein characterization to identify protein structure and function is done accurately using laboratory experiments. With the rapidly increasing huge amount of novel protein sequences, these experiments have become difficult to carry out since they are expensive, time-consuming, and laborious. Therefore, many computational classification methods are introduced to classify proteins and predict their functional properties. With the progress of the performance of the computational techniques, deep learning plays a key role in many areas. Novel deep learning models such as DeepFam, ProtCNN have been presented to classify proteins into their families recently. However, these deep learning models have been used to carry out the non-hierarchical classification of proteins. 
In this research, we propose a deep learning neural network model named DeepHiFam with high accuracy to classify proteins hierarchically into different levels simultaneously. The model achieved an accuracy of 98.38% for protein family classification and more than 80% accuracy for the classification of protein subfamilies and sub-subfamilies. Further, DeepHiFam performed well in the non-hierarchical classification of protein families and achieved an accuracy of 98.62% and 96.14% for the popular Pfam dataset and COG dataset respectively.", "year": 2021, "venue": "PLoS ONE", "authors": [ "Pahalage Dhanushka Sandaruwan", "C. T. Wannige" ], "externalIds": { "PubMedCentral": "8528337", "DOI": "10.1371/journal.pone.0258625", "CorpusId": 239048562, "PubMed": "34669708" }, "url": "https://www.semanticscholar.org/paper/e0ad44c6581bcf20a02d7e6194e4026b4b5bd34f", "referenceCount": 41, "citationCount": 11, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Solving Mixed Pareto-Lexicographic Multiobjective Optimization Problems: The Case of Priority Levels", "abstract": "This article concerns the study of mixed Pareto-lexicographic multiobjective optimization problems where the objectives must be partitioned in multiple priority levels (PLs). A PL is a group of objectives having the same importance in terms of optimization and subsequent decision making, while between PLs a lexicographic ordering exists. A naive approach would be to define a multilevel dominance relationship and apply a standard EMO/EMaO algorithm, but the concept does not conform to a stable optimization process as the resulting dominance relationship violates the transitive property needed to achieve consistent comparisons. To overcome this, we present a novel approach that merges a custom nondominance relation with the Grossone methodology, a mathematical framework to handle infinite and infinitesimal quantities. The proposed method is implemented on a popular multiobjective optimization algorithm (NSGA-II), deriving a generalization of it called by us PL-NSGA-II. We also demonstrate the usability of our strategy by quantitatively comparing the results obtained by PL-NSGA-II against other priority and nonpriority-based approaches. Among the test cases, we include two real-world applications: one 10-objective aircraft design problem and one 3-objective crash safety vehicle design task. The obtained results show that PL-NSGA-II is more suited to solve lexicographical many-objective problems than the general purpose EMaO algorithms.", "year": 2021, "venue": "IEEE Transactions on Evolutionary Computation", "authors": [ "Leonardo Lai", "Lorenzo Fiaschi", "M. Cococcioni", "K. Deb" ], "externalIds": { "MAG": "3139981417", "DBLP": "journals/tec/LaiFCD21", "DOI": "10.1109/TEVC.2021.3068816", "CorpusId": 233689761 }, "url": "https://www.semanticscholar.org/paper/209377ce3b142681b06a6bf266705b899238c088", "referenceCount": 0, "citationCount": 13, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multi-Label Classification Neural Networks with Hard Logical Constraints", "abstract": "Multi-label classification (MC) is a standard machine learning problem in which a data point can be associated with a set of classes. A more challenging scenario is given by hierarchical multi-label classification (HMC) problems, in which every prediction must satisfy a given set of hard constraints expressing subclass relationships between classes. 
In this article, we propose C-HMCNN(h), a novel approach for solving HMC problems, which, given a network h for the underlying MC problem, exploits the hierarchy information in order to produce predictions coherent with the constraints and to improve performance. Furthermore, we extend the logic used to express HMC constraints in order to be able to specify more complex relations among the classes and propose a new model CCN(h), which extends C-HMCNN(h) and is again able to satisfy and exploit the constraints to improve performance. We conduct an extensive experimental analysis showing the superior performance of both C-HMCNN(h) and CCN(h) when compared to state-of-the-art models in both the HMC and the general MC setting with hard logical constraints.", "year": 2021, "venue": "Journal of Artificial Intelligence Research", "authors": [ "Eleonora Giunchiglia", "Thomas Lukasiewicz" ], "externalIds": { "ArXiv": "2103.13427", "DBLP": "journals/corr/abs-2103-13427", "DOI": "10.1613/jair.1.12850", "CorpusId": 232352814 }, "url": "https://www.semanticscholar.org/paper/f6f43a3044af380b7e6959806f0c45b8e032d3f6", "referenceCount": 86, "citationCount": 36, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Non-Archimedean zero-sum games", "abstract": null, "year": 2021, "venue": "Journal of Computational and Applied Mathematics", "authors": [ "M. Cococcioni", "Lorenzo Fiaschi", "L. Lambertini" ], "externalIds": { "DBLP": "journals/jcam/CococcioniFL21", "MAG": "3130199688", "DOI": "10.1016/J.CAM.2021.113483", "CorpusId": 233899871 }, "url": "https://www.semanticscholar.org/paper/d416ce8c8e544143b778d5bc2bf390afe3d94f9a", "referenceCount": 27, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "HierarchyNet: Hierarchical CNN-Based Urban Building Classification", "abstract": "Automatic building categorization and analysis are particularly relevant for smart city applications and cultural heritage programs. Taking a picture of the facade of a building and instantly obtaining information about it can enable the automation of processes in urban planning, virtual city tours, and digital archiving of cultural artifacts. In this paper, we go beyond traditional convolutional neural networks (CNNs) for image classification and propose the HierarchyNet: a new hierarchical network for the classification of urban buildings from all across the globe into different main and subcategories from images of their facades. We introduce a coarse-to-fine hierarchy on the dataset and the model learns to simultaneously extract features and classify across both levels of hierarchy. We propose a new multiplicative layer, which is able to improve the accuracy of the finer prediction by considering the feedback signal of the coarse layers. We have quantitatively evaluated the proposed approach both on our proposed building datasets, as well as on various benchmark databases to demonstrate that the model is able to efficiently learn hierarchical information. The HierarchyNet model is able to outperform the state-of-the-art convolutional neural networks in urban building classification as well as in other multi-label classification tasks while using significantly fewer parameters.", "year": 2020, "venue": "Remote Sensing", "authors": [ "Salma Taoufiq", "Balázs Nagy", "C. 
Benedek" ], "externalIds": { "MAG": "3107738790", "DBLP": "journals/remotesensing/TaoufiqNB20", "DOI": "10.3390/rs12223794", "CorpusId": 229182703 }, "url": "https://www.semanticscholar.org/paper/a2496ffd056d6d2e435fb00a9419e52c0b7eec71", "referenceCount": 32, "citationCount": 36, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Coherent Hierarchical Multi-Label Classification Networks", "abstract": "Hierarchical multi-label classification (HMC) is a challenging classification task extending standard multi-label classification problems by imposing a hierarchy constraint on the classes. In this paper, we propose C-HMCNN(h), a novel approach for HMC problems, which, given a network h for the underlying multi-label classification problem, exploits the hierarchy information in order to produce predictions coherent with the constraint and improve performance. We conduct an extensive experimental analysis showing the superior performance of C-HMCNN(h) when compared to state-of-the-art models.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Eleonora Giunchiglia", "Thomas Lukasiewicz" ], "externalIds": { "MAG": "3104057403", "DBLP": "conf/nips/GiunchigliaL20", "ArXiv": "2010.10151", "CorpusId": 224803614 }, "url": "https://www.semanticscholar.org/paper/eedd6bb9cc3c0ea77acc61cc5cb945ad46d0c167", "referenceCount": 34, "citationCount": 75, "influentialCitationCount": 19, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Spherical separation with infinitely far center", "abstract": null, "year": 2020, "venue": "Soft Computing - A Fusion of Foundations, Methodologies and Applications", "authors": [ "A. Astorino", "A. Fuduli" ], "externalIds": { "MAG": "3092244490", "DBLP": "journals/soco/AstorinoF20", "DOI": "10.1007/s00500-020-05352-2", "CorpusId": 225169048 }, "url": "https://www.semanticscholar.org/paper/2cd89b17d9a4c653029d37039c5afa37d2f69e13", "referenceCount": 63, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Neural Network Training with Safe Regularization in the Null Space of Batch Activations", "abstract": null, "year": 2020, "venue": "International Conference on Artificial Neural Networks", "authors": [ "Matthias Kissel", "Martin Gottwald", "K. Diepold" ], "externalIds": { "MAG": "3097190837", "DBLP": "conf/icann/KisselGD20", "DOI": "10.1007/978-3-030-61616-8_18", "CorpusId": 224806120 }, "url": "https://www.semanticscholar.org/paper/c2532758908d52f4d63318a4cf8c380113d6f33a", "referenceCount": 11, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The use of grossone in elastic net regularization and sparse support vector machines", "abstract": null, "year": 2020, "venue": "Soft Computing - A Fusion of Foundations, Methodologies and Applications", "authors": [ "R. De Leone", "N. Egidi", "L. 
Fatone" ], "externalIds": { "MAG": "3045034854", "DBLP": "journals/soco/LeoneEF20", "DOI": "10.1007/s00500-020-05185-z", "CorpusId": 225496799 }, "url": "https://www.semanticscholar.org/paper/cd0c34e106114bf7dd09ccc2c162a2e59dc6d22d", "referenceCount": 37, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Solving mixed Pareto-Lexicographic multi-objective optimization problems: The case of priority chains", "abstract": null, "year": 2020, "venue": "Swarm and Evolutionary Computation", "authors": [ "Leonardo Lai", "Lorenzo Fiaschi", "M. Cococcioni" ], "externalIds": { "DBLP": "journals/swevo/LaiFC20", "MAG": "3013744805", "DOI": "10.1016/j.swevo.2020.100687", "CorpusId": 216227801 }, "url": "https://www.semanticscholar.org/paper/2a7f9b2ca42c0c46644996ecbab1ac7d5c366598", "referenceCount": 50, "citationCount": 21, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Hierarchical Classification for Category Prediction in E-commerce System", "abstract": "In e-commerce system, category prediction is to automatically predict categories of given texts. Different from traditional classification where there are no relations between classes, category prediction is reckoned as a standard hierarchical classification problem since categories are usually organized as a hierarchical tree. In this paper, we address hierarchical category prediction. We propose a Deep Hierarchical Classification framework, which incorporates the multi-scale hierarchical information in neural networks and introduces a representation sharing strategy according to the category tree. We also define a novel combined loss function to punish hierarchical prediction losses. The evaluation shows that the proposed approach outperforms existing approaches in accuracy.", "year": 2020, "venue": "ECNLP", "authors": [ "D. Gao", "Wenjing Yang", "Huiling Zhou", "Yi Wei", "Y. Hu", "Hao Wang" ], "externalIds": { "MAG": "3024057201", "DBLP": "journals/corr/abs-2005-06692", "ACL": "2020.ecnlp-1.10", "ArXiv": "2005.06692", "DOI": "10.18653/v1/2020.ecnlp-1.10", "CorpusId": 218628764 }, "url": "https://www.semanticscholar.org/paper/73bb734055ca72273b4f0618cb4f1a3fd2e9e8a8", "referenceCount": 15, "citationCount": 21, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Solving the Lexicographic Multi-Objective Mixed-Integer Linear Programming Problem using branch-and-bound and grossone methodology", "abstract": null, "year": 2020, "venue": "Communications in nonlinear science & numerical simulation", "authors": [ "M. Cococcioni", "Alessandro Cudazzo", "M. Pappalardo", "Y. Sergeyev" ], "externalIds": { "MAG": "3000250712", "DBLP": "journals/cnsns/CococcioniCPS20", "DOI": "10.1016/j.cnsns.2020.105177", "CorpusId": 214245009 }, "url": "https://www.semanticscholar.org/paper/c10f9e1d44ffe870207fde352a87c87382a3c949", "referenceCount": 48, "citationCount": 41, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Yoga-82: A New Dataset for Fine-grained Classification of Human Poses", "abstract": "Human pose estimation is a well-known problem in computer vision to locate joint positions. Existing datasets for learning of poses are observed to be not challenging enough in terms of pose diversity, object occlusion and view points. 
This makes the pose annotation process relatively simple and restricts the application of the models that have been trained on them. To handle more variety in human poses, we propose the concept of fine-grained hierarchical pose classification, in which we formulate the pose estimation as a classification task, and propose a dataset, Yoga-82, for large-scale yoga pose recognition with 82 classes. Yoga-82 consists of complex poses where fine annotations may not be possible. To resolve this, we provide hierarchical labels for yoga poses based on the body configuration of the pose. The dataset contains a three-level hierarchy including body positions, variations in body positions, and the actual pose names. We present the classification accuracy of the state-of-the-art convolutional neural network architectures on Yoga-82. We also present several hierarchical variants of DenseNet in order to utilize the hierarchical labels.", "year": 2020, "venue": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "authors": [ "Manisha Verma", "Sudhakar Kumawat", "Yuta Nakashima", "S. Raman" ], "externalIds": { "ArXiv": "2004.10362", "DBLP": "journals/corr/abs-2004-10362", "MAG": "3020208743", "DOI": "10.1109/CVPRW50498.2020.00527", "CorpusId": 216056215 }, "url": "https://www.semanticscholar.org/paper/910bbdf7b1b7f22b44b9cc777ad088486517d194", "referenceCount": 27, "citationCount": 67, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Hierarchical Multi-label Classification of Text with Capsule Networks", "abstract": "Capsule networks have been shown to demonstrate good performance on structured data in the area of visual inference. In this paper we apply and compare simple shallow capsule networks for hierarchical multi-label text classification and show that they can perform superior to other neural networks, such as CNNs and LSTMs, and non-neural network architectures such as SVMs. For our experiments, we use the established Web of Science (WOS) dataset and introduce a new real-world scenario dataset, the BlurbGenreCollection (BGC). Our results confirm the hypothesis that capsule networks are especially advantageous for rare events and structurally diverse categories, which we attribute to their ability to combine latent encoded information.", "year": 2019, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Rami Aly", "Steffen Remus", "Chris Biemann" ], "externalIds": { "DBLP": "conf/acl/AlyRB19", "MAG": "2949448715", "ACL": "P19-2045", "DOI": "10.18653/v1/P19-2045", "CorpusId": 196196857 }, "url": "https://www.semanticscholar.org/paper/58203813610b866483ffc2bd1181f616ae38107c", "referenceCount": 25, "citationCount": 74, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Automatic Hierarchical Classification of Kelps Using Deep Residual Features", "abstract": "Across the globe, remote image data is rapidly being collected for the assessment of benthic communities from shallow to extremely deep waters on continental slopes to the abyssal seas. Exploiting this data is presently limited by the time it takes for experts to identify organisms found in these images. With this limitation in mind, a large effort has been made globally to introduce automation and machine learning algorithms to accelerate both classification and assessment of marine benthic biota. 
One major issue lies with organisms that move with swell and currents, such as kelps. This paper presents an automatic hierarchical classification method (local binary classification as opposed to the conventional flat classification) to classify kelps in images collected by autonomous underwater vehicles. The proposed kelp classification approach exploits learned feature representations extracted from deep residual networks. We show that these generic features outperform the traditional off-the-shelf CNN features and the conventional hand-crafted features. Experiments also demonstrate that the hierarchical classification method outperforms the traditional parallel multi-class classifications by a significant margin (90.0% vs. 57.6% and 77.2% vs. 59.0%) on Benthoz15 and Rottnest datasets respectively. Furthermore, we compare different hierarchical classification approaches and experimentally show that the sibling hierarchical training approach outperforms the inclusive hierarchical approach by a significant margin. We also report an application of our proposed method to study the change in kelp cover over time for annually repeated AUV surveys.", "year": 2019, "venue": "Italian National Conference on Sensors", "authors": [ "A. Mahmood", "A. Ospina", "Bennamoun", "S. An", "Ferdous Sohel", "F. Boussaïd", "R. Hovey", "Robert B. Fisher", "G. Kendrick" ], "externalIds": { "MAG": "2999688610", "PubMedCentral": "7013955", "DBLP": "journals/sensors/MahmoodOBASBHFK20", "DOI": "10.3390/s20020447", "CorpusId": 195658121, "PubMed": "31941132" }, "url": "https://www.semanticscholar.org/paper/17f65c54b4ce2210c3506d5e9628d95fca8d7830", "referenceCount": 59, "citationCount": 29, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Hierarchical Taxonomy-Aware and Attentional Graph Capsule RCNNs for Large-Scale Multi-Label Text Classification", "abstract": "CNNs, RNNs, GCNs, and CapsNets have shown significant insights in representation learning and are widely used in various text mining tasks such as large-scale multi-label text classification. Most existing deep models for multi-label text classification consider either the non-consecutive and long-distance semantics or the sequential semantics. However, how to coherently take them into account is still far from studied. In addition, most existing methods treat output labels as independent medoids, ignoring the hierarchical relationships among them, which leads to a substantial loss of useful semantic information. In this paper, we propose a novel hierarchical taxonomy-aware and attentional graph capsule recurrent CNNs framework for large-scale multi-label text classification. Specifically, we first propose to model each document as a word order preserved graph-of-words and normalize it as a corresponding word matrix representation preserving both the non-consecutive, long-distance and local sequential semantics. Then the word matrix is input to the proposed attentional graph capsule recurrent CNNs for effectively learning the semantic features. To leverage the hierarchical relations among the class labels, we propose a hierarchical taxonomy embedding method to learn their representations, and define a novel weighted margin loss by incorporating the label representation similarity. 
Extensive evaluations on three datasets show that our model significantly improves the performance of large-scale multi-label text classification by comparing with state-of-the-art approaches.", "year": 2019, "venue": "IEEE Transactions on Knowledge and Data Engineering", "authors": [ "Hao Peng", "Jianxin Li", "Qiran Gong", "Senzhang Wang", "Lifang He", "Bo Li", "Lihong Wang", "Philip S. Yu" ], "externalIds": { "MAG": "2995837271", "ArXiv": "1906.04898", "DBLP": "journals/tkde/PengLWWGYLYH21", "DOI": "10.1109/TKDE.2019.2959991", "CorpusId": 186207008 }, "url": "https://www.semanticscholar.org/paper/97eb41cb7ca3419d297fd402ff239de17d7b2ee8", "referenceCount": 74, "citationCount": 129, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Breast Cancer Multi-classification through Deep Neural Network and Hierarchical Classification Approach", "abstract": null, "year": 2019, "venue": "Multimedia tools and applications", "authors": [ "G. Murtaza", "Liyana Shuib", "G. Mujtaba", "Ghulam Raza" ], "externalIds": { "MAG": "2930208220", "DBLP": "journals/mta/MurtazaSMR20", "DOI": "10.1007/s11042-019-7525-4", "CorpusId": 89616763 }, "url": "https://www.semanticscholar.org/paper/e5419f3436a96d0c1a63824d61bb15374c8f572d", "referenceCount": 56, "citationCount": 47, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Hierarchical convolutional neural networks for fashion image classification", "abstract": null, "year": 2019, "venue": "Expert systems with applications", "authors": [ "Yian Seo", "K. Shin" ], "externalIds": { "DBLP": "journals/eswa/SeoS19", "MAG": "2892166671", "DOI": "10.1016/j.eswa.2018.09.022", "CorpusId": 53113324 }, "url": "https://www.semanticscholar.org/paper/5088630d331a3a3e563791a011da7f0b0b64eb8f", "referenceCount": 22, "citationCount": 157, "influentialCitationCount": 12, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "mlDEEPre: Multi-Functional Enzyme Function Prediction With Hierarchical Multi-Label Deep Learning", "abstract": "As a great challenge in bioinformatics, enzyme function prediction is a significant step toward designing novel enzymes and diagnosing enzyme-related diseases. Existing studies mainly focus on the mono-functional enzyme function prediction. However, the number of multi-functional enzymes is growing rapidly, which requires novel computational methods to be developed. In this paper, following our previous work, DEEPre, which uses deep learning to annotate mono-functional enzyme's function, we propose a novel method, mlDEEPre, which is designed specifically for predicting the functionalities of multi-functional enzymes. By adopting a novel loss function, associated with the relationship between different labels, and a self-adapted label assigning threshold, mlDEEPre can accurately and efficiently perform multi-functional enzyme prediction. Extensive experiments also show that mlDEEPre can outperform the other methods in predicting whether an enzyme is a mono-functional or a multi-functional enzyme (mono-functional vs. multi-functional), as well as the main class prediction across different criteria. 
Furthermore, due to the flexibility of mlDEEPre and DEEPre, mlDEEPre can be incorporated into DEEPre seamlessly, which enables the updated DEEPre to handle both mono-functional and multi-functional predictions without human intervention.", "year": 2019, "venue": "Frontiers in Genetics", "authors": [ "Zhenzhen Zou", "Shuye Tian", "Xin Gao", "Yu Li" ], "externalIds": { "PubMedCentral": "6349967", "MAG": "2908663744", "DOI": "10.3389/fgene.2018.00714", "CorpusId": 58535728, "PubMed": "30723495" }, "url": "https://www.semanticscholar.org/paper/6633c836ce97ae7d250fc33b6e23dfd60ad12152", "referenceCount": 51, "citationCount": 87, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "A hierarchical multi-label classification method based on neural networks for gene function prediction", "abstract": "abstract Gene function prediction is used to assign biological or biochemical functions to genes, which continues to be a challenging problem in modern biology. Genes may exhibit many functions simultaneously, and these functions are organized into a hierarchy, such as a directed acyclic graph (DAG) for Gene Ontology (GO). Because of these characteristics, gene function prediction can be seen as a typical hierarchical multi-label classification (HMC) task. A novel HMC method based on neural networks is proposed in this article for predicting gene function based on GO. The proposed method belongs to a local approach by transferring the HMC task to a set of subtasks. There are three strategies implemented in this method to improve its performance. First, to tackle the imbalanced data set problem when building the training data set for each class, negative instances selecting policy and SMOTE approach are used to preprocess each imbalanced training data set. Second, a particular multi-layer perceptron (MLP) is designed for each node in GO. Third, a post processing method based on the Bayesian network is used to guarantee that the results are consistent with the hierarchy constraint. The experimental results indicate that the proposed HMC-MLPN method is a promising method for gene function prediction based on a comparison with two other state-of-the-art methods.", "year": 2018, "venue": "Biotechnology & Biotechnological Equipment", "authors": [ "Shou Feng", "Ping Fu", "Wenbin Zheng" ], "externalIds": { "MAG": "2896109759", "DOI": "10.1080/13102818.2018.1521302", "CorpusId": 91440234 }, "url": "https://www.semanticscholar.org/paper/c419c11caec473d7a4881f0cdd2bf9df243cad21", "referenceCount": 38, "citationCount": 22, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "AWX: An Integrated Approach to Hierarchical-Multilabel Classification", "abstract": null, "year": 2018, "venue": "ECML/PKDD", "authors": [ "L. Masera", "E. Blanzieri" ], "externalIds": { "MAG": "2910644300", "DBLP": "conf/pkdd/MaseraB18", "DOI": "10.1007/978-3-030-10925-7_20", "CorpusId": 58015453 }, "url": "https://www.semanticscholar.org/paper/f9ededdf7ba871b8d659de577cb7d4db36e64667", "referenceCount": 24, "citationCount": 16, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Hierarchical Multi-Label Classification Networks", "abstract": "One of the most challenging machine learning problems is a particular case of data classification in which classes are hierarchically structured and objects can be assigned to multiple paths of the class hierarchy at the same time. 
This task is known as hierarchical multi-label classification (HMC), with applications in text classification, image annotation, and in bioinformatics problems such as protein function prediction. In this paper, we propose novel neural network architectures for HMC called HMCN, capable of simultaneously optimizing local and global loss functions for discovering local hierarchical class-relationships and global information from the entire class hierarchy while penalizing hierarchical violations. We evaluate its performance in 21 datasets from four distinct domains, and we compare it against the current HMC state-of-the-art approaches. Results show that HMCN substantially outperforms all baselines with statistical significance, arising as the novel state-of-the-art for HMC.", "year": 2018, "venue": "International Conference on Machine Learning", "authors": [ "Jonatas Wehrmann", "R. Cerri", "Rodrigo C. Barros" ], "externalIds": { "DBLP": "conf/icml/WehrmannCB18", "MAG": "2803270043", "CorpusId": 51881472 }, "url": "https://www.semanticscholar.org/paper/8d6ed8b6d49097660408179eb20362c5f55d638c", "referenceCount": 36, "citationCount": 226, "influentialCitationCount": 29, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Improving Hierarchical Classification of Transposable Elements using Deep Neural Networks", "abstract": "Transposable Elements (TEs) are DNA sequences capable of moving within a cell’s genome. Their transposition has many effects in genomes, such as creating genetic variability and promoting changes in genes’ functionality. Recently, TEs classification has been addressed using Machine Learning (ML), more specifically by Hierarchical Classification (HC) methods. Such works proved to be superior than previous ones in the literature. However, there is still room for improvement performance wise. In this direction, Deep Neural Networks (DNNs) have attracted a lot of attention in ML. In particular, Stacked Denoising Auto-Encoders (DAEs) and Deep Multi Layer-Perceptrons (MLPs) are known to provide outstanding results. By performing an extensive evaluation, our results point out that DNNs can enhance the performance of HC methods, being able to push further the state-of-art in TEs’ classification.", "year": 2018, "venue": "IEEE International Joint Conference on Neural Network", "authors": [ "F. Nakano", "S. Mastelini", "Sylvio Barbon Junior", "R. Cerri" ], "externalIds": { "MAG": "2897216915", "DBLP": "conf/ijcnn/NakanoMBC18", "DOI": "10.1109/IJCNN.2018.8489461", "CorpusId": 52988127 }, "url": "https://www.semanticscholar.org/paper/1bb48d3ac859f34372342c0f87b9fa4c280dfde7", "referenceCount": 23, "citationCount": 19, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Large-Scale Hierarchical Text Classification with Recursively Regularized Deep Graph-CNN", "abstract": "Text classification to a hierarchical taxonomy of topics is a common and practical problem. Traditional approaches simply use bag-of-words and have achieved good results. However, when there are a lot of labels with different topical granularities, bag-of-words representation may not be enough. Deep learning models have been proven to be effective to automatically learn different levels of representations for image data. It is interesting to study what is the best way to represent texts. 
In this paper, we propose a graph-CNN based deep learning model to first convert texts to graph-of-words, and then use graph convolution operations to convolve the word graph. Graph-of-words representation of texts has the advantage of capturing non-consecutive and long-distance semantics. CNN models have the advantage of learning different level of semantics. To further leverage the hierarchy of labels, we regularize the deep architecture with the dependency among labels. Our results on both RCV1 and NYTimes datasets show that we can significantly improve large-scale hierarchical text classification over traditional hierarchical text classification and existing deep models.", "year": 2018, "venue": "The Web Conference", "authors": [ "Hao Peng", "Jianxin Li", "Yu He", "Yaopeng Liu", "Mengjiao Bao", "Lihong Wang", "Yangqiu Song", "Qiang Yang" ], "externalIds": { "DBLP": "conf/www/PengLHLBWS018", "MAG": "2788667846", "DOI": "10.1145/3178876.3186005", "CorpusId": 4897546 }, "url": "https://www.semanticscholar.org/paper/a1a197449aeca81a39cb2213b41cef4831d6983e", "referenceCount": 49, "citationCount": 370, "influentialCitationCount": 17, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Lexicographic multi-objective linear programming using grossone methodology: Theory and algorithm", "abstract": null, "year": 2018, "venue": "Applied Mathematics and Computation", "authors": [ "M. Cococcioni", "M. Pappalardo", "Y. Sergeyev" ], "externalIds": { "DBLP": "journals/amc/CococcioniPS18", "MAG": "2620873085", "DOI": "10.1016/j.amc.2017.05.058", "CorpusId": 32986920 }, "url": "https://www.semanticscholar.org/paper/04e26bf29bb9e388471bd0f2336a178092a4b1f0", "referenceCount": 40, "citationCount": 79, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "A Neural Architecture for Automated ICD Coding", "abstract": "The International Classification of Diseases (ICD) provides a hierarchy of diagnostic codes for classifying diseases. Medical coding – which assigns a subset of ICD codes to a patient visit – is a mandatory process that is crucial for patient care and billing. Manual coding is time-consuming, expensive, and error prone. In this paper, we build a neural architecture for automated coding. It takes the diagnosis descriptions (DDs) of a patient as inputs and selects the most relevant ICD codes. This architecture contains four major ingredients: (1) tree-of-sequences LSTM encoding of code descriptions (CDs), (2) adversarial learning for reconciling the different writing styles of DDs and CDs, (3) isotonic constraints for incorporating the importance order among the assigned codes, and (4) attentional matching for performing many-to-one and one-to-many mappings from DDs to CDs. We demonstrate the effectiveness of the proposed methods on a clinical datasets with 59K patient visits.", "year": 2017, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Haoran Shi", "P. Xie", "Zhiting Hu", "Ming Zhang", "E. 
Xing" ], "externalIds": { "MAG": "2768948924", "ArXiv": "1711.04075", "DBLP": "journals/corr/abs-1711-04075", "ACL": "P18-1098", "DOI": "10.18653/v1/P18-1098", "CorpusId": 3565485 }, "url": "https://www.semanticscholar.org/paper/73ad6d95db637240cf71b2656f19d12498cf36bb", "referenceCount": 57, "citationCount": 135, "influentialCitationCount": 15, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DEEPre: sequence-based enzyme EC number prediction by deep learning", "abstract": "Motivation Annotation of enzyme function has a broad range of applications, such as metagenomics, industrial biotechnology, and diagnosis of enzyme deficiency‐caused diseases. However, the time and resource required make it prohibitively expensive to experimentally determine the function of every enzyme. Therefore, computational enzyme function prediction has become increasingly important. In this paper, we develop such an approach, determining the enzyme function by predicting the Enzyme Commission number. Results We propose an end‐to‐end feature selection and classification model training approach, as well as an automatic and robust feature dimensionality uniformization method, DEEPre, in the field of enzyme function prediction. Instead of extracting manually crafted features from enzyme sequences, our model takes the raw sequence encoding as inputs, extracting convolutional and sequential features from the raw encoding based on the classification result to directly improve the prediction performance. The thorough cross‐fold validation experiments conducted on two large‐scale datasets show that DEEPre improves the prediction performance over the previous state‐of‐the‐art methods. In addition, our server outperforms five other servers in determining the main class of enzymes on a separate low‐homology dataset. Two case studies demonstrate DEEPre's ability to capture the functional difference of enzyme isoforms. Availability and implementation The server could be accessed freely at http://www.cbrc.kaust.edu.sa/DEEPre. Contact xin.gao@kaust.edu.sa Supplementary information Supplementary data are available at Bioinformatics online.", "year": 2017, "venue": "Bioinform.", "authors": [ "Yu Li", "Sheng Wang", "Ramzan Umarov", "Bingqing Xie", "M. Fan", "Lihua Li", "Xin Gao" ], "externalIds": { "DBLP": "journals/bioinformatics/LiWUXFLG18", "MAG": "2766352633", "PubMedCentral": "6030869", "DOI": "10.1093/bioinformatics/btx680", "CorpusId": 3790226, "PubMed": "29069344" }, "url": "https://www.semanticscholar.org/paper/c2657207664566d8c5c5a7071a1bd85d9c62d498", "referenceCount": 85, "citationCount": 215, "influentialCitationCount": 15, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "B-CNN: Branch Convolutional Neural Network for Hierarchical Classification", "abstract": "Convolutional Neural Network (CNN) image classifiers are traditionally designed to have sequential convolutional layers with a single output layer. This is based on the assumption that all target classes should be treated equally and exclusively. However, some classes can be more difficult to distinguish than others, and classes may be organized in a hierarchy of categories. At the same time, a CNN is designed to learn internal representations that abstract from the input data based on its hierarchical layered structure. 
So it is natural to ask if an inverse of this idea can be applied to learn a model that can predict over a classification hierarchy using multiple output layers in decreasing order of class abstraction. In this paper, we introduce a variant of the traditional CNN model named the Branch Convolutional Neural Network (B-CNN). A B-CNN model outputs multiple predictions ordered from coarse to fine along the concatenated convolutional layers corresponding to the hierarchical structure of the target classes, which can be regarded as a form of prior knowledge on the output. To learn with B-CNNs a novel training strategy, named the Branch Training strategy (BT-strategy), is introduced which balances the strictness of the prior with the freedom to adjust parameters on the output layers to minimize the loss. In this way we show that CNN based models can be forced to learn successively coarse to fine concepts in the internal layers at the output stage, and that hierarchical prior knowledge can be adopted to boost CNN models' classification performance. Our models are evaluated to show that the B-CNN extensions improve over the corresponding baseline CNN on the benchmark datasets MNIST, CIFAR-10 and CIFAR-100.", "year": 2017, "venue": "arXiv.org", "authors": [ "Xinqi Zhu", "Michael Bain" ], "externalIds": { "DBLP": "journals/corr/abs-1709-09890", "MAG": "2756815061", "ArXiv": "1709.09890", "CorpusId": 5983986 }, "url": "https://www.semanticscholar.org/paper/a73e7e1ef2d3278cdeb73f25be3f1588eb4a5ea2", "referenceCount": 36, "citationCount": 140, "influentialCitationCount": 28, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "HDLTex: Hierarchical Deep Learning for Text Classification", "abstract": "Increasingly large document collections require improved information processing methods for searching, retrieving, and organizing text. Central to these information processing methods is document classification, which has become an important application for supervised learning. Recently the performance of traditional supervised classifiers has degraded as the number of documents has increased. This is because along with growth in the number of documents has come an increase in the number of categories. This paper approaches this problem differently from current document classification methods that view the problem as multi-class classification. Instead we perform hierarchical classification using an approach we call Hierarchical Deep Learning for Text classification (HDLTex). HDLTex employs stacks of deep learning architectures to provide specialized understanding at each level of the document hierarchy.", "year": 2017, "venue": "International Conference on Machine Learning and Applications", "authors": [ "Kamran Kowsari", "Donald E. Brown", "Mojtaba Heidarysafa", "K. Meimandi", "M. Gerber", "Laura E. Barnes" ], "externalIds": { "ArXiv": "1709.08267", "DBLP": "journals/corr/abs-1709-08267", "MAG": "2759474451", "DOI": "10.1109/ICMLA.2017.0-134", "CorpusId": 393535 }, "url": "https://www.semanticscholar.org/paper/9dfacffdae4527d01563814c804e410e4ca885e1", "referenceCount": 54, "citationCount": 360, "influentialCitationCount": 43, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fashion-MNIST: a Novel Image Dataset for Benchmarking Machine Learning Algorithms", "abstract": "We present Fashion-MNIST, a new dataset comprising of 28x28 grayscale images of 70,000 fashion products from 10 categories, with 7,000 images per category. 
The training set has 60,000 images and the test set has 10,000 images. Fashion-MNIST is intended to serve as a direct drop-in replacement for the original MNIST dataset for benchmarking machine learning algorithms, as it shares the same image size, data format and the structure of training and testing splits. The dataset is freely available at this https URL", "year": 2017, "venue": "arXiv.org", "authors": [ "Han Xiao", "Kashif Rasul", "Roland Vollgraf" ], "externalIds": { "MAG": "2750384547", "DBLP": "journals/corr/abs-1708-07747", "ArXiv": "1708.07747", "CorpusId": 702279 }, "url": "https://www.semanticscholar.org/paper/f9c602cc436a9ea2f9e7db48c77d924e09ce3c32", "referenceCount": 6, "citationCount": 7771, "influentialCitationCount": 1471, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "DeepGO: predicting protein functions from sequence and interactions using a deep ontology-aware classifier", "abstract": "Abstract Motivation A large number of protein sequences are becoming available through the application of novel high-throughput sequencing technologies. Experimental functional characterization of these proteins is time-consuming and expensive, and is often only done rigorously for few selected model organisms. Computational function prediction approaches have been suggested to fill this gap. The functions of proteins are classified using the Gene Ontology (GO), which contains over 40 000 classes. Additionally, proteins have multiple functions, making function prediction a large-scale, multi-class, multi-label problem. Results We have developed a novel method to predict protein function from sequence. We use deep learning to learn features from protein sequences as well as a cross-species protein–protein interaction network. Our approach specifically outputs information in the structure of the GO and utilizes the dependencies between GO classes as background information to construct a deep learning model. We evaluate our method using the standards established by the Computational Assessment of Function Annotation (CAFA) and demonstrate a significant improvement over baseline methods such as BLAST, in particular for predicting cellular locations. Availability and implementation Web server: http://deepgo.bio2vec.net, Source code: https://github.com/bio-ontology-research-group/deepgo Supplementary information Supplementary data are available at Bioinformatics online.", "year": 2017, "venue": "Bioinform.", "authors": [ "Maxat Kulmanov", "Mohammed Asif Khan", "R. Hoehndorf" ], "externalIds": { "DBLP": "journals/bioinformatics/KulmanovKH18", "ArXiv": "1705.05919", "MAG": "2615066396", "PubMedCentral": "5860606", "DOI": "10.1093/bioinformatics/btx624", "CorpusId": 3306193, "PubMed": "29028931" }, "url": "https://www.semanticscholar.org/paper/48326bdcbb094fce48b17390f7743c23cd0a1ebc", "referenceCount": 46, "citationCount": 337, "influentialCitationCount": 36, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Biology", "Medicine" ] }, { "title": "Reduction strategies for hierarchical multi-label classification in protein function prediction", "abstract": null, "year": 2016, "venue": "BMC Bioinformatics", "authors": [ "R. Cerri", "Rodrigo C. Barros", "A. 
Carvalho", "Yaochu Jin" ], "externalIds": { "MAG": "2573410259", "PubMedCentral": "5024469", "DBLP": "journals/bmcbi/CerriBCJ16", "DOI": "10.1186/s12859-016-1232-1", "CorpusId": 17890068, "PubMed": "27627880" }, "url": "https://www.semanticscholar.org/paper/3f087e754d5b16fb464ff6f792b759f8413a391f", "referenceCount": 56, "citationCount": 97, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Dueling Network Architectures for Deep Reinforcement Learning", "abstract": "In recent years there have been many successes of using deep representations in reinforcement learning. Still, many of these applications use conventional architectures, such as convolutional networks, LSTMs, or auto-encoders. In this paper, we present a new neural network architecture for model-free reinforcement learning. Our dueling network represents two separate estimators: one for the state value function and one for the state-dependent action advantage function. The main benefit of this factoring is to generalize learning across actions without imposing any change to the underlying reinforcement learning algorithm. Our results show that this architecture leads to better policy evaluation in the presence of many similar-valued actions. Moreover, the dueling architecture enables our RL agent to outperform the state-of-the-art on the Atari 2600 domain.", "year": 2015, "venue": "International Conference on Machine Learning", "authors": [ "Ziyun Wang", "T. Schaul", "Matteo Hessel", "H. V. Hasselt", "Marc Lanctot", "Nando de Freitas" ], "externalIds": { "DBLP": "conf/icml/WangSHHLF16", "ArXiv": "1511.06581", "MAG": "2951799221", "CorpusId": 5389801 }, "url": "https://www.semanticscholar.org/paper/4c05d7caa357148f0bbd61720bdd35f0bc05eb81", "referenceCount": 28, "citationCount": 3372, "influentialCitationCount": 435, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Very Deep Convolutional Networks for Large-Scale Image Recognition", "abstract": "In this work we investigate the effect of the convolutional network depth on its accuracy in the large-scale image recognition setting. Our main contribution is a thorough evaluation of networks of increasing depth using an architecture with very small (3x3) convolution filters, which shows that a significant improvement on the prior-art configurations can be achieved by pushing the depth to 16-19 weight layers. These findings were the basis of our ImageNet Challenge 2014 submission, where our team secured the first and the second places in the localisation and classification tracks respectively. We also show that our representations generalise well to other datasets, where they achieve state-of-the-art results. We have made our two best-performing ConvNet models publicly available to facilitate further research on the use of deep visual representations in computer vision.", "year": 2014, "venue": "International Conference on Learning Representations", "authors": [ "K. 
Simonyan", "Andrew Zisserman" ], "externalIds": { "MAG": "2949429431", "ArXiv": "1409.1556", "DBLP": "journals/corr/SimonyanZ14a", "CorpusId": 14124313 }, "url": "https://www.semanticscholar.org/paper/eb42cf88027de515750f230b23b1a057dc782108", "referenceCount": 43, "citationCount": 93036, "influentialCitationCount": 13588, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Hierarchical multi-label classification using local neural networks", "abstract": null, "year": 2014, "venue": "Journal of computer and system sciences (Print)", "authors": [ "R. Cerri", "Rodrigo C. Barros", "A. Carvalho" ], "externalIds": { "MAG": "1980179845", "DBLP": "journals/jcss/CerriBC14", "DOI": "10.1016/J.JCSS.2013.03.007", "CorpusId": 14589084 }, "url": "https://www.semanticscholar.org/paper/ff7eb443d708674a257ebf84f605daff578a9c99", "referenceCount": 55, "citationCount": 133, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The use of grossone in Mathematical Programming and Operations Research", "abstract": null, "year": 2011, "venue": "Applied Mathematics and Computation", "authors": [ "Sonia De Cosmis", "R. Leone" ], "externalIds": { "MAG": "2951499891", "ArXiv": "1107.5681", "DBLP": "journals/amc/CosmisL12", "DOI": "10.1016/j.amc.2011.07.042", "CorpusId": 4688270 }, "url": "https://www.semanticscholar.org/paper/25abbafd3a6459738f611fd449ff9e94aeb9ff2c", "referenceCount": 14, "citationCount": 86, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "ImageNet: A large-scale hierarchical image database", "abstract": "The explosion of image data on the Internet has the potential to foster more sophisticated and robust models and algorithms to index, retrieve, organize and interact with images and multimedia data. But exactly how such data can be harnessed and organized remains a critical problem. We introduce here a new database called “ImageNet”, a large-scale ontology of images built upon the backbone of the WordNet structure. ImageNet aims to populate the majority of the 80,000 synsets of WordNet with an average of 500-1000 clean and full resolution images. This will result in tens of millions of annotated images organized by the semantic hierarchy of WordNet. This paper offers a detailed analysis of ImageNet in its current state: 12 subtrees with 5247 synsets and 3.2 million images in total. We show that ImageNet is much larger in scale and diversity and much more accurate than the current image datasets. Constructing such a large-scale database is a challenging task. We describe the data collection scheme with Amazon Mechanical Turk. Lastly, we illustrate the usefulness of ImageNet through three simple applications in object recognition, image classification and automatic object clustering. We hope that the scale, accuracy, diversity and hierarchical structure of ImageNet can offer unparalleled opportunities to researchers in the computer vision community and beyond.", "year": 2009, "venue": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "authors": [ "Jia Deng", "Wei Dong", "R. Socher", "Li-Jia Li", "K. 
Li", "Li Fei-Fei" ], "externalIds": { "DBLP": "conf/cvpr/DengDSLL009", "MAG": "2108598243", "DOI": "10.1109/CVPR.2009.5206848", "CorpusId": 57246310 }, "url": "https://www.semanticscholar.org/paper/d2c733e34d48784a37d717fe43d9e93277a8c53e", "referenceCount": 27, "citationCount": 56678, "influentialCitationCount": 8947, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Decision trees for hierarchical multi-label classification", "abstract": null, "year": 2008, "venue": "Machine-mediated learning", "authors": [ "C. Vens", "Jan Struyf", "Leander Schietgat", "S. Džeroski", "H. Blockeel" ], "externalIds": { "DBLP": "journals/ml/VensSSDB08", "MAG": "1967542092", "DOI": "10.1007/s10994-008-5077-3", "CorpusId": 1847933 }, "url": "https://www.semanticscholar.org/paper/0a665d99541ed9a5c509fa87db5581f30bb364f6", "referenceCount": 41, "citationCount": 661, "influentialCitationCount": 70, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Deep classification in large-scale text hierarchies", "abstract": "Most classification algorithms are best at categorizing the Web documents into a few categories, such as the top two levels in the Open Directory Project. Such a classification method does not give very detailed topic-related class information for the user because the first two levels are often too coarse. However, classification on a large-scale hierarchy is known to be intractable for many target categories with cross-link relationships among them. In this paper, we propose a novel deep-classification approach to categorize Web documents into categories in a large-scale taxonomy. The approach consists of two stages: a search stage and a classification stage. In the first stage, a category-search algorithm is used to acquire the category candidates for a given document. Based on the category candidates, we prune the large-scale hierarchy to focus our classification effort on a small subset of the original hierarchy. As a result, the classification model is trained on the small subset before being applied to assign the category for a new document. Since the category candidates are sufficiently close to each other in the hierarchy, a statistical-language-model based classifier using n-gram features is exploited. Furthermore, the structure of the taxonomy can be utilized in this stage to improve the performance of classification. We demonstrate the performance of our proposed algorithms on the Open Directory Project with over 130,000 categories. Experimental results show that our proposed approach can reach 51.8% on the measure of Mi-F1 at the 5th level, which is 77.7% improvement over top-down based SVM classification algorithms.", "year": 2008, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "Gui-Rong Xue", "Dikan Xing", "Qiang Yang", "Yong Yu" ], "externalIds": { "MAG": "2073830447", "DBLP": "conf/sigir/XueXYY08", "DOI": "10.1145/1390334.1390440", "CorpusId": 8196955 }, "url": "https://www.semanticscholar.org/paper/ca30957ba5b105608439faa2293d2a2e5d0062cb", "referenceCount": 23, "citationCount": 160, "influentialCitationCount": 15, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Kernel-Based Learning of Hierarchical Multilabel Classification Models", "abstract": "We present a kernel-based algorithm for hierarchical text classification where the documents are allowed to belong to more than one category at a time. 
The classification model is a variant of the Maximum Margin Markov Network framework, where the classification hierarchy is represented as a Markov tree equipped with an exponential family defined on the edges. We present an efficient optimization algorithm based on incremental conditional gradient ascent in single-example subspaces spanned by the marginal dual variables. The optimization is facilitated with a dynamic programming based algorithm that computes best update directions in the feasible set. \n \nExperiments show that the algorithm can feasibly optimize training sets of thousands of examples and classification hierarchies consisting of hundreds of nodes. Training of the full hierarchical model is as efficient as training independent SVM-light classifiers for each node. The algorithm's predictive accuracy was found to be competitive with other recently introduced hierarchical multi-category or multilabel classification learning algorithms.", "year": 2006, "venue": "Journal of machine learning research", "authors": [ "Juho Rousu", "C. Saunders", "S. Szedmák", "J. Shawe-Taylor" ], "externalIds": { "MAG": "2135140174", "DBLP": "journals/jmlr/RousuSSS06", "CorpusId": 15146655 }, "url": "https://www.semanticscholar.org/paper/5cd1c62dc99b6c3ecb2e678aa6fb2bffe3853c28", "referenceCount": 33, "citationCount": 305, "influentialCitationCount": 28, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RCV1: A New Benchmark Collection for Text Categorization Research", "abstract": "Reuters Corpus Volume I (RCV1) is an archive of over 800,000 manually categorized newswire stories recently made available by Reuters, Ltd. for research purposes. Use of this data for research on text categorization requires a detailed understanding of the real world constraints under which the data was produced. Drawing on interviews with Reuters personnel and access to Reuters documentation, we describe the coding policy and quality control procedures used in producing the RCV1 data, the intended semantics of the hierarchical category taxonomies, and the corrections necessary to remove errorful data. We refer to the original data as RCV1-v1, and the corrected data as RCV1-v2. We benchmark several widely used supervised learning methods on RCV1-v2, illustrating the collection's properties, suggesting new directions for research, and providing baseline results for future studies. We make available detailed, per-category experimental results, as well as corrected versions of the category assignments and taxonomy structures, via online appendices.", "year": 2004, "venue": "Journal of machine learning research", "authors": [ "D. Lewis", "Yiming Yang", "T. Rose", "Fan Li" ], "externalIds": { "MAG": "2150102617", "DBLP": "journals/jmlr/LewisYRL04", "CorpusId": 11027141 }, "url": "https://www.semanticscholar.org/paper/2abe6b9ea1b13653b7384e9c8ef14b0d87e20cfc", "referenceCount": 51, "citationCount": 2989, "influentialCitationCount": 373, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multilayer feedforward networks are universal approximators", "abstract": null, "year": 1989, "venue": "Neural Networks", "authors": [ "K. Hornik", "M. Stinchcombe", "H. 
White" ], "externalIds": { "DBLP": "journals/nn/HornikSW89", "MAG": "2137983211", "DOI": "10.1016/0893-6080(89)90020-8", "CorpusId": 2757547 }, "url": "https://www.semanticscholar.org/paper/f22f6972e66bdd2e769fa64b0df0a13063c0c101", "referenceCount": 25, "citationCount": 21061, "influentialCitationCount": 498, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Constrained Optimization and Lagrange Multiplier Methods", "abstract": null, "year": 1982, "venue": "", "authors": [ "D. Bertsekas" ], "externalIds": { "MAG": "1669104078", "DOI": "10.1016/c2013-0-10366-2", "CorpusId": 118821639 }, "url": "https://www.semanticscholar.org/paper/cb89a25141febad0c14d080e2791c506ae6e4a76", "referenceCount": 0, "citationCount": 4437, "influentialCitationCount": 444, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Intrusion Detection System Based on Fast Hierarchical Deep Convolutional Neural Network", "abstract": "Currently, with the increasing number of devices connected to the Internet, search for network vulnerabilities to attackers has increased, and protection systems have become indispensable. There are prevalent security attacks, such as the Distributed Denial of Service (DDoS), which have been causing significant damage to companies. However, through security attacks, it is possible to extract characteristics that identify the type of attack. Thus, it is essential to have fast and effective security identification models. In this work, a novel Intrusion Detection System (IDS) based on the Tree-CNN hierarchical algorithm with the Soft-Root-Sign (SRS) activation function is proposed. The model reduces the training time of the generated model for detecting DDoS, Infiltration, Brute Force, and Web attacks. For performance assessment, the model is implemented in a medium-sized company, analyzing the level of complexity of the proposed solution. Experimental results demonstrate that the proposed hierarchical model achieves a significant reduction in execution time, around 36%, and an average detection accuracy of 0.98 considering all the analyzed attacks. Therefore, the results of performance evaluation show that the proposed classifier based on Tree-CNN is of low complexity and requires less processing time and computational resources, outperforming other current IDS based on machine learning algorithms.", "year": 2021, "venue": "IEEE Access", "authors": [ "Robson V. Mendonça", "Arthur A. M. Teodoro", "R. L. Rosa", "Muhammad Saadi", "D. Melgarejo", "P. Nardelli", "D. Z. Rodríguez" ], "externalIds": { "DBLP": "journals/access/MendoncaTRSMNR21", "DOI": "10.1109/ACCESS.2021.3074664", "CorpusId": 233435039 }, "url": "https://www.semanticscholar.org/paper/c1204c03ca667eca708c36067d47599ef1bfeac4", "referenceCount": 65, "citationCount": 60, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Bi-objective Trade-off with Dynamic Barrier Gradient Descent", "abstract": "Many machine learning tasks have to make a trade-off between two loss functions, typically the main data-fitness loss and an auxiliary loss. The most widely used approach is to optimize the linear combination of the objectives, which, however, requires manual tuning of the combination coefficient and is theoretically unsuitable for non-convex functions. 
In this work, we consider constrained optimization as a more principled approach for trading off two losses, with a special emphasis on lexicographic (lexico) optimization, a degenerated limit of constrained optimization which optimizes a secondary loss inside the optimal set of the main loss. We propose a dynamic barrier gradient descent algorithm which provides a unified solution of both constrained and lexicographic optimization. We establish the convergence of the method for general non-convex functions. Through a number of experiments on real-world deep learning tasks, we show that 1) lexico optimization provides a tuning-free approach to incorporating side loss functions without hurting the main objective, and 2) constrained and lexico optimization combined provide an automatic approach to profiling Pareto sets, especially in non-convex problems on which linear combination methods fail.", "year": 2021, "venue": "", "authors": [ "Chengyue Gong", "Xingchao Liu", "Qiang Liu" ], "externalIds": { "CorpusId": 245217006 }, "url": "https://www.semanticscholar.org/paper/005ded4b07e2302b75c32352336f12557e853664", "referenceCount": 39, "citationCount": 18, "influentialCitationCount": 8, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "The algorithmic numbers in non-archimedean numerical computing environments", "abstract": "There are many natural phenomena that can best be described by the use of infinitesimal and infinite numbers (see e.g. [1, 5, 13, 23]). However, until now, the Non-standard techniques have been applied to theoretical models. In this paper we investigate the possibility to implement such models in numerical simulations. First we define the field of Euclidean numbers which is a particular field of hyperreal numbers. Then, we introduce a set of families of Euclidean numbers, that we have called altogether algorithmic numbers, some of which are inspired by the IEEE 754 standard for floating point numbers. In particular, we suggest three formats which are relevant from the hardware implementation point of view: the Polynomial Algorithmic Numbers, the Bounded Algorithmic Numbers and the Truncated Algorithmic Numbers. In the second part of the paper, we show a few applications of such numbers.", "year": 2021, "venue": "Discrete and Continuous Dynamical Systems. Series A", "authors": [ "V. Benci", "M. 
Cococcioni" ], "externalIds": { "MAG": "3099608658", "DOI": "10.3934/dcdss.2020449", "CorpusId": 234143156 }, "url": "https://www.semanticscholar.org/paper/0238569d03b3cd02daa4d5be3f046d9e5567861c", "referenceCount": 27, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Hierarchical diagnosis of bearing faults using branch convolutional neural network considering noise interference and variable working conditions", "abstract": null, "year": 2021, "venue": "Knowledge-Based Systems", "authors": [ "Kaige Su", "Jianhua Liu", "Hui Xiong" ], "externalIds": { "DBLP": "journals/kbs/SuLX21", "DOI": "10.1016/j.knosys.2021.107386", "CorpusId": 238244932 }, "url": "https://www.semanticscholar.org/paper/0a490067ed9aa2bcc4e896ace561e62c7e293b93", "referenceCount": 0, "citationCount": 40, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "How to measure the infinite: Mathematics with infinite and infinitesimal numbers", "abstract": null, "year": 2018, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Hierarchical classification of diatom images using ensembles of predictive clustering trees", "abstract": null, "year": 2012, "venue": "Ecological Informatics", "authors": [ "I. Dimitrovski", "D. Kocev", "S. Loskovska", "S. Džeroski" ], "externalIds": { "DBLP": "journals/ecoi/DimitrovskiKLD12", "MAG": "2124396699", "DOI": "10.1016/j.ecoinf.2011.09.001", "CorpusId": 9042741 }, "url": "https://www.semanticscholar.org/paper/cfe08de191995e3f3dfc774a7ed910b12d0b7951", "referenceCount": 33, "citationCount": 84, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Multiple Layers of Features from Tiny Images", "abstract": "Groups at MIT and NYU have collected a dataset of millions of tiny colour images from the web. It is, in principle, an excellent dataset for unsupervised training of deep generative models, but previous researchers who have tried this have found it difficult to learn a good set of filters from the images. We show how to train a multi-layer generative model that learns to extract meaningful features which resemble those found in the human visual cortex. Using a novel parallelization algorithm to distribute the work among multiple machines connected on a network, we show how training such a model can be done in reasonable time. A second problematic aspect of the tiny images dataset is that there are no reliable class labels which makes it hard to use for object recognition experiments. We created two sets of reliable labels. The CIFAR-10 set has 6000 examples of each of 10 classes and the CIFAR-100 set has 600 examples of each of 100 non-overlapping classes. Using these labels, we show that object recognition is significantly improved by pre-training a layer of features on a large set of unlabeled tiny images.", "year": 2009, "venue": "", "authors": [ "A. 
Krizhevsky" ], "externalIds": { "MAG": "2945315962", "CorpusId": 18268744 }, "url": "https://www.semanticscholar.org/paper/5d90f06bb70a0a3dced62413346235c02b1aa086", "referenceCount": 15, "citationCount": 31347, "influentialCitationCount": 7621, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Consistent probabilistic outputs for protein function prediction", "abstract": null, "year": 2008, "venue": "Genome Biology", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Neural Networks: A Comprehensive Foundation", "abstract": null, "year": 1994, "venue": "", "authors": [ "Simon Haykin" ], "externalIds": { "MAG": "1556847948", "CorpusId": 60577818 }, "url": "https://www.semanticscholar.org/paper/045310b06e8a3983a363a118cc9dcc3f292970b4", "referenceCount": 3, "citationCount": 13479, "influentialCitationCount": 1475, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Non-Standard Analysis", "abstract": null, "year": 1977, "venue": "", "authors": [ "W. 
Luxemburg" ], "externalIds": { "MAG": "150913681", "DOI": "10.1007/978-94-010-1138-9_6", "CorpusId": 118308605 }, "url": "https://www.semanticscholar.org/paper/595d0d9965030b4bbdf420df2f761808fce68dbc", "referenceCount": 10, "citationCount": 817, "influentialCitationCount": 66, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "A Survey of Hierarchical Classification Across Different Application Domains", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": { "CorpusId": 207113055 }, "url": "https://www.semanticscholar.org/paper/9e3d12a20e3e6e76fec282c3a12f9cff6f8f2810", "referenceCount": 0, "citationCount": 1012, "influentialCitationCount": 114, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Solving MDPs with thresholded lexicographic ordering using reinforcement learning", "abstract": null, "year": null, "venue": "Ph.D. thesis", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2022. Deep learning with logical constraints", "abstract": null, "year": null, "venue": "Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI-22", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Semantic hierarchy-aware segmentation", "abstract": null, "year": null, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Disentangling Age and Identity with a Mutual Information Minimization Approach for Cross-Age Speaker Verification": { "paper_title": "Disentangling Age and Identity with a Mutual Information Minimization Approach for Cross-Age Speaker Verification", "arxiv_id": "2409.15974v1", "keyword": "representation learning", "authors": [ "Fengrun Zhang", "Wangjin Zhou", "Yiming Liu", "Wang Geng", "Yahui Shan", "Chen Zhang" ], "references": [ { "title": "Mutual Information-based Embedding Decoupling for Generalizable Speaker Verification", "abstract": "Domain shift is a challenging problem in speaker verification, especially when dealing with unseen target domains. Recently, embedding decoupling-based methods have shown their effectiveness. Typically, domain information is extracted by a domain classification loss and then decoupled from speaker embeddings. However, the domain classification loss fails to ensure that only domain information is encoded in domain embeddings. This paper proposes a novel mutual information-based embedding decoupling framework, in which the domain information is extracted by maximizing the mutual information between different speaker sample pairs in the same domain. Then the domain information is removed from speaker embeddings by minimizing mutual information between speaker and domain embeddings. 
Experiments indicate that our method can improve the generalization and outperform domain classification-based decoupling methods.", "year": 2023, "venue": "Interspeech", "authors": [ "Jiancheng Li", "Jiqing Han", "Shiwen Deng", "Tieran Zheng", "Yongjun He", "Guibin Zheng" ], "externalIds": { "DBLP": "conf/interspeech/Li0DZHZ23", "DOI": "10.21437/interspeech.2023-1314", "CorpusId": 260915301 }, "url": "https://www.semanticscholar.org/paper/d2d563cb3e209cf501e35c6a1a3e77143093721b", "referenceCount": 32, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Speaker Verification Across Ages: Investigating Deep Speaker Embedding Sensitivity to Age Mismatch in Enrollment and Test Speech", "abstract": "In this paper, we study the impact of the ageing on modern deep speaker embedding based automatic speaker verification (ASV) systems. We have selected two different datasets to examine ageing on the state-of-the-art ECAPA-TDNN system. The first dataset, used for addressing short-term ageing (up to 10 years time difference between enrollment and test) under uncontrolled conditions, is VoxCeleb. The second dataset, used for addressing long-term ageing effect (up to 40 years difference) of Finnish speakers under a more controlled setup, is Longitudinal Corpus of Finnish Spoken in Helsinki (LCFSH). Our study provides new insights into the impact of speaker ageing on modern ASV systems. Specifically, we establish a quantitative measure between ageing and ASV scores. Further, our research indicates that ageing affects female English speakers to a greater degree than male English speakers, while in the case of Finnish, it has a greater impact on male speakers than female speakers.", "year": 2023, "venue": "Interspeech", "authors": [ "Vishwanath Pratap Singh", "Md. Sahidullah", "T. Kinnunen" ], "externalIds": { "DBLP": "journals/corr/abs-2306-07501", "ArXiv": "2306.07501", "DOI": "10.48550/arXiv.2306.07501", "CorpusId": 259145394 }, "url": "https://www.semanticscholar.org/paper/852eaf5fc69a0ec67211824ec94763f54c40a59e", "referenceCount": 36, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "An Enhanced Res2Net with Local and Global Feature Fusion for Speaker Verification", "abstract": "Effective fusion of multi-scale features is crucial for improving speaker verification performance. While most existing methods aggregate multi-scale features in a layer-wise manner via simple operations, such as summation or concatenation. This paper proposes a novel architecture called Enhanced Res2Net (ERes2Net), which incorporates both local and global feature fusion techniques to improve the performance. The local feature fusion (LFF) fuses the features within one single residual block to extract the local signal. The global feature fusion (GFF) takes acoustic features of different scales as input to aggregate global signal. To facilitate effective feature fusion in both LFF and GFF, an attentional feature fusion module is employed in the ERes2Net architecture, replacing summation or concatenation operations. A range of experiments conducted on the VoxCeleb datasets demonstrate the superiority of the ERes2Net in speaker verification. 
Code has been made publicly available at https://github.com/alibaba-damo-academy/3D-Speaker.", "year": 2023, "venue": "Interspeech", "authors": [ "Yafeng Chen", "Siqi Zheng", "Haibo Wang", "Luyao Cheng", "Qian Chen", "Jiajun Qi" ], "externalIds": { "ArXiv": "2305.12838", "DBLP": "conf/interspeech/ChenZWC0Q23", "DOI": "10.48550/arXiv.2305.12838", "CorpusId": 258832457 }, "url": "https://www.semanticscholar.org/paper/db22321ee7316104b7e69cca6babcfbc7ee0f4b3", "referenceCount": 33, "citationCount": 20, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Wespeaker: A Research and Production Oriented Speaker Embedding Learning Toolkit", "abstract": "Speaker modeling is essential for many related tasks, such as speaker recognition and speaker diarization. The dominant modeling approach is fixed-dimensional vector representation, i.e., speaker embedding. This paper introduces a research and production oriented speaker embedding learning toolkit, Wespeaker. Wespeaker contains the implementation of scalable data management, state-of-the-art speaker embedding models, loss functions, and scoring back-ends, with highly competitive results achieved by structured recipes which were adopted in the winning systems in several speaker verification challenges. The application to other downstream tasks such as speaker diarization is also exhibited in the related recipe. Moreover, CPU- and GPU-compatible deployment codes are integrated for production-oriented development. The toolkit is publicly available at https://github.com/wenet-e2e/wespeaker.", "year": 2022, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Hongji Wang", "Che-Yuan Liang", "Shuai Wang", "Zhengyang Chen", "Binbin Zhang", "Xu Xiang", "Yan Deng", "Y. Qian" ], "externalIds": { "ArXiv": "2210.17016", "DBLP": "journals/corr/abs-2210-17016", "DOI": "10.1109/ICASSP49357.2023.10096626", "CorpusId": 253237382 }, "url": "https://www.semanticscholar.org/paper/9bc3f4f3310480289c1d31e8244ab8ffa8cc6829", "referenceCount": 33, "citationCount": 80, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Cross-Age Speaker Verification: Learning Age-Invariant Speaker Embeddings", "abstract": "Automatic speaker verification has achieved remarkable progress in recent years. However, there is little research on cross-age speaker verification (CASV) due to insufficient relevant data. In this paper, we mine cross-age test sets based on the VoxCeleb dataset and propose our age-invariant speaker representation(AISR) learning method. Since the VoxCeleb is collected from the YouTube platform, the dataset consists of cross-age data inherently. However, the meta-data does not contain the speaker age label. Therefore, we adopt the face age estimation method to predict the speaker age value from the associated visual data, then label the audio recording with the estimated age. We construct multiple Cross-Age test sets on VoxCeleb (Vox-CA), which deliberately select the positive trials with large age-gap. Also, the effect of nationality and gender is considered in selecting negative pairs to align with Vox-H cases. The baseline system performance drops from 1.939\\% EER on the Vox-H test set to 10.419\\% on the Vox-CA20 test set, which indicates how difficult the cross-age scenario is. 
Consequently, we propose an age-decoupling adversarial learning (ADAL) method to alleviate the negative effect of the age gap and reduce intra-class variance. Our method outperforms the baseline system by over 10\\% related EER reduction on the Vox-CA20 test set. The source code and trial resources are available on https://github.com/qinxiaoyi/Cross-Age_Speaker_Verification", "year": 2022, "venue": "Interspeech", "authors": [ "Xiaoyi Qin", "N. Li", "Chao Weng", "Dan Su", "Ming Li" ], "externalIds": { "DBLP": "conf/interspeech/Qin0W0022", "ArXiv": "2207.05929", "DOI": "10.48550/arXiv.2207.05929", "CorpusId": 250492904 }, "url": "https://www.semanticscholar.org/paper/9ad7c2497272d9d08d00791a5a38871f86078626", "referenceCount": 34, "citationCount": 13, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Tackling the Score Shift in Cross-Lingual Speaker Verification by Exploiting Language Information", "abstract": "This paper contains a post-challenge performance analysis on cross-lingual speaker verification of the IDLab submission to the VoxCeleb Speaker Recognition Challenge 2021 (VoxSRC-21). We show that current speaker embedding extractors consistently underestimate speaker similarity in within-speaker cross-lingual trials. Consequently, the typical training and scoring protocols do not put enough emphasis on the compensation of intra-speaker language variability. We propose two techniques to increase cross-lingual speaker verification robustness. First, we enhance our previously proposed Large-Margin Fine-Tuning (LM-FT) training stage with a mini-batch sampling strategy which increases the amount of intra-speaker cross-lingual samples within the mini-batch. Second, we incorporate language information in the logistic regression calibration stage. We integrate quality metrics based on soft and hard decisions of a VoxLingua107 language identification model. The proposed techniques result in a 11.7% relative improvement over the baseline model on the VoxSRC-21 test set and contributed to our third place finish in the corresponding challenge.", "year": 2021, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Jenthe Thienpondt", "Brecht Desplanques", "Kris Demuynck" ], "externalIds": { "DBLP": "journals/corr/abs-2110-09150", "ArXiv": "2110.09150", "DOI": "10.1109/ICASSP43922.2022.9746210", "CorpusId": 239016815 }, "url": "https://www.semanticscholar.org/paper/5299bb0103193b4232ce9f4b0f609a6c7839c847", "referenceCount": 25, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "Multi-Query Multi-Head Attention Pooling and Inter-Topk Penalty for Speaker Verification", "abstract": "This paper describes the multi-query multi-head attention (MQMHA) pooling and inter-topK penalty methods which were first proposed in our submitted system description for VoxCeleb speaker recognition challenge (VoxSRC) 2021. Most multi-head attention pooling mechanisms either attend to the whole feature through multiple heads or attend to several split parts of the whole feature. Our proposed MQMHA combines both these two mechanisms and gain more diversified information. The margin-based softmax loss functions are commonly adopted to obtain discriminative speaker representations. To further enhance the inter-class discriminability, we propose a method that adds an extra inter-topK penalty on some confused speakers. 
By adopting both the MQMHA and inter-topK penalty, we achieved state-of-the-art performance in all of the public VoxCeleb test sets.", "year": 2021, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Miao Zhao", "Yufeng Ma", "Yiwei Ding", "Yu Zheng", "Min Liu", "Minqiang Xu" ], "externalIds": { "ArXiv": "2110.05042", "DBLP": "conf/icassp/ZhaoMDZLX22", "DOI": "10.1109/ICASSP43922.2022.9746178", "CorpusId": 238583776 }, "url": "https://www.semanticscholar.org/paper/15db6693cff2ae4db1e36429cc279359b0044cdb", "referenceCount": 29, "citationCount": 17, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Disentangled Representation for Age-Invariant Face Recognition: A Mutual Information Minimization Perspective", "abstract": "General face recognition has seen remarkable progress in recent years. However, large age gap still remains a big challenge due to significant alterations in facial appearance and bone structure. Disentanglement plays a key role in partitioning face representations into identity-dependent and age-dependent components for age-invariant face recognition (AIFR). In this paper we propose a multi-task learning framework based on mutual information minimization (MT-MIM), which casts the disentangled representation learning as an objective of information constraints. The method trains a disentanglement network to minimize mutual information between the identity component and age component of the face image from the same person, and reduce the effect of age variations during the identification process. For quantitative measure of the degree of disentanglement, we verify that mutual information can represent as metric. The resulting identity-dependent representations are used for age-invariant face recognition. We evaluate MT-MIM on popular public-domain face aging datasets (FG-NET, MORPH Album 2, CACD and AgeDB) and obtained significant improvements over previous state-of-the-art methods. Specifically, our method exceeds the baseline models by over 0.4% on MORPH Album 2, and over 0.7% on CACD subsets, which are impressive improvements at the high accuracy levels of above 99% and an average of 94%.", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Xuege Hou", "Yali Li", "Shengjin Wang" ], "externalIds": { "DBLP": "conf/iccv/HouLW21", "DOI": "10.1109/ICCV48922.2021.00367", "CorpusId": 244681260 }, "url": "https://www.semanticscholar.org/paper/75490d0dab86ec4cf8d734ee051c03ce59eb4da8", "referenceCount": 61, "citationCount": 22, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Voxceleb Enrichment for Age and Gender Recognition", "abstract": "VoxCeleb datasets are widely used in speaker recognition studies. Our work serves two purposes. First, we provide speaker age labels and (an alternative) annotation of speaker gender. Second, we demonstrate the use of this metadata by constructing age and gender recognition models with different features and classifiers. We query different celebrity databases and apply consensus rules to derive age and gender labels. We also compare the original VoxCeleb gender labels with our labels to identify records that might be mislabeled in the original VoxCeleb data. On modeling side, we design a comprehensive study of multiple features and models for recognizing gender and age. 
Our best system, using i-vector features, achieved an F1-score of 0.9829 for gender recognition task using logistic regression, and the lowest mean absolute error (MAE) in age regression, 9.443 years, is obtained with ridge regression. This indicates challenge in age estimation from in-the-wild style speech data.", "year": 2021, "venue": "Automatic Speech Recognition & Understanding", "authors": [ "Khaled Hechmi", "Trung Ngo Trong", "Ville Hautamäki", "T. Kinnunen" ], "externalIds": { "DBLP": "journals/corr/abs-2109-13510", "ArXiv": "2109.13510", "DOI": "10.1109/ASRU51503.2021.9688085", "CorpusId": 238198433 }, "url": "https://www.semanticscholar.org/paper/f16f501a7926d99ebc748e07c315f8c3fcfaab5f", "referenceCount": 28, "citationCount": 22, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Learning to Diversify for Single Domain Generalization", "abstract": "Domain generalization (DG) aims to generalize a model trained on multiple source (i.e., training) domains to a distributionally different target (i.e., test) domain. In contrast to the conventional DG that strictly requires the availability of multiple source domains, this paper considers a more realistic yet challenging scenario, namely Single Domain Generalization (Single-DG), where only one source domain is available for training. In this scenario, the limited diversity may jeopardize the model generalization on unseen target domains. To tackle this problem, we propose a style-complement module to enhance the generalization power of the model by synthesizing images from diverse distributions that are complementary to the source ones. More specifically, we adopt a tractable upper bound of mutual information (MI) between the generated and source samples and perform a two-step optimization iteratively: (1) by minimizing the MI upper bound approximation for each sample pair, the generated images are forced to be diversified from the source samples; (2) subsequently, we maximize the MI between the samples from the same semantic category, which assists the network to learn discriminative features from diverse-styled images. Extensive experiments on three benchmark datasets demonstrate the superiority of our approach, which surpasses the state-of-the-art single-DG methods by up to 25.14%. The code will be publicly available at https://github.com/BUserName/Learning_to_diversify", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Zijian Wang", "Yadan Luo", "Ruihong Qiu", "Zi Huang", "Mahsa Baktash" ], "externalIds": { "DBLP": "journals/corr/abs-2108-11726", "ArXiv": "2108.11726", "DOI": "10.1109/ICCV48922.2021.00087", "CorpusId": 237303847 }, "url": "https://www.semanticscholar.org/paper/1109f18a41ad21635ae30952be89f56f52295f39", "referenceCount": 51, "citationCount": 200, "influentialCitationCount": 28, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Age-VOX-Celeb: Multi-Modal Corpus for Facial and Speech Estimation", "abstract": "Estimating a speaker’s age from their speech is more challenging than age estimation from their face because of insufficiently available public corpora. To tackle this problem, we construct a new audio-visual age corpus named AgeVoxCeleb by annotating age labels to VoxCeleb2 videos. AgeVoxCeleb is the first large-scale, balanced, and multi-modal age corpus that contains both video and speech of the same speakers from a wide age range. 
Using AgeVox-Celeb, our paper makes the following contributions: (i) A facial age estimation model can outperform a speech age estimation model by comparing the state-of-the-art models in each task. (ii) Facial age estimation is more robust against the difference between training and test sets. (iii) We developed cross-modal transfer learning from face to speech age estimation, showing that the estimated age with a facial age estimation model can be used to train a speech age estimation model. Proposed AgeVoxCeleb will be published in https://github.com/nttcslab-sp/agevoxceleb.", "year": 2021, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Naohiro Tawara", "A. Ogawa", "Yuki Kitagishi", "Hosana Kamiyama" ], "externalIds": { "MAG": "3160812582", "DBLP": "conf/icassp/TawaraOKK21", "DOI": "10.1109/ICASSP39728.2021.9414272", "CorpusId": 235780050 }, "url": "https://www.semanticscholar.org/paper/76fc630d88bc193c39fdce613390f9de0c755de5", "referenceCount": 31, "citationCount": 20, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "CLUB: A Contrastive Log-ratio Upper Bound of Mutual Information", "abstract": "Mutual information (MI) minimization has gained considerable interests in various machine learning tasks. However, estimating and minimizing MI in high-dimensional spaces remains a challenging problem, especially when only samples, rather than distribution forms, are accessible. Previous works mainly focus on MI lower bound approximation, which is not applicable to MI minimization problems. In this paper, we propose a novel Contrastive Log-ratio Upper Bound (CLUB) of mutual information. We provide a theoretical analysis of the properties of CLUB and its variational approximation. Based on this upper bound, we introduce an accelerated MI minimization training scheme, which bridges MI minimization with negative sampling. Simulation studies on Gaussian distributions show the reliable estimation ability of CLUB. Real-world MI minimization experiments, including domain adaptation and information bottleneck, further demonstrate the effectiveness of the proposed method.", "year": 2020, "venue": "International Conference on Machine Learning", "authors": [ "Pengyu Cheng", "Weituo Hao", "Shuyang Dai", "Jiachang Liu", "Zhe Gan", "L. Carin" ], "externalIds": { "DBLP": "conf/icml/ChengHDLGC20", "ArXiv": "2006.12013", "MAG": "3036928441", "CorpusId": 219966650 }, "url": "https://www.semanticscholar.org/paper/41382835ae60fb3280ea9a5b3004a236af1eb01b", "referenceCount": 48, "citationCount": 263, "influentialCitationCount": 52, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "The INTERSPEECH 2020 Far-Field Speaker Verification Challenge", "abstract": "The INTERSPEECH 2020 Far-Field Speaker Verification Challenge (FFSVC 2020) addresses three different research problems under well-defined conditions: far-field text-dependent speaker verification from single microphone array, far-field text-independent speaker verification from single microphone array, and far-field text-dependent speaker verification from distributed microphone arrays. All three tasks pose a cross-channel challenge to the participants. To simulate the real-life scenario, the enrollment utterances are recorded from close-talk cellphone, while the test utterances are recorded from the far-field microphone arrays. 
In this paper, we describe the database, the challenge, and the baseline system, which is based on a ResNet-based deep speaker network with cosine similarity scoring. For a given utterance, the speaker embeddings of different channels are equally averaged as the final embedding. The baseline system achieves minDCFs of 0.62, 0.66, and 0.64 and EERs of 6.27%, 6.55%, and 7.18% for task 1, task 2, and task 3, respectively.", "year": 2020, "venue": "Interspeech", "authors": [ "Xiaoyi Qin", "Ming Li", "Hui Bu", "Wei Rao", "Rohan Kumar Das", "Shrikanth S. Narayanan", "Haizhou Li" ], "externalIds": { "MAG": "3097224504", "ArXiv": "2005.08046", "DBLP": "journals/corr/abs-2005-08046", "DOI": "10.21437/interspeech.2020-1249", "CorpusId": 218673686 }, "url": "https://www.semanticscholar.org/paper/5b894732f03cfb0b13364de743aac2ee7ed9abda", "referenceCount": 34, "citationCount": 46, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "ECAPA-TDNN: Emphasized Channel Attention, Propagation and Aggregation in TDNN Based Speaker Verification", "abstract": "Current speaker verification techniques rely on a neural network to extract speaker representations. The successful x-vector architecture is a Time Delay Neural Network (TDNN) that applies statistics pooling to project variable-length utterances into fixed-length speaker characterizing embeddings. In this paper, we propose multiple enhancements to this architecture based on recent trends in the related fields of face verification and computer vision. Firstly, the initial frame layers can be restructured into 1-dimensional Res2Net modules with impactful skip connections. Similarly to SE-ResNet, we introduce Squeeze-and-Excitation blocks in these modules to explicitly model channel interdependencies. The SE block expands the temporal context of the frame layer by rescaling the channels according to global properties of the recording. Secondly, neural networks are known to learn hierarchical features, with each layer operating on a different level of complexity. To leverage this complementary information, we aggregate and propagate features of different hierarchical levels. Finally, we improve the statistics pooling module with channel-dependent frame attention. This enables the network to focus on different subsets of frames during each of the channel's statistics estimation. The proposed ECAPA-TDNN architecture significantly outperforms state-of-the-art TDNN based systems on the VoxCeleb test sets and the 2019 VoxCeleb Speaker Recognition Challenge.", "year": 2020, "venue": "Interspeech", "authors": [ "Brecht Desplanques", "Jenthe Thienpondt", "Kris Demuynck" ], "externalIds": { "ArXiv": "2005.07143", "DBLP": "conf/interspeech/DesplanquesTD20", "MAG": "3024869864", "DOI": "10.21437/INTERSPEECH.2020-2650", "CorpusId": 218630075 }, "url": "https://www.semanticscholar.org/paper/9609f4817a7e769f5e3e07084db35e46696e82cd", "referenceCount": 31, "citationCount": 1042, "influentialCitationCount": 180, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "On the limits of automatic speaker verification: Explaining degraded recognizer scores through acoustic changes resulting from voice disguise.", "abstract": "In speaker verification research, objective performance benchmarking of listeners and automatic speaker verification (ASV) systems are of key importance in understanding the limits of speaker recognition. 
While the adoption of common data and metrics has been instrumental to progress in ASV, there are two major shortcomings. First, the utterances lack intentional voice changes imposed by the speaker. Second, the standard evaluation metrics focus on average performance across all speakers and trials. As a result, a knowledge gap remains in how the acoustic changes impact recognition performance at the level of individual speakers. This paper addresses the limits of speaker recognition in ASV systems under voice disguise using a linear mixed effects model to analyze the impact of change in long-term statistics of selected features (formants F1-F4, the bandwidths B1-B4, F0, and speaking rate) to ASV log-likelihood ratio (LLR) score. The correlations between the proposed predictive model and the LLR scores are 0.72 for females and 0.81 for male speakers. As a whole, the difference in long-term F0 between enrollment and test utterances was found to be the individually most detrimental factor, even if the ASV system uses only spectral, rather than prosodic, features.", "year": 2019, "venue": "Journal of the Acoustical Society of America", "authors": [ "Rosa González Hautamäki", "Ville Hautamäki", "T. Kinnunen" ], "externalIds": { "MAG": "2964959870", "DOI": "10.1121/1.5119240", "CorpusId": 199383022, "PubMed": "31370618" }, "url": "https://www.semanticscholar.org/paper/7cd6a2c7123cdc06e67426c1e21202119634d6a3", "referenceCount": 55, "citationCount": 14, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "VoxCeleb2: Deep Speaker Recognition", "abstract": "The objective of this paper is speaker recognition under noisy and unconstrained conditions. \nWe make two key contributions. First, we introduce a very large-scale audio-visual speaker recognition dataset collected from open-source media. Using a fully automated pipeline, we curate VoxCeleb2 which contains over a million utterances from over 6,000 speakers. This is several times larger than any publicly available speaker recognition dataset. \nSecond, we develop and compare Convolutional Neural Network (CNN) models and training strategies that can effectively recognise identities from voice under various conditions. The models trained on the VoxCeleb2 dataset surpass the performance of previous works on a benchmark dataset by a significant margin.", "year": 2018, "venue": "Interspeech", "authors": [ "Joon Son Chung", "Arsha Nagrani", "Andrew Zisserman" ], "externalIds": { "MAG": "2950872062", "DBLP": "journals/corr/abs-1806-05622", "ArXiv": "1806.05622", "DOI": "10.21437/Interspeech.2018-1929", "CorpusId": 49211906 }, "url": "https://www.semanticscholar.org/paper/8875ae233bc074f5cd6c4ebba447b536a7e847a5", "referenceCount": 45, "citationCount": 2015, "influentialCitationCount": 313, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "X-Vectors: Robust DNN Embeddings for Speaker Recognition", "abstract": "In this paper, we use data augmentation to improve performance of deep neural network (DNN) embeddings for speaker recognition. The DNN, which is trained to discriminate between speakers, maps variable-length utterances to fixed-dimensional embeddings that we call x-vectors. Prior studies have found that embeddings leverage large-scale training datasets better than i-vectors. However, it can be challenging to collect substantial quantities of labeled data for training. 
We use data augmentation, consisting of added noise and reverberation, as an inexpensive method to multiply the amount of training data and improve robustness. The x-vectors are compared with i-vector baselines on Speakers in the Wild and NIST SRE 2016 Cantonese. We find that while augmentation is beneficial in the PLDA classifier, it is not helpful in the i-vector extractor. However, the x-vector DNN effectively exploits data augmentation, due to its supervised training. As a result, the x-vectors achieve superior performance on the evaluation datasets.", "year": 2018, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "David Snyder", "D. Garcia-Romero", "Gregory Sell", "Daniel Povey", "S. Khudanpur" ], "externalIds": { "DBLP": "conf/icassp/SnyderGSPK18", "MAG": "2890964092", "DOI": "10.1109/ICASSP.2018.8461375", "CorpusId": 46954166 }, "url": "https://www.semanticscholar.org/paper/389cd9824428be98a710f5f4de67121a70c15fd3", "referenceCount": 30, "citationCount": 2444, "influentialCitationCount": 299, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ArcFace: Additive Angular Margin Loss for Deep Face Recognition", "abstract": "One of the main challenges in feature learning using Deep Convolutional Neural Networks (DCNNs) for large-scale face recognition is the design of appropriate loss functions that can enhance the discriminative power. Centre loss penalises the distance between deep features and their corresponding class centres in the Euclidean space to achieve intra-class compactness. SphereFace assumes that the linear transformation matrix in the last fully connected layer can be used as a representation of the class centres in the angular space and therefore penalises the angles between deep features and their corresponding weights in a multiplicative way. Recently, a popular line of research is to incorporate margins in well-established loss functions in order to maximise face class separability. In this paper, we propose an Additive Angular Margin Loss (ArcFace) to obtain highly discriminative features for face recognition. The proposed ArcFace has a clear geometric interpretation due to its exact correspondence to geodesic distance on a hypersphere. We present arguably the most extensive experimental evaluation against all recent state-of-the-art face recognition methods on ten face recognition benchmarks which includes a new large-scale image database with trillions of pairs and a large-scale video dataset. We show that ArcFace consistently outperforms the state of the art and can be easily implemented with negligible computational overhead. To facilitate future research, the code has been made available.", "year": 2018, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Jiankang Deng", "J. Guo", "S. Zafeiriou" ], "externalIds": { "DBLP": "journals/corr/abs-1801-07698", "MAG": "2969985801", "DOI": "10.1109/CVPR.2019.00482", "CorpusId": 8923541 }, "url": "https://www.semanticscholar.org/paper/ca235ce0decdb4f80024a429a20ae4437ceae09e", "referenceCount": 66, "citationCount": 5365, "influentialCitationCount": 1281, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "VoxCeleb: A Large-Scale Speaker Identification Dataset", "abstract": "Most existing datasets for speaker identification contain samples obtained under quite constrained conditions, and are usually hand-annotated, hence limited in size. 
The goal of this paper is to generate a large scale text-independent speaker identification dataset collected 'in the wild'. We make two contributions. First, we propose a fully automated pipeline based on computer vision techniques to create the dataset from open-source media. Our pipeline involves obtaining videos from YouTube; performing active speaker verification using a two-stream synchronization Convolutional Neural Network (CNN), and confirming the identity of the speaker using CNN based facial recognition. We use this pipeline to curate VoxCeleb which contains hundreds of thousands of 'real world' utterances for over 1,000 celebrities. Our second contribution is to apply and compare various state of the art speaker identification techniques on our dataset to establish baseline performance. We show that a CNN based architecture obtains the best performance for both identification and verification.", "year": 2017, "venue": "Interspeech", "authors": [ "Arsha Nagrani", "Joon Son Chung", "Andrew Zisserman" ], "externalIds": { "MAG": "2950102398", "DBLP": "conf/interspeech/NagraniCZ17", "ArXiv": "1706.08612", "DOI": "10.21437/Interspeech.2017-950", "CorpusId": 10475843 }, "url": "https://www.semanticscholar.org/paper/8a26431833b0ea8659ef1d24bff3ac9e56dcfcd0", "referenceCount": 42, "citationCount": 2053, "influentialCitationCount": 348, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A study on data augmentation of reverberant speech for robust speech recognition", "abstract": "The environmental robustness of DNN-based acoustic models can be significantly improved by using multi-condition training data. However, as data collection is a costly proposition, simulation of the desired conditions is a frequently adopted strategy. In this paper we detail a data augmentation approach for far-field ASR. We examine the impact of using simulated room impulse responses (RIRs), as real RIRs can be difficult to acquire, and also the effect of adding point-source noises. We find that the performance gap between using simulated and real RIRs can be eliminated when point-source noises are added. Further we show that the trained acoustic models not only perform well in the distant-talking scenario but also provide better results in the close-talking scenario. We evaluate our approach on several LVCSR tasks which can adequately represent both scenarios.", "year": 2017, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Tom Ko", "Vijayaditya Peddinti", "Daniel Povey", "M. Seltzer", "S. Khudanpur" ], "externalIds": { "MAG": "2696967604", "DBLP": "conf/icassp/KoPPSK17", "DOI": "10.1109/ICASSP.2017.7953152", "CorpusId": 23138179 }, "url": "https://www.semanticscholar.org/paper/5005a3295dc2c931526438dd6d3f8fae8e34b641", "referenceCount": 23, "citationCount": 870, "influentialCitationCount": 53, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Residual Learning for Image Recognition", "abstract": "Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. 
On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers - 8× deeper than VGG nets [40] but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions1, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation.", "year": 2015, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Kaiming He", "X. Zhang", "Shaoqing Ren", "Jian Sun" ], "externalIds": { "DBLP": "conf/cvpr/HeZRS16", "MAG": "2949650786", "ArXiv": "1512.03385", "DOI": "10.1109/cvpr.2016.90", "CorpusId": 206594692 }, "url": "https://www.semanticscholar.org/paper/2c03df8b48bf3fa39054345bafabfeff15bfd11d", "referenceCount": 53, "citationCount": 172713, "influentialCitationCount": 28229, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MUSAN: A Music, Speech, and Noise Corpus", "abstract": "This report introduces a new corpus of music, speech, and noise. This dataset is suitable for training models for voice activity detection (VAD) and music/speech discrimination. Our corpus is released under a flexible Creative Commons license. The dataset consists of music from several genres, speech from twelve languages, and a wide assortment of technical and non-technical noises. We demonstrate use of this corpus for music/speech discrimination on Broadcast news and VAD for speaker identification.", "year": 2015, "venue": "arXiv.org", "authors": [ "David Snyder", "Guoguo Chen", "Daniel Povey" ], "externalIds": { "ArXiv": "1510.08484", "DBLP": "journals/corr/SnyderCP15", "MAG": "2219249508", "CorpusId": 15676318 }, "url": "https://www.semanticscholar.org/paper/32d21dc13f8770958b196a96f99a6f3959c7dc0f", "referenceCount": 9, "citationCount": 1199, "influentialCitationCount": 111, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Adam: A Method for Stochastic Optimization", "abstract": "We introduce Adam, an algorithm for first-order gradient-based optimization of stochastic objective functions, based on adaptive estimates of lower-order moments. The method is straightforward to implement, is computationally efficient, has little memory requirements, is invariant to diagonal rescaling of the gradients, and is well suited for problems that are large in terms of data and/or parameters. The method is also appropriate for non-stationary objectives and problems with very noisy and/or sparse gradients. The hyper-parameters have intuitive interpretations and typically require little tuning. Some connections to related algorithms, on which Adam was inspired, are discussed. We also analyze the theoretical convergence properties of the algorithm and provide a regret bound on the convergence rate that is comparable to the best known results under the online convex optimization framework. Empirical results demonstrate that Adam works well in practice and compares favorably to other stochastic optimization methods. 
Finally, we discuss AdaMax, a variant of Adam based on the infinity norm.", "year": 2014, "venue": "International Conference on Learning Representations", "authors": [ "Diederik P. Kingma", "Jimmy Ba" ], "externalIds": { "MAG": "2964121744", "DBLP": "journals/corr/KingmaB14", "ArXiv": "1412.6980", "CorpusId": 6628106 }, "url": "https://www.semanticscholar.org/paper/a6cb366736791bcccc5c8639de5a8f9636bf87e8", "referenceCount": 26, "citationCount": 139990, "influentialCitationCount": 22063, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Unsupervised Domain Adaptation by Backpropagation", "abstract": "Top-performing deep architectures are trained on massive amounts of labeled data. In the absence of labeled data for a certain task, domain adaptation often provides an attractive option given that labeled data of similar nature but from a different domain (e.g. synthetic images) are available. Here, we propose a new approach to domain adaptation in deep architectures that can be trained on large amount of labeled data from the source domain and large amount of unlabeled data from the target domain (no labeled target-domain data is necessary). \nAs the training progresses, the approach promotes the emergence of \"deep\" features that are (i) discriminative for the main learning task on the source domain and (ii) invariant with respect to the shift between the domains. We show that this adaptation behaviour can be achieved in almost any feed-forward model by augmenting it with few standard layers and a simple new gradient reversal layer. The resulting augmented architecture can be trained using standard backpropagation. \nOverall, the approach can be implemented with little effort using any of the deep-learning packages. The method performs very well in a series of image classification experiments, achieving adaptation effect in the presence of big domain shifts and outperforming previous state-of-the-art on Office datasets.", "year": 2014, "venue": "International Conference on Machine Learning", "authors": [ "Yaroslav Ganin", "V. Lempitsky" ], "externalIds": { "ArXiv": "1409.7495", "MAG": "2951688345", "DBLP": "conf/icml/GaninL15", "CorpusId": 6755881 }, "url": "https://www.semanticscholar.org/paper/2530cfc7764bda1330c48c0c8e2cd0e0c671d7e1", "referenceCount": 44, "citationCount": 5414, "influentialCitationCount": 1017, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Vocal aging effects on F0 and the first formant: A longitudinal analysis in adult speakers", "abstract": null, "year": 2010, "venue": "Speech Communication", "authors": [ "Ulrich Reubold", "J. Harrington", "Felicitas Kleber" ], "externalIds": { "MAG": "2166113160", "DBLP": "journals/speech/ReuboldHK10", "DOI": "10.1016/j.specom.2010.02.012", "CorpusId": 13235138 }, "url": "https://www.semanticscholar.org/paper/c44e459cc56c9a39309ab7a75401bd3b01d3fede", "referenceCount": 78, "citationCount": 120, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "GENERATIVE ADVERSARIAL NETS", "abstract": "Estimating individualized treatment effects (ITE) is a challenging task due to the need for an individual’s potential outcomes to be learned from biased data and without having access to the counterfactuals. We propose a novel method for inferring ITE based on the Generative Adversarial Nets (GANs) framework. 
Our method, termed Generative Adversarial Nets for inference of Individualized Treatment Effects (GANITE), is motivated by the possibility that we can capture the uncertainty in the counterfactual distributions by attempting to learn them using a GAN. We generate proxies of the counterfactual outcomes using a counterfactual generator, G, and then pass these proxies to an ITE generator, I, in order to train it. By modeling both of these using the GAN framework, we are able to infer based on the factual data, while still accounting for the unseen counterfactuals. We test our method on three real-world datasets (with both binary and multiple treatments) and show that GANITE outperforms state-of-the-art methods.", "year": 2018, "venue": "", "authors": [ "Individualized Treat", "Jinsung Yoon" ], "externalIds": { "CorpusId": 10319744 }, "url": "https://www.semanticscholar.org/paper/c68796f833a7151f0a63d1d1608dc902b4fdc9b6", "referenceCount": 24, "citationCount": 28002, "influentialCitationCount": 3321, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Aging Voice", "abstract": null, "year": 2017, "venue": "Springer Singapore", "authors": [ "K. Makiyama", "S. Hirano" ], "externalIds": { "DOI": "10.1007/978-981-10-3698-9", "CorpusId": 19431290 }, "url": "https://www.semanticscholar.org/paper/52e2a258a5a165ac9856a697dbc926aef53ffe28", "referenceCount": 66, "citationCount": 72, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": null } ] }, "Mitigating Semantic Leakage in Cross-lingual Embeddings via Orthogonality Constraint": { "paper_title": "Mitigating Semantic Leakage in Cross-lingual Embeddings via Orthogonality Constraint", "arxiv_id": "2409.15664v1", "keyword": "representation learning", "authors": [ "Dayeon Ki", "Cheonbok Park", "Hyunjoong Kim" ], "references": [ { "title": "English Contrastive Learning Can Learn Universal Cross-lingual Sentence Embeddings", "abstract": "Universal cross-lingual sentence embeddings map semantically similar cross-lingual sentences into a shared embedding space. Aligning cross-lingual sentence embeddings usually requires supervised cross-lingual parallel sentences. In this work, we propose mSimCSE, which extends SimCSE to multilingual settings and reveal that contrastive learning on English data can surprisingly learn high-quality universal cross-lingual sentence embeddings without any parallel data.In unsupervised and weakly supervised settings, mSimCSE significantly improves previous sentence embedding methods on cross-lingual retrieval and multilingual STS tasks. 
The performance of unsupervised mSimCSE is comparable to fully supervised methods in retrieving low-resource languages and multilingual STS.The performance can be further enhanced when cross-lingual NLI data is available.", "year": 2022, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Yau-Shian Wang", "Ashley Wu", "Graham Neubig" ], "externalIds": { "DBLP": "conf/emnlp/WangWN22", "ArXiv": "2211.06127", "ACL": "2022.emnlp-main.621", "DOI": "10.48550/arXiv.2211.06127", "CorpusId": 253499206 }, "url": "https://www.semanticscholar.org/paper/1fec9a1c1d28228b1d23874c74de7315928dea6f", "referenceCount": 49, "citationCount": 22, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LAWDR: Language-Agnostic Weighted Document Representations from Pre-trained Models", "abstract": "Cross-lingual document representations enable language understanding in multilingual contexts and allow transfer learning from high-resource to low-resource languages at the document level. Recently large pre-trained language models such as BERT, XLM and XLM-RoBERTa have achieved great success when fine-tuned on sentence-level downstream tasks. It is tempting to apply these cross-lingual models to document representation learning. However, there are two challenges: (1) these models impose high costs on long document processing and thus many of them have strict length limit; (2) model fine-tuning requires extra data and computational resources, which is not practical in resource-limited settings. In this work, we address these challenges by proposing unsupervised Language-Agnostic Weighted Document Representations (LAWDR). We study the geometry of pre-trained sentence embeddings and leverage it to derive document representations without fine-tuning. Evaluated on cross-lingual document alignment, LAWDR demonstrates comparable performance to state-of-the-art models on benchmark datasets.", "year": 2021, "venue": "arXiv.org", "authors": [ "Hongyu Gong", "Vishrav Chaudhary", "Yuqing Tang", "Francisco Guzmán" ], "externalIds": { "DBLP": "journals/corr/abs-2106-03379", "ArXiv": "2106.03379", "CorpusId": 235358862 }, "url": "https://www.semanticscholar.org/paper/d5c150e69783c5f1ab3b66dd15a1987d6f1b377c", "referenceCount": 30, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Inducing Language-Agnostic Multilingual Representations", "abstract": "Cross-lingual representations have the potential to make NLP techniques available to the vast majority of languages in the world. However, they currently require large pretraining corpora or access to typologically similar languages. In this work, we address these obstacles by removing language identity signals from multilingual embeddings. We examine three approaches for this: (i) re-aligning the vector spaces of target languages (all together) to a pivot source language; (ii) removing language-specific means and variances, which yields better discriminativeness of embeddings as a by-product; and (iii) increasing input similarity across languages by removing morphological contractions and sentence reordering. We evaluate on XNLI and reference-free MT evaluation across 19 typologically diverse languages. Our findings expose the limitations of these approaches—unlike vector normalization, vector space re-alignment and text normalization do not achieve consistent gains across encoders and languages. 
Due to the approaches’ additive effects, their combination decreases the cross-lingual transfer gap by 8.9 points (m-BERT) and 18.2 points (XLM-R) on average across all tasks and languages, however.", "year": 2020, "venue": "STARSEM", "authors": [ "Wei Zhao", "Steffen Eger", "Johannes Bjerva", "Isabelle Augenstein" ], "externalIds": { "DBLP": "journals/corr/abs-2008-09112", "ACL": "2021.starsem-1.22", "MAG": "3051275803", "ArXiv": "2008.09112", "DOI": "10.18653/v1/2021.starsem-1.22", "CorpusId": 221186888 }, "url": "https://www.semanticscholar.org/paper/27ef20774bde3d529df93468823e3c09e79f8294", "referenceCount": 47, "citationCount": 57, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "InfoXLM: An Information-Theoretic Framework for Cross-Lingual Language Model Pre-Training", "abstract": "In this work, we present an information-theoretic framework that formulates cross-lingual language model pre-training as maximizing mutual information between multilingual-multi-granularity texts. The unified view helps us to better understand the existing methods for learning cross-lingual representations. More importantly, inspired by the framework, we propose a new pre-training task based on contrastive learning. Specifically, we regard a bilingual sentence pair as two views of the same meaning and encourage their encoded representations to be more similar than the negative examples. By leveraging both monolingual and parallel corpora, we jointly train the pretext tasks to improve the cross-lingual transferability of pre-trained models. Experimental results on several benchmarks show that our approach achieves considerably better performance. The code and pre-trained models are available at https://aka.ms/infoxlm.", "year": 2020, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Zewen Chi", "Li Dong", "Furu Wei", "Nan Yang", "Saksham Singhal", "Wenhui Wang", "Xia Song", "Xian-Ling Mao", "Heyan Huang", "M. Zhou" ], "externalIds": { "MAG": "3042711927", "ACL": "2021.naacl-main.280", "DBLP": "conf/naacl/ChiDWYSWSMHZ21", "ArXiv": "2007.07834", "DOI": "10.18653/V1/2021.NAACL-MAIN.280", "CorpusId": 220525491 }, "url": "https://www.semanticscholar.org/paper/4ceff7472c04ee6d76bce89d61ba4b445d8dbf74", "referenceCount": 49, "citationCount": 318, "influentialCitationCount": 52, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Language-agnostic BERT Sentence Embedding", "abstract": "While BERT is an effective method for learning monolingual sentence embeddings for semantic similarity and embedding based transfer learning BERT based cross-lingual sentence embeddings have yet to be explored. We systematically investigate methods for learning multilingual sentence embeddings by combining the best methods for learning monolingual and cross-lingual representations including: masked language modeling (MLM), translation language modeling (TLM), dual encoder translation ranking, and additive margin softmax. We show that introducing a pre-trained multilingual language model dramatically reduces the amount of parallel training data required to achieve good performance by 80%. Composing the best of these methods produces a model that achieves 83.7% bi-text retrieval accuracy over 112 languages on Tatoeba, well above the 65.5% achieved by LASER, while still performing competitively on monolingual transfer learning benchmarks. 
Parallel data mined from CommonCrawl using our best model is shown to train competitive NMT models for en-zh and en-de. We publicly release our best multilingual sentence embedding model for 109+ languages at https://tfhub.dev/google/LaBSE.", "year": 2020, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Fangxiaoyu Feng", "Yinfei Yang", "Daniel Matthew Cer", "N. Arivazhagan", "Wei Wang" ], "externalIds": { "MAG": "3039695075", "DBLP": "journals/corr/abs-2007-01852", "ArXiv": "2007.01852", "ACL": "2022.acl-long.62", "DOI": "10.18653/v1/2022.acl-long.62", "CorpusId": 220347683 }, "url": "https://www.semanticscholar.org/paper/b896b846ae180d804c7290d8b9ae9ffc55325866", "referenceCount": 51, "citationCount": 728, "influentialCitationCount": 156, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Cross-lingual Retrieval for Iterative Self-Supervised Training", "abstract": "Recent studies have demonstrated the cross-lingual alignment ability of multilingual pretrained language models. In this work, we found that the cross-lingual alignment can be further improved by training seq2seq models on sentence pairs mined using their own encoder outputs. We utilized these findings to develop a new approach -- cross-lingual retrieval for iterative self-supervised training (CRISS), where mining and training processes are applied iteratively, improving cross-lingual alignment and translation ability at the same time. Using this method, we achieved state-of-the-art unsupervised machine translation results on 9 language directions with an average improvement of 2.4 BLEU, and on the Tatoeba sentence retrieval task in the XTREME benchmark on 16 languages with an average improvement of 21.5% in absolute accuracy. Furthermore, CRISS also brings an additional 1.8 BLEU improvement on average compared to mBART, when finetuned on supervised machine translation downstream tasks.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "C. Tran", "Y. Tang", "Xian Li", "Jiatao Gu" ], "externalIds": { "MAG": "3105378761", "DBLP": "conf/nips/TranTLG20", "ArXiv": "2006.09526", "CorpusId": 219721183 }, "url": "https://www.semanticscholar.org/paper/adf6cefb3ec88e9ffad24f5af24fe51cb421e83b", "referenceCount": 59, "citationCount": 71, "influentialCitationCount": 17, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On the Language Neutrality of Pre-trained Multilingual Representations", "abstract": "Multilingual contextual embeddings, such as multilingual BERT and XLM-RoBERTa, have proved useful for many multi-lingual tasks. Previous work probed the cross-linguality of the representations indirectly using zero-shot transfer learning on morphological and syntactic tasks. We instead investigate the language-neutrality of multilingual contextual embeddings directly and with respect to lexical semantics. Our results show that contextual embeddings are more language-neutral and, in general, more informative than aligned static word-type embeddings, which are explicitly trained for language neutrality. Contextual embeddings are still only moderately language-neutral by default, so we propose two simple methods for achieving stronger language neutrality: first, by unsupervised centering of the representation for each language and second, by fitting an explicit projection on small parallel data. 
Besides, we show how to reach state-of-the-art accuracy on language identification and match the performance of statistical methods for word alignment of parallel sentences without using parallel data.", "year": 2020, "venue": "Findings", "authors": [ "Jindřich Libovický", "Rudolf Rosa", "Alexander M. Fraser" ], "externalIds": { "MAG": "3103490574", "DBLP": "conf/emnlp/LibovickyRF20", "ACL": "2020.findings-emnlp.150", "ArXiv": "2004.05160", "DOI": "10.18653/v1/2020.findings-emnlp.150", "CorpusId": 215745575 }, "url": "https://www.semanticscholar.org/paper/75a35576efee34622254f265e4cbeb5e01eea7a1", "referenceCount": 45, "citationCount": 87, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization", "abstract": "Much recent progress in applications of machine learning models to NLP has been driven by benchmarks that evaluate models across a wide variety of tasks. However, these broad-coverage benchmarks have been mostly limited to English, and despite an increasing interest in multilingual models, a benchmark that enables the comprehensive evaluation of such methods on a diverse range of languages and tasks is still missing. To this end, we introduce the Cross-lingual TRansfer Evaluation of Multilingual Encoders XTREME benchmark, a multi-task benchmark for evaluating the cross-lingual generalization capabilities of multilingual representations across 40 languages and 9 tasks. We demonstrate that while models tested on English reach human performance on many tasks, there is still a sizable gap in the performance of cross-lingually transferred models, particularly on syntactic and sentence retrieval tasks. There is also a wide spread of results across languages. We release the benchmark to encourage research on cross-lingual learning methods that transfer linguistic knowledge across a diverse and representative set of languages and tasks.", "year": 2020, "venue": "International Conference on Machine Learning", "authors": [ "Junjie Hu", "Sebastian Ruder", "Aditya Siddhant", "Graham Neubig", "Orhan Firat", "Melvin Johnson" ], "externalIds": { "MAG": "3035579820", "DBLP": "conf/icml/HuRSNFJ20", "ArXiv": "2003.11080", "CorpusId": 214641214 }, "url": "https://www.semanticscholar.org/paper/ba4a34680e09e77984624c95f5245d91b54373f6", "referenceCount": 62, "citationCount": 872, "influentialCitationCount": 170, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Unsupervised Cross-lingual Representation Learning at Scale", "abstract": "This paper shows that pretraining multilingual language models at scale leads to significant performance gains for a wide range of cross-lingual transfer tasks. We train a Transformer-based masked language model on one hundred languages, using more than two terabytes of filtered CommonCrawl data. Our model, dubbed XLM-R, significantly outperforms multilingual BERT (mBERT) on a variety of cross-lingual benchmarks, including +14.6% average accuracy on XNLI, +13% average F1 score on MLQA, and +2.4% F1 score on NER. XLM-R performs particularly well on low-resource languages, improving 15.7% in XNLI accuracy for Swahili and 11.4% for Urdu over previous XLM models. 
We also present a detailed empirical analysis of the key factors that are required to achieve these gains, including the trade-offs between (1) positive transfer and capacity dilution and (2) the performance of high and low resource languages at scale. Finally, we show, for the first time, the possibility of multilingual modeling without sacrificing per-language performance; XLM-R is very competitive with strong monolingual models on the GLUE and XNLI benchmarks. We will make our code and models publicly available.", "year": 2019, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Alexis Conneau", "Kartikay Khandelwal", "Naman Goyal", "Vishrav Chaudhary", "Guillaume Wenzek", "Francisco Guzmán", "Edouard Grave", "Myle Ott", "Luke Zettlemoyer", "Veselin Stoyanov" ], "externalIds": { "MAG": "2983040767", "ArXiv": "1911.02116", "ACL": "2020.acl-main.747", "DBLP": "conf/acl/ConneauKGCWGGOZ20", "DOI": "10.18653/v1/2020.acl-main.747", "CorpusId": 207880568 }, "url": "https://www.semanticscholar.org/paper/6fec3e579c7cd4f13bdabbee2b6ac2e8ff5941c6", "referenceCount": 42, "citationCount": 5499, "influentialCitationCount": 1305, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "How Multilingual is Multilingual BERT?", "abstract": "In this paper, we show that Multilingual BERT (M-BERT), released by Devlin et al. (2018) as a single language model pre-trained from monolingual corpora in 104 languages, is surprisingly good at zero-shot cross-lingual model transfer, in which task-specific annotations in one language are used to fine-tune the model for evaluation in another language. To understand why, we present a large number of probing experiments, showing that transfer is possible even to languages in different scripts, that transfer works best between typologically similar languages, that monolingual corpora can train models for code-switching, and that the model can find translation pairs. From these results, we can conclude that M-BERT does create multilingual representations, but that these representations exhibit systematic deficiencies affecting certain language pairs.", "year": 2019, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Telmo Pires", "Eva Schlinger", "Dan Garrette" ], "externalIds": { "DBLP": "journals/corr/abs-1906-01502", "ACL": "P19-1493", "MAG": "2952638691", "ArXiv": "1906.01502", "DOI": "10.18653/v1/P19-1493", "CorpusId": 174798142 }, "url": "https://www.semanticscholar.org/paper/809cc93921e4698bde891475254ad6dfba33d03b", "referenceCount": 19, "citationCount": 1248, "influentialCitationCount": 109, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond", "abstract": "Abstract We introduce an architecture to learn joint multilingual sentence representations for 93 languages, belonging to more than 30 different families and written in 28 different scripts. Our system uses a single BiLSTM encoder with a shared byte-pair encoding vocabulary for all languages, which is coupled with an auxiliary decoder and trained on publicly available parallel corpora. This enables us to learn a classifier on top of the resulting embeddings using English annotated data only, and transfer it to any of the 93 languages without any modification. 
Our experiments in cross-lingual natural language inference (XNLI data set), cross-lingual document classification (MLDoc data set), and parallel corpus mining (BUCC data set) show the effectiveness of our approach. We also introduce a new test set of aligned sentences in 112 languages, and show that our sentence embeddings obtain strong results in multilingual similarity search even for low- resource languages. Our implementation, the pre-trained encoder, and the multilingual test set are available at https://github.com/facebookresearch/LASER.", "year": 2018, "venue": "Transactions of the Association for Computational Linguistics", "authors": [ "Mikel Artetxe", "Holger Schwenk" ], "externalIds": { "MAG": "2973088264", "DBLP": "journals/tacl/ArtetxeS19", "ArXiv": "1812.10464", "DOI": "10.1162/tacl_a_00288", "CorpusId": 56895585 }, "url": "https://www.semanticscholar.org/paper/160563abbd75265b19afc8b4169bab9e1eb33d97", "referenceCount": 66, "citationCount": 909, "influentialCitationCount": 161, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Margin-based Parallel Corpus Mining with Multilingual Sentence Embeddings", "abstract": "Machine translation is highly sensitive to the size and quality of the training data, which has led to an increasing interest in collecting and filtering large parallel corpora. In this paper, we propose a new method for this task based on multilingual sentence embeddings. In contrast to previous approaches, which rely on nearest neighbor retrieval with a hard threshold over cosine similarity, our proposed method accounts for the scale inconsistencies of this measure, considering the margin between a given sentence pair and its closest candidates instead. Our experiments show large improvements over existing methods. We outperform the best published results on the BUCC mining task and the UN reconstruction task by more than 10 F1 and 30 precision points, respectively. Filtering the English-German ParaCrawl corpus with our approach, we obtain 31.2 BLEU points on newstest2014, an improvement of more than one point over the best official filtered version.", "year": 2018, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Mikel Artetxe", "Holger Schwenk" ], "externalIds": { "MAG": "2899337869", "ArXiv": "1811.01136", "ACL": "P19-1309", "DBLP": "conf/acl/ArtetxeS19", "DOI": "10.18653/v1/P19-1309", "CorpusId": 53217060 }, "url": "https://www.semanticscholar.org/paper/30b09a853ab72e53078f1feefe6de5a847a2b169", "referenceCount": 35, "citationCount": 186, "influentialCitationCount": 28, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MTNT: A Testbed for Machine Translation of Noisy Text", "abstract": "Noisy or non-standard input text can cause disastrous mistranslations in most modern Machine Translation (MT) systems, and there has been growing research interest in creating noise-robust MT systems. However, as of yet there are no publicly available parallel corpora of with naturally occurring noisy inputs and translations, and thus previous work has resorted to evaluating on synthetically created datasets. In this paper, we propose a benchmark dataset for Machine Translation of Noisy Text (MTNT), consisting of noisy comments on Reddit (www.reddit.com) and professionally sourced translations. We commissioned translations of English comments into French and Japanese, as well as French and Japanese comments into English, on the order of 7k-37k sentences per language pair. 
We qualitatively and quantitatively examine the types of noise included in this dataset, then demonstrate that existing MT models fail badly on a number of noise-related phenomena, even after performing adaptation on a small training set of in-domain data. This indicates that this dataset can provide an attractive testbed for methods tailored to handling noisy text in MT.", "year": 2018, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Paul Michel", "Graham Neubig" ], "externalIds": { "DBLP": "conf/emnlp/MichelN18", "MAG": "2889746679", "ArXiv": "1809.00388", "ACL": "D18-1050", "DOI": "10.18653/v1/D18-1050", "CorpusId": 52155427 }, "url": "https://www.semanticscholar.org/paper/ce89ee7aaeeea2c9d474707690f3ea9d948776a3", "referenceCount": 49, "citationCount": 141, "influentialCitationCount": 22, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Filtering and Mining Parallel Data in a Joint Multilingual Space", "abstract": "We learn a joint multilingual sentence embedding and use the distance between sentences in different languages to filter noisy parallel data and to mine for parallel data in large news collections. We are able to improve a competitive baseline on the WMT’14 English to German task by 0.3 BLEU by filtering out 25% of the training data. The same approach is used to mine additional bitexts for the WMT’14 system and to obtain competitive results on the BUCC shared task to identify parallel sentences in comparable corpora. The approach is generic, it can be applied to many language pairs and it is independent of the architecture of the machine translation system.", "year": 2018, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Holger Schwenk" ], "externalIds": { "MAG": "2798389157", "ArXiv": "1805.09822", "DBLP": "conf/acl/Schwenk18", "ACL": "P18-2037", "DOI": "10.18653/v1/P18-2037", "CorpusId": 44087711 }, "url": "https://www.semanticscholar.org/paper/a0e51a7b8957558f25aef4621c7d287a2f910504", "referenceCount": 39, "citationCount": 108, "influentialCitationCount": 15, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Bi-Directional Neural Machine Translation with Synthetic Parallel Data", "abstract": "Despite impressive progress in high-resource settings, Neural Machine Translation (NMT) still struggles in low-resource and out-of-domain scenarios, often failing to match the quality of phrase-based translation. We propose a novel technique that combines back-translation and multilingual NMT to improve performance in these difficult cases. Our technique trains a single model for both directions of a language pair, allowing us to back-translate source or target monolingual data without requiring an auxiliary model. We then continue training on the augmented parallel data, enabling a cycle of improvement for a single model that can incorporate any source, target, or parallel data to improve both translation directions. As a byproduct, these models can reduce training and deployment costs significantly compared to uni-directional models. Extensive experiments show that our technique outperforms standard back-translation in low-resource scenarios, improves quality on cross-domain tasks, and effectively reduces costs across the board.", "year": 2018, "venue": "NMT@ACL", "authors": [ "Xing Niu", "Michael J. 
Denkowski", "Marine Carpuat" ], "externalIds": { "MAG": "2805097732", "ACL": "W18-2710", "ArXiv": "1805.11213", "DBLP": "journals/corr/abs-1805-11213", "DOI": "10.18653/v1/W18-2710", "CorpusId": 44061149 }, "url": "https://www.semanticscholar.org/paper/b7ed99696d8854d080152b0723483d7c956ed838", "referenceCount": 35, "citationCount": 58, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Word Translation Without Parallel Data", "abstract": "State-of-the-art methods for learning cross-lingual word embeddings have relied on bilingual dictionaries or parallel corpora. Recent studies showed that the need for parallel data supervision can be alleviated with character-level information. While these methods showed encouraging results, they are not on par with their supervised counterparts and are limited to pairs of languages sharing a common alphabet. In this work, we show that we can build a bilingual dictionary between two languages without using any parallel corpora, by aligning monolingual word embedding spaces in an unsupervised way. Without using any character information, our model even outperforms existing supervised methods on cross-lingual tasks for some language pairs. Our experiments demonstrate that our method works very well also for distant language pairs, like English-Russian or English-Chinese. We finally describe experiments on the English-Esperanto low-resource language pair, on which there only exists a limited amount of parallel data, to show the potential impact of our method in fully unsupervised machine translation. Our code, embeddings and dictionaries are publicly available.", "year": 2017, "venue": "International Conference on Learning Representations", "authors": [ "Alexis Conneau", "Guillaume Lample", "Marc'Aurelio Ranzato", "Ludovic Denoyer", "Herv'e J'egou" ], "externalIds": { "MAG": "2963118869", "DBLP": "journals/corr/abs-1710-04087", "ArXiv": "1710.04087", "CorpusId": 3470398 }, "url": "https://www.semanticscholar.org/paper/562c09c112df56c5696c010d90a815d6018a86c8", "referenceCount": 53, "citationCount": 1563, "influentialCitationCount": 409, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Overview of the Second BUCC Shared Task: Spotting Parallel Sentences in Comparable Corpora", "abstract": "This paper presents the BUCC 2017 shared task on parallel sentence extraction from comparable corpora. It recalls the design of the datasets, presents their final construction and statistics and the methods used to evaluate system results. 13 runs were submitted to the shared task by 4 teams, covering three of the four proposed language pairs: French-English (7 runs), German-English (3 runs), and Chinese-English (3 runs). The best F-scores as measured against the gold standard were 0.84 (German-English), 0.80 (French-English), and 0.43 (Chinese-English). Because of the design of the dataset, in which not all gold parallel sentence pairs are known, these are only minimum values. We examined manually a small sample of the false negative sentence pairs for the most precise French-English runs and estimated the number of parallel sentence pairs not yet in the provided gold standard. Adding them to the gold standard leads to revised estimates for the French-English F-scores of at most +1.5pt. This suggests that the BUCC 2017 datasets provide a reasonable approximate evaluation of the parallel sentence spotting task.", "year": 2017, "venue": "BUCC@ACL", "authors": [ "Pierre Zweigenbaum", "S. 
Sharoff", "R. Rapp" ], "externalIds": { "MAG": "2742155240", "ACL": "W17-2512", "DBLP": "conf/acl-bucc/ZweigenbaumSR17", "DOI": "10.18653/v1/W17-2512", "CorpusId": 43282987 }, "url": "https://www.semanticscholar.org/paper/8be59c1f54b2f0775b7de9d73bc10e5eef5520dd", "referenceCount": 16, "citationCount": 127, "influentialCitationCount": 17, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SemEval-2017 Task 1: Semantic Textual Similarity Multilingual and Crosslingual Focused Evaluation", "abstract": "Semantic Textual Similarity (STS) measures the meaning similarity of sentences. Applications include machine translation (MT), summarization, generation, question answering (QA), short answer grading, semantic search, dialog and conversational systems. The STS shared task is a venue for assessing the current state-of-the-art. The 2017 task focuses on multilingual and cross-lingual pairs with one sub-track exploring MT quality estimation (MTQE) data. The task obtained strong participation from 31 teams, with 17 participating in all language tracks. We summarize performance and review a selection of well performing methods. Analysis highlights common errors, providing insight into the limitations of existing models. To support ongoing work on semantic representations, the STS Benchmark is introduced as a new shared training and evaluation set carefully selected from the corpus of English STS shared task data (2012-2017).", "year": 2017, "venue": "International Workshop on Semantic Evaluation", "authors": [ "Daniel Matthew Cer", "Mona T. Diab", "Eneko Agirre", "I. Lopez-Gazpio", "Lucia Specia" ], "externalIds": { "DBLP": "journals/corr/abs-1708-00055", "ACL": "S17-2001", "MAG": "3104033643", "ArXiv": "1708.00055", "DOI": "10.18653/v1/S17-2001", "CorpusId": 4421747 }, "url": "https://www.semanticscholar.org/paper/a23fa96e7217ba0e9405d9e1fe3cdedd57b6e096", "referenceCount": 92, "citationCount": 1726, "influentialCitationCount": 293, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MUSE: Modularizing Unsupervised Sense Embeddings", "abstract": "This paper proposes to address the word sense ambiguity issue in an unsupervised manner, where word sense representations are learned along a word sense selection mechanism given contexts. Prior work focused on designing a single model to deliver both mechanisms, and thus suffered from either coarse-grained representation learning or inefficient sense selection. The proposed modular approach, MUSE, implements flexible modules to optimize distinct mechanisms, achieving the first purely sense-level representation learning system with linear-time sense selection. We leverage reinforcement learning to enable joint training on the proposed modules, and introduce various exploration techniques on sense selection for better robustness. 
The experiments on benchmark data show that the proposed approach achieves the state-of-the-art performance on synonym selection as well as on contextual word similarities in terms of MaxSimC.", "year": 2017, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Guang-He Lee", "Yun-Nung (Vivian) Chen" ], "externalIds": { "MAG": "2949460680", "DBLP": "journals/corr/LeeC17", "ACL": "D17-1034", "ArXiv": "1704.04601", "DOI": "10.18653/v1/D17-1034", "CorpusId": 9914140 }, "url": "https://www.semanticscholar.org/paper/9fd010312235c28ff208b8caa441faacb2283f1f", "referenceCount": 33, "citationCount": 33, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Joint Multilingual Sentence Representations with Neural Machine Translation", "abstract": "In this paper, we use the framework of neural machine translation to learn joint sentence representations across six very different languages. Our aim is that a representation which is independent of the language, is likely to capture the underlying semantics. We define a new cross-lingual similarity measure, compare up to 1.4M sentence representations and study the characteristics of close sentences. We provide experimental evidence that sentences that are close in embedding space are indeed semantically highly related, but often have quite different structure and syntax. These relations also hold when comparing sentences in different languages.", "year": 2017, "venue": "Rep4NLP@ACL", "authors": [ "Holger Schwenk", "Matthijs Douze" ], "externalIds": { "MAG": "2607106700", "DBLP": "journals/corr/SchwenkTFD17", "ArXiv": "1704.04154", "ACL": "W17-2619", "DOI": "10.18653/v1/W17-2619", "CorpusId": 6660863 }, "url": "https://www.semanticscholar.org/paper/dce5ca746224a97e532a034c371305f8bddcb5fc", "referenceCount": 43, "citationCount": 204, "influentialCitationCount": 16, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Orthogonality of Syntax and Semantics within Distributional Spaces", "abstract": "A recent distributional approach to wordanalogy problems (Mikolov et al., 2013b) exploits interesting regularities in the structure of the space of representations. Investigating further, we find that performance on this task can be related to orthogonality within the space. Explicitly designing such structure into a neural network model results in representations that decompose into orthogonal semantic and syntactic subspaces. We demonstrate that learning from word-order and morphological structure within English Wikipedia text to enable this decomposition can produce substantial improvements on semantic-similarity, posinduction and word-analogy tasks.", "year": 2015, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Jeff Mitchell", "Mark Steedman" ], "externalIds": { "MAG": "2250635746", "DBLP": "conf/acl/MitchellS15", "ACL": "P15-1126", "DOI": "10.3115/v1/P15-1126", "CorpusId": 11635710 }, "url": "https://www.semanticscholar.org/paper/4508bdad3b365369890d9d64d65e88657ba8afc6", "referenceCount": 18, "citationCount": 12, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Linguistics", "abstract": "4501. Brown, C. H. A WIDESPREAD MARKING REVERSAL IN LANGUAGES OF THE SOUTHEASTERN UNITED STATES. Anthropological Linguistics. 1996, 38(3): 439-460. 
Across the southeastern United States, native American languages have linguistically accommodated the European-introduced peach by referring to it through the use of respective terms for the native plum. This has taken the form of marking reversals in which native words originally designating plum have shifted in reference to peach, with modified (overtly marked) 'peach' terms used to denote plum (e.g., 'little peach' = plum). Marking reversals were motivated throughout the region by a radical change in the relative cultural importance of the two referents, wherein the introduced peach surpassed the native plum in salience. The broad distribution of this nomenclatural feature is probably attributable both to diffusion and to independent development. Other widespread features involving words for introduced items are noted including a marking reversal in which the introduced pig and the native opossum are nomenclaturally linked. These lexical traits suggest the southeastern United States to be a post-contact linguistic area.", "year": 1999, "venue": "", "authors": [ "Véronique Verhagen", "J. Schilperoord" ], "externalIds": { "DOI": "10.1177/000134559903900301", "CorpusId": 220057900 }, "url": "https://www.semanticscholar.org/paper/ba6e3b28090d935205ed0e1d398206906b5b8905", "referenceCount": 33, "citationCount": 649, "influentialCitationCount": 22, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Adversarial Training on Disentangling Meaning and Language Representations for Unsupervised Quality Estimation", "abstract": "We propose a method to distill language-agnostic meaning embeddings from multilingual sentence encoders for unsupervised quality estimation of machine translation. Our method facilitates that the meaning embeddings focus on semantics by adversarial training that attempts to eliminate language-specific information. Experimental results on unsupervised quality estimation reveal that our method achieved higher correlations with human evaluations.", "year": 2022, "venue": "International Conference on Computational Linguistics", "authors": [ "Yuto Kuroda", "Tomoyuki Kajiwara", "Yuki Arase", "Takashi Ninomiya" ], "externalIds": { "DBLP": "conf/coling/KurodaKAN22", "ACL": "2022.coling-1.465", "CorpusId": 252819405 }, "url": "https://www.semanticscholar.org/paper/d4edeacfbfe2c3591121891170ed62d48af10a29", "referenceCount": 22, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Language-agnostic Representation from Multilingual Sentence Encoders for Cross-lingual Similarity Estimation", "abstract": "We propose a method to distill a language-agnostic meaning embedding from a multilingual sentence encoder. By removing language-specific information from the original embedding, we retrieve an embedding that fully represents the sentence’s meaning. The proposed method relies only on parallel corpora without any human annotations. Our meaning embedding allows efficient cross-lingual sentence similarity estimation by simple cosine similarity calculation. Experimental results on both quality estimation of machine translation and cross-lingual semantic textual similarity tasks reveal that our method consistently outperforms the strong baselines using the original multilingual embedding. 
Our method consistently improves the performance of any pre-trained multilingual sentence encoder, even in low-resource language pairs where only tens of thousands of parallel sentence pairs are available.", "year": 2021, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Nattapong Tiyajamorn", "Tomoyuki Kajiwara", "Yuki Arase", "Makoto Onizuka" ], "externalIds": { "ACL": "2021.emnlp-main.612", "DBLP": "conf/emnlp/TiyajamornKAO21", "DOI": "10.18653/v1/2021.emnlp-main.612", "CorpusId": 243865423 }, "url": "https://www.semanticscholar.org/paper/c0a54963b0689fa7d76fda1063b65003c769d9b7", "referenceCount": 37, "citationCount": 15, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Cross-lingual Sentence Embedding using Multi-Task Learning", "abstract": "Multilingual sentence embeddings capture rich semantic information not only for measuring similarity between texts but also for catering to a broad range of downstream cross-lingual NLP tasks. State-of-the-art multilingual sentence embedding models require large parallel corpora to learn efficiently, which confines the scope of these models. In this paper, we propose a novel sentence embedding framework based on an unsupervised loss function for generating effective multilingual sentence embeddings, eliminating the need for parallel corpora. We capture semantic similarity and relatedness between sentences using a multi-task loss function for training a dual encoder model mapping different languages onto the same vector space. We demonstrate the efficacy of an unsupervised as well as a weakly supervised variant of our framework on STS, BUCC and Tatoeba benchmark tasks. The proposed unsupervised sentence embedding framework outperforms even supervised state-of-the-art methods for certain under-resourced languages on the Tatoeba dataset and on a monolingual benchmark. Further, we show enhanced zero-shot learning capabilities for more than 30 languages, with the model being trained on only 13 languages. Our model can be extended to a wide range of languages from any language family, as it overcomes the requirement of parallel corpora for training.", "year": 2021, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Koustava Goswami", "Sourav Dutta", "H. Assem", "Theodorus Fransen", "John P. 
Mccrae" ], "externalIds": { "DBLP": "conf/emnlp/GoswamiDAFM21", "ACL": "2021.emnlp-main.716", "DOI": "10.18653/v1/2021.emnlp-main.716", "CorpusId": 243865169 }, "url": "https://www.semanticscholar.org/paper/8a097088a201180847d2cb4b2e3f911016f8faf9", "referenceCount": 34, "citationCount": 17, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, EMNLP 2021, Online and Punta Cana, Dominican Republic, 7-11 November, 2021", "abstract": null, "year": 2020, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [], "externalIds": { "DBLP": "conf/emnlp/2021-d", "DOI": "10.18653/v1/2021.emnlp-main", "CorpusId": 244119800 }, "url": "https://www.semanticscholar.org/paper/64cba2d6e5c9aef933983f0763bb8ec13918a357", "referenceCount": 0, "citationCount": 14, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", "abstract": "We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models (Peters et al., 2018a; Radford et al., 2018), BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5 (7.7 point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).", "year": 2019, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Jacob Devlin", "Ming-Wei Chang", "Kenton Lee", "Kristina Toutanova" ], "externalIds": { "MAG": "2951055169", "ACL": "N19-1423", "DBLP": "journals/corr/abs-1810-04805", "ArXiv": "1810.04805", "DOI": "10.18653/v1/N19-1423", "CorpusId": 52967399 }, "url": "https://www.semanticscholar.org/paper/df2b0e26d0599ce3e70df8a9da02e51594e0e992", "referenceCount": 63, "citationCount": 81678, "influentialCitationCount": 19056, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "details of our ORACLE objective. 
{ "title": "Learning disentangled semantic representations for zero-shot cross-lingual transfer in multilingual machine reading comprehension", "abstract": null, "year": 2022, "venue": "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2021 Conference of the North American Chapter of the Association for Computational Linguistics", "abstract": null, "year": 2021, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Reinforcement Feature Transformation for Polymer Property Performance Prediction": { "paper_title": "Reinforcement Feature Transformation for Polymer Property Performance Prediction", "arxiv_id": "2409.15616v1", "keyword": "representation learning", "authors": [ "Xuanming Hu", "Dongjie Wang", "Wangyang Ying", "Yanjie Fu" ], "references": [ { "title": "Enhancing Tabular Data Optimization with a Flexible Graph-based Reinforced Exploration Strategy", "abstract": "Tabular data optimization methods aim to automatically find an optimal feature transformation process that generates high-value features and improves the performance of downstream machine learning tasks. Current frameworks for automated feature transformation rely on iterative sequence generation tasks, optimizing decision strategies through performance feedback from downstream tasks. However, these approaches fail to effectively utilize historical decision-making experiences and overlook potential relationships among generated features, thus limiting the depth of knowledge extraction. Moreover, the granularity of the decision-making process lacks dynamic backtracking capabilities for individual features, leading to insufficient adaptability when encountering inefficient pathways, adversely affecting overall robustness and exploration efficiency. To address the limitations observed in current automatic feature engineering frameworks, we introduce a novel method that utilizes a feature-state transformation graph to effectively preserve the entire feature transformation journey, where each node represents a specific transformation state. 
During exploration, three cascading agents iteratively select nodes and idea mathematical operations to generate new transformation states. This strategy leverages the inherent properties of the graph structure, allowing for the preservation and reuse of valuable transformations. It also enables backtracking capabilities through graph pruning techniques, which can rectify inefficient transformation paths. To validate the efficacy and flexibility of our approach, we conducted comprehensive experiments and detailed case studies, demonstrating superior performance in diverse scenarios.", "year": 2024, "venue": "arXiv.org", "authors": [ "Xiaohan Huang", "Dongjie Wang", "Zhiyuan Ning", "Ziyue Qiao", "Qingqing Long", "Haowei Zhu", "Min Wu", "Yuanchun Zhou", "Meng Xiao" ], "externalIds": { "ArXiv": "2406.07404", "DBLP": "journals/corr/abs-2406-07404", "DOI": "10.48550/arXiv.2406.07404", "CorpusId": 270379922 }, "url": "https://www.semanticscholar.org/paper/97eca3c0eb7f9b058938ce2c0cc2f8131a40fbb3", "referenceCount": 35, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Knockoff-Guided Feature Selection via A Single Pre-trained Reinforced Agent", "abstract": "Feature selection prepares the AI-readiness of data by eliminating redundant features. Prior research falls into two primary categories: i) Supervised Feature Selection, which identifies the optimal feature subset based on their relevance to the target variable; ii) Unsupervised Feature Selection, which reduces the feature space dimensionality by capturing the essential information within the feature set instead of using target variable. However, SFS approaches suffer from time-consuming processes and limited generalizability due to the dependence on the target variable and downstream ML tasks. UFS methods are constrained by the deducted feature space is latent and untraceable. To address these challenges, we introduce an innovative framework for feature selection, which is guided by knockoff features and optimized through reinforcement learning, to identify the optimal and effective feature subset. In detail, our method involves generating\"knockoff\"features that replicate the distribution and characteristics of the original features but are independent of the target variable. Each feature is then assigned a pseudo label based on its correlation with all the knockoff features, serving as a novel metric for feature evaluation. Our approach utilizes these pseudo labels to guide the feature selection process in 3 novel ways, optimized by a single reinforced agent: 1). A deep Q-network, pre-trained with the original features and their corresponding pseudo labels, is employed to improve the efficacy of the exploration process in feature selection. 2). We introduce unsupervised rewards to evaluate the feature subset quality based on the pseudo labels and the feature space reconstruction loss to reduce dependencies on the target variable. 3). 
A new {\\epsilon}-greedy strategy is used, incorporating insights from the pseudo labels to make the feature selection process more effective.", "year": 2024, "venue": "arXiv.org", "authors": [ "Xinyuan Wang", "Dongjie Wang", "Wangyang Ying", "Rui Xie", "Haifeng Chen", "Yanjie Fu" ], "externalIds": { "ArXiv": "2403.04015", "DBLP": "journals/corr/abs-2403-04015", "DOI": "10.48550/arXiv.2403.04015", "CorpusId": 268264088 }, "url": "https://www.semanticscholar.org/paper/03e3841cac150af9d489207b8be1e9b3aec50d48", "referenceCount": 37, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Reinforcement-Enhanced Autoregressive Feature Transformation: Gradient-steered Search in Continuous Space for Postfix Expressions", "abstract": "Feature transformation aims to generate new pattern-discriminative feature space from original features to improve downstream machine learning (ML) task performances. However, the discrete search space for the optimal feature explosively grows on the basis of combinations of features and operations from low-order forms to high-order forms. Existing methods, such as exhaustive search, expansion reduction, evolutionary algorithms, reinforcement learning, and iterative greedy, suffer from large search space. Overly emphasizing efficiency in algorithm design usually sacrifices stability or robustness. To fundamentally fill this gap, we reformulate discrete feature transformation as a continuous space optimization task and develop an embedding-optimization-reconstruction framework. This framework includes four steps: 1) reinforcement-enhanced data preparation, aiming to prepare high-quality transformation-accuracy training data; 2) feature transformation operation sequence embedding, intending to encapsulate the knowledge of prepared training data within a continuous space; 3) gradient-steered optimal embedding search, dedicating to uncover potentially superior embeddings within the learned space; 4) transformation operation sequence reconstruction, striving to reproduce the feature transformation solution to pinpoint the optimal feature space.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Dongjie Wang", "Meng Xiao", "Min Wu", "P. Wang", "Yuanchun Zhou", "Yanjie Fu" ], "externalIds": { "DBLP": "journals/corr/abs-2309-13618", "ArXiv": "2309.13618", "DOI": "10.48550/arXiv.2309.13618", "CorpusId": 262460698 }, "url": "https://www.semanticscholar.org/paper/3b28e053bd9d2aef2f289b5103ba62a1bf9b780f", "referenceCount": 34, "citationCount": 11, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Self-optimizing Feature Generation via Categorical Hashing Representation and Hierarchical Reinforcement Crossing", "abstract": "Feature generation aims to generate new and meaningful features to create a discriminative representation space. A generated feature is meaningful when the generated feature is from a feature pair with inherent feature interaction. In the real world, experienced data scientists can identify potentially useful feature-feature interactions, and generate meaningful dimensions from an exponentially large search space in an optimal crossing form over an optimal generation path. But, machines have limited human-like abilities. We generalize such learning tasks as self-optimizing feature generation. 
Self-optimizing feature generation imposes several under-addressed challenges on existing systems: meaningful, robust, and efficient generation. To tackle these challenges, we propose a principled and generic representation-crossing framework to solve self-optimizing feature generation. To achieve hashing representation, we propose a three-step approach: feature discretization, feature hashing, and descriptive summarization. To achieve reinforcement crossing, we develop a hierarchical reinforcement feature crossing approach. We present extensive experimental results to demonstrate the effectiveness and efficiency of the proposed method. The code is available at https://github.com/yingwangyang/HRC_feature_cross.git.", "year": 2023, "venue": "Industrial Conference on Data Mining", "authors": [ "Wangyang Ying", "Dongjie Wang", "Kunpeng Liu", "Leilei Sun", "Yanjie Fu" ], "externalIds": { "DBLP": "journals/corr/abs-2309-04612", "ArXiv": "2309.04612", "DOI": "10.1109/ICDM58522.2023.00084", "CorpusId": 261682615 }, "url": "https://www.semanticscholar.org/paper/46c9b043326e6a8d8df6208cff465b45ff934dd9", "referenceCount": 39, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Traceable Group-Wise Self-Optimizing Feature Transformation Learning: A Dual Optimization Perspective", "abstract": "Feature transformation aims to reconstruct an effective representation space by mathematically refining the existing features. It serves as a pivotal approach to combat the curse of dimensionality, enhance model generalization, mitigate data sparsity, and extend the applicability of classical models. Existing research predominantly focuses on domain knowledge-based feature engineering or learning latent representations. However, these methods, while insightful, lack full automation and fail to yield a traceable and optimal representation space. An indispensable question arises: Can we concurrently address these limitations when reconstructing a feature space for a machine learning task? Our initial work took a pioneering step towards this challenge by introducing a novel self-optimizing framework. This framework leverages the power of three cascading reinforced agents to automatically select candidate features and operations for generating improved feature transformation combinations. Despite the impressive strides made, there was room for enhancing its effectiveness and generalization capability. In this extended journal version, we advance our initial work from two distinct yet interconnected perspectives: 1) We propose a refinement of the original framework, which integrates a graph-based state representation method to capture the feature interactions more effectively and develop different Q-learning strategies to alleviate Q-value overestimation further. 2) We utilize a new optimization technique (actor-critic) to train the entire self-optimizing framework in order to accelerate the model convergence and improve the feature transformation performance. Finally, to validate the improved effectiveness and generalization capability of our framework, we perform extensive experiments and conduct comprehensive analyses. These provide empirical evidence of the strides made in this journal version over the initial work, solidifying our framework’s standing as a substantial contribution to the field of automated feature transformation. 
To improve the reproducibility, we have released the associated code and data by the Github link https://github.com/coco11563/TKDD2023_code.", "year": 2023, "venue": "ACM Transactions on Knowledge Discovery from Data", "authors": [ "Meng Xiao", "Dongjie Wang", "Min Wu", "Kunpeng Liu", "H. Xiong", "Yuanchun Zhou", "Yanjie Fu" ], "externalIds": { "DBLP": "journals/tkdd/XiaoWWLXZF24", "ArXiv": "2306.16893", "DOI": "10.1145/3638059", "CorpusId": 259287044 }, "url": "https://www.semanticscholar.org/paper/77bfe732dbd17f4b816027bee0d041bc6874cf34", "referenceCount": 40, "citationCount": 11, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Beyond Discrete Selection: Continuous Embedding Space Optimization for Generative Feature Selection", "abstract": "The goal of Feature Selection - comprising filter, wrapper, and embedded approaches - is to find the optimal feature subset for designated downstream tasks. Nevertheless, current feature selection methods are limited by: 1) the selection criteria of these methods are varied for different domains, leading them hard to be generalized; 2) the selection performance of these approaches drops significantly when processing high-dimensional feature space coupled with small sample size. In light of these challenges, we pose the question: can selected feature subsets be more robust, accurate, and input dimensionality agnostic? In this paper, we reformulate the feature selection problem as a deep differentiable optimization task and propose a new research perspective: conceptualizing discrete feature subsetting as continuous embedding space optimization. We introduce a novel and principled framework that encompasses a sequential encoder, an accuracy evaluator, a sequential decoder, and a gradient ascent optimizer. This comprehensive framework includes four important steps: preparation of features-accuracy training data, deep feature subset embedding, gradient-optimized search, and feature subset reconstruction. Specifically, we utilize reinforcement feature selection learning to generate diverse and high-quality training data and enhance generalization. By optimizing reconstruction and accuracy losses, we embed feature selection knowledge into a continuous space using an encoder-evaluator-decoder model structure. We employ a gradient ascent search algorithm to find better embeddings in the learned embedding space. Furthermore, we reconstruct feature selection solutions using these embeddings and select the feature subset with the highest performance for downstream tasks as the optimal subset. Finally, extensive experimental results demonstrate the effectiveness of our proposed method, showcasing significant enhancements in feature selection robustness and accuracy. To improve the reproducibility of our research, we have released accompanying code and datasets by Dropbox.1.", "year": 2023, "venue": "Industrial Conference on Data Mining", "authors": [ "Meng Xiao", "Dongjie Wang", "Min Wu", "P. 
Wang", "Yuanchun Zhou", "Yanjie Fu" ], "externalIds": { "ArXiv": "2302.13221", "DBLP": "conf/icdm/0001W0WZF23", "DOI": "10.1109/ICDM58522.2023.00078", "CorpusId": 257219550 }, "url": "https://www.semanticscholar.org/paper/73bfcc8cde1a71b073ca4336b866ac70c224a451", "referenceCount": 35, "citationCount": 13, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Traceable Automatic Feature Transformation via Cascading Actor-Critic Agents", "abstract": "Feature transformation for AI is an essential task to boost the effectiveness and interpretability of machine learning (ML). Feature transformation aims to transform original data to identify an optimal feature space that enhances the performances of a downstream ML model. Existing studies either combines preprocessing, feature selection, and generation skills to empirically transform data, or automate feature transformation by machine intelligence, such as reinforcement learning. However, existing studies suffer from: 1) high-dimensional non-discriminative feature space; 2) inability to represent complex situational states; 3) inefficiency in integrating local and global feature information. To fill the research gap, we formulate the feature transformation task as an iterative, nested process of feature generation and selection, where feature generation is to generate and add new features based on original features, and feature selection is to remove redundant features to control the size of feature space. Finally, we present extensive experiments and case studies to illustrate 24.7\\% improvements in F1 scores compared with SOTAs and robustness in high-dimensional data.", "year": 2022, "venue": "SDM", "authors": [ "Meng Xiao", "Dongjie Wang", "Min Wu", "Ziyue Qiao", "P. Wang", "Kunpeng Liu", "Yuanchun Zhou", "Yanjie Fu" ], "externalIds": { "ArXiv": "2212.13402", "DBLP": "conf/sdm/0001W0Q00ZF23", "DOI": "10.48550/arXiv.2212.13402", "CorpusId": 255186310 }, "url": "https://www.semanticscholar.org/paper/d04ce621cf6f7968be1abe629ba3eaa14da75c76", "referenceCount": 28, "citationCount": 16, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "TransPolymer: a Transformer-based language model for polymer property predictions", "abstract": null, "year": 2022, "venue": "npj Computational Materials", "authors": [ "Changwen Xu", "Yuyang Wang", "A. Farimani" ], "externalIds": { "DBLP": "journals/corr/abs-2209-01307", "ArXiv": "2209.01307", "DOI": "10.1038/s41524-023-01016-5", "CorpusId": 252090395 }, "url": "https://www.semanticscholar.org/paper/72c53ffacd4ad86391dd70d3b18c2b9e80ba2956", "referenceCount": 78, "citationCount": 51, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Physics" ] }, { "title": "Efficient Reinforced Feature Selection via Early Stopping Traverse Strategy", "abstract": "In this paper, we propose a single-agent Monte Carlo based reinforced feature selection (MCRFS) method, as well as two efficiency improvement strategies, i.e., early stopping (ES) strategy and reward-level interactive (RI) strategy. Feature selection is one of the most important technologies in data prepossessing, aiming to find the optimal feature subset for a given downstream machine learning task. Enormous research has been done to improve its effectiveness and efficiency. Recently, the multi-agent reinforced feature selection (MARFS) has achieved great success in improving the performance of feature selection. 
However, MARFS suffers from the heavy burden of computational cost, which greatly limits its application in real-world scenarios. In this paper, we propose an efficient reinforcement feature selection method, which uses one agent to traverse the whole feature set, and decides to select or not select each feature one by one. Specifically, we first develop one behavior policy and use it to traverse the feature set and generate training data. And then, we evaluate the target policy based on the training data and improve the target policy by Bellman equation. Besides, we conduct the importance sampling in an incremental way, and propose an early stopping strategy to improve the training efficiency by the removal of skew data. In the early stopping strategy, the behavior policy stops traversing with a probability inversely proportional to the importance sampling weight. In addition, we propose a reward-level interactive strategy to improve the training efficiency via reward-level external advice. Finally, we design extensive experiments on real-world data to demonstrate the superiority of the proposed method.", "year": 2021, "venue": "Industrial Conference on Data Mining", "authors": [ "Kunpeng Liu", "Pengfei Wang", "Dongjie Wang", "Wan Du", "Dapeng Oliver Wu", "Yanjie Fu" ], "externalIds": { "DBLP": "journals/corr/abs-2109-14180", "ArXiv": "2109.14180", "DOI": "10.1109/ICDM51629.2021.00051", "CorpusId": 238215197 }, "url": "https://www.semanticscholar.org/paper/f97f531868ac1ddeb48e83985220958fef96eed3", "referenceCount": 45, "citationCount": 15, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Automated Feature-Topic Pairing: Aligning Semantic and Embedding Spaces in Spatial Representation Learning", "abstract": "Automated characterization of spatial data is a kind of critical geographical intelligence. As an emerging technique for characterization, Spatial Representation Learning (SRL) uses deep neural networks (DNNs) to learn non-linear embedded features of spatial data for characterization. However, SRL extracts features by internal layers of DNNs, and thus suffers from lacking semantic labels. Texts of spatial entities, on the other hand, provide semantic understanding of latent feature labels, but is insensible to deep SRL models. How can we teach a SRL model to discover appropriate topic labels in texts and pair learned features with the labels? This paper formulates a new problem: feature-topic pairing, and proposes a novel Particle Swarm Optimization (PSO) based deep learning framework. Specifically, we formulate the feature-topic pairing problem into an automated alignment task between 1) a latent embedding feature space and 2) a textual semantic topic space. We decompose the alignment of the two spaces into: 1) point-wise alignment, denoting the correlation between a topic distribution and an embedding vector; 2) pair-wise alignment, denoting the consistency between a feature-feature similarity matrix and a topic-topic similarity matrix. We design a PSO based solver to simultaneously select an optimal set of topics and learn corresponding features based on the selected topics. We develop a closed loop algorithm to iterate between 1) minimizing losses of representation reconstruction and feature-topic alignment and 2) searching the best topics. 
Finally, we present extensive experiments to demonstrate the enhanced performance of our method.", "year": 2021, "venue": "SIGSPATIAL/GIS", "authors": [ "Dongjie Wang", "Kunpeng Liu", "David Mohaisen", "Pengyang Wang", "Chang-Tien Lu", "Yanjie Fu" ], "externalIds": { "ArXiv": "2109.11053", "DBLP": "journals/corr/abs-2109-11053", "DOI": "10.1145/3474717.3484212", "CorpusId": 237605181 }, "url": "https://www.semanticscholar.org/paper/eeb731bae032823124c88c5921c4543d16e61ab1", "referenceCount": 22, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Orbital Graph Convolutional Neural Network for Material Property Prediction", "abstract": "Material representations that are compatible with machine learning models play a key role in developing models that exhibit high accuracy for property prediction. Atomic orbital interactions are one of the important factors that govern the properties of crystalline materials, from which the local chemical environments of atoms is inferred. Therefore, to develop robust machine learningmodels for material properties prediction, it is imperative to include features representing such chemical attributes. Here, we propose the Orbital Graph Convolutional Neural Network (OGCNN), a crystal graph convolutional neural network framework that includes atomic orbital interaction features that learns material properties in a robust way. In addition, we embedded an encoder-decoder network into the OGCNN enabling it to learn important features among basic atomic (elemental features), orbital-orbital interactions, and topological features. We examined the performance of this model on a broad range of crystalline material data to predict different properties. We benchmarked the performance of the OGCNN model with that of: 1) the crystal graph convolutional neural network (CGCNN), 2) other state-of-the-art descriptors for material representations including Many-body Tensor Representation (MBTR) and the Smooth Overlap of Atomic Positions (SOAP), and 3) other conventional regression machine learning algorithms where different crystal featurization methods have been used. We find that OGCNN significantly outperforms them. The OGCNN model with high predictive accuracy can be used to discover new materials among the immense phase and compound spaces of materials", "year": 2020, "venue": "PHYSICAL REVIEW MATERIALS", "authors": [ "M. Karamad", "Rishikesh Magar", "Yuting Shi", "Samira Siahrostami", "I. Gates", "A. Farimani" ], "externalIds": { "ArXiv": "2008.06415", "DBLP": "journals/corr/abs-2008-06415", "MAG": "3098062507", "DOI": "10.1103/physrevmaterials.4.093801", "CorpusId": 221136009 }, "url": "https://www.semanticscholar.org/paper/fe4b3a263bade6b122522f41143cf1e98efb375e", "referenceCount": 57, "citationCount": 70, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Materials Science", "Physics" ] }, { "title": "Neural Feature Search: A Neural Architecture for Automated Feature Engineering", "abstract": "Feature engineering is a crucial step for developing effective machine learning models. Traditionally, feature engineering is performed manually, which requires much domain knowledge and is time-consuming. In recent years, many automated feature engineering methods have been proposed. These methods improve the accuracy of a machine learning model by automatically transforming the original features into a set of new features. 
However, existing methods either lack ability to perform high-order transformations or suffer from the feature space explosion problem. In this paper, we present Neural Feature Search (NFS), a novel neural architecture for automated feature engineering. We utilize a recurrent neural network based controller to transform each raw feature through a series of transformation functions. The controller is trained through reinforcement learning to maximize the expected performance of the machine learning algorithm. Extensive experiments on public datasets illustrate that our neural architecture is effective and outperforms the existing state-of-the-art automated feature engineering methods. Our architecture can efficiently capture potentially valuable high-order transformations and mitigate the feature explosion problem.", "year": 2019, "venue": "Industrial Conference on Data Mining", "authors": [ "Xiangning Chen", "Bo Qiao", "Weiyi Zhang", "Wei Wu", "Murali Chintalapati", "Dongmei Zhang", "Qingwei Lin", "Chuan Luo", "Xudong Li", "Hongyu Zhang", "Yong Xu", "Yingnong Dang", "Kaixin Sui", "Xu Zhang" ], "externalIds": { "DBLP": "conf/icdm/ChenQZWCZLLLZXD19", "MAG": "2977715335", "DOI": "10.1109/ICDM.2019.00017", "CorpusId": 208102475 }, "url": "https://www.semanticscholar.org/paper/6c2d7d7dc5b161e6a9a1ca0e706f57db9c14e70e", "referenceCount": 40, "citationCount": 45, "influentialCitationCount": 13, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The autofeat Python Library for Automated Feature Engineering and Selection", "abstract": null, "year": 2019, "venue": "PKDD/ECML Workshops", "authors": [ "F. Horn", "R. Pack", "M. Rieger" ], "externalIds": { "ArXiv": "1901.07329", "DBLP": "conf/pkdd/HornPR19", "MAG": "3013995528", "DOI": "10.1007/978-3-030-43823-4_10", "CorpusId": 58981501 }, "url": "https://www.semanticscholar.org/paper/5442cb82d18913f91b3e6452186bad0e7a145deb", "referenceCount": 43, "citationCount": 73, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Computer-Aided Screening of Conjugated Polymers for Organic Solar Cell: Classification by Random Forest.", "abstract": "Owing to the diverse chemical structures, organic photovoltaic (OPV) applications with a bulk heterojunction framework have greatly evolved over the last two decades, which has produced numerous organic semiconductors exhibiting improved power conversion efficiencies (PCEs). Despite the recent fast progress in materials informatics and data science, data-driven molecular design of OPV materials remains challenging. We report a screening of conjugated molecules for polymer-fullerene OPV applications by supervised learning methods (artificial neural network (ANN) and random forest (RF)). Approximately 1000 experimental parameters including PCE, molecular weight, and electronic properties are manually collected from the literature and subjected to machine learning with digitized chemical structures. Contrary to the low correlation coefficient in ANN, RF yields an acceptable accuracy, which is twice that of random classification. We demonstrate the application of RF screening for the design, synthesis, and characterization of a conjugated polymer, which facilitates a rapid development of optoelectronic materials.", "year": 2018, "venue": "Journal of Physical Chemistry Letters", "authors": [ "Shinji Nagasawa", "Eman Al-Naamani", "A. 
Saeki" ], "externalIds": { "MAG": "2800793736", "DOI": "10.1021/acs.jpclett.8b00635", "CorpusId": 19148188, "PubMed": "29733216" }, "url": "https://www.semanticscholar.org/paper/99f15ef5063e894a443952dee50d919a8bc21814", "referenceCount": 0, "citationCount": 135, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Materials Science", "Medicine" ] }, { "title": "Feature Engineering for Predictive Modeling using Reinforcement Learning", "abstract": "\n \n Feature engineering is a crucial step in the process of predictive modeling. It involves the transformation of given feature space, typically using mathematical functions, with the objective of reducing the modeling error for a given target. However, there is no well-defined basis for performing effective feature engineering. It involves domain knowledge, intuition, and most of all, a lengthy process of trial and error. The human attention involved in overseeing this process significantly influences the cost of model generation. We present a new framework to automate feature engineering. It is based on performance driven exploration of a transformation graph, which systematically and compactly captures the space of given options. A highly efficient exploration strategy is derived through reinforcement learning on past examples.\n \n", "year": 2017, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Udayan Khurana", "Horst Samulowitz", "D. Turaga" ], "externalIds": { "DBLP": "journals/corr/abs-1709-07150", "MAG": "2759903677", "ArXiv": "1709.07150", "DOI": "10.1609/aaai.v32i1.11678", "CorpusId": 8175458 }, "url": "https://www.semanticscholar.org/paper/1130c667589d57834b2ee407e03cc6b94713a5b2", "referenceCount": 33, "citationCount": 158, "influentialCitationCount": 22, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Proximal Policy Optimization Algorithms", "abstract": "We propose a new family of policy gradient methods for reinforcement learning, which alternate between sampling data through interaction with the environment, and optimizing a \"surrogate\" objective function using stochastic gradient ascent. Whereas standard policy gradient methods perform one gradient update per data sample, we propose a novel objective function that enables multiple epochs of minibatch updates. The new methods, which we call proximal policy optimization (PPO), have some of the benefits of trust region policy optimization (TRPO), but they are much simpler to implement, more general, and have better sample complexity (empirically). 
Our experiments test PPO on a collection of benchmark tasks, including simulated robotic locomotion and Atari game playing, and we show that PPO outperforms other online policy gradient methods, and overall strikes a favorable balance between sample complexity, simplicity, and wall-time.", "year": 2017, "venue": "arXiv.org", "authors": [ "John Schulman", "Filip Wolski", "Prafulla Dhariwal", "Alec Radford", "Oleg Klimov" ], "externalIds": { "MAG": "2736601468", "ArXiv": "1707.06347", "DBLP": "journals/corr/SchulmanWDRK17", "CorpusId": 28695052 }, "url": "https://www.semanticscholar.org/paper/dce6f9d4017b1785979e7520fd0834ef8cf02f4b", "referenceCount": 14, "citationCount": 14872, "influentialCitationCount": 3164, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Graph Embedding Techniques, Applications, and Performance: A Survey", "abstract": null, "year": 2017, "venue": "Knowledge-Based Systems", "authors": [ "Palash Goyal", "Emilio Ferrara" ], "externalIds": { "DBLP": "journals/kbs/GoyalF18", "MAG": "2612872092", "ArXiv": "1705.02801", "DOI": "10.1016/j.knosys.2018.03.022", "CorpusId": 3953995 }, "url": "https://www.semanticscholar.org/paper/374b4409f6a1d2d853af31e329f025da239d375f", "referenceCount": 105, "citationCount": 1621, "influentialCitationCount": 60, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Playing Atari with Deep Reinforcement Learning", "abstract": "We present the first deep learning model to successfully learn control policies directly from high-dimensional sensory input using reinforcement learning. The model is a convolutional neural network, trained with a variant of Q-learning, whose input is raw pixels and whose output is a value function estimating future rewards. We apply our method to seven Atari 2600 games from the Arcade Learning Environment, with no adjustment of the architecture or learning algorithm. We find that it outperforms all previous approaches on six of the games and surpasses a human expert on three of them.", "year": 2013, "venue": "arXiv.org", "authors": [ "Volodymyr Mnih", "K. Kavukcuoglu", "David Silver", "Alex Graves", "Ioannis Antonoglou", "Daan Wierstra", "Martin A. 
Riedmiller" ], "externalIds": { "DBLP": "journals/corr/MnihKSGAWR13", "MAG": "1757796397", "ArXiv": "1312.5602", "CorpusId": 15238391 }, "url": "https://www.semanticscholar.org/paper/2319a491378867c7049b3da055c5df60e1671158", "referenceCount": 30, "citationCount": 11117, "influentialCitationCount": 1357, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Neuro-SymbolicEmbeddingforShortandEffectiveFeatureSelectionviaAutoregressiveGeneration", "abstract": null, "year": 2024, "venue": "arXiv", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Reinforcedimitativegraphrepresentationlearningformobile userprofiling:Anadversarialtrainingperspective", "abstract": null, "year": 2021, "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Connectingthedots:Multivariatetimeseriesforecastingwithgraphneuralnetworks", "abstract": null, "year": 2020, "venue": "Proceedings of the 26th ACM SIGKDD international conference on knowledge discovery & data mining", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Enhancingmaterialspropertypredictionbyleveragingcomputationalandexperimentaldatausingdeeptrans-ferlearning", "abstract": null, "year": 2019, "venue": "Nature communications", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Probabilisticmatrixfactorization for automated machine learning", "abstract": null, "year": 2018, "venue": "Advances in neural information processing systems", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Feature engineering for machine learning: principles and techniques for data scientists", "abstract": null, "year": 2018, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Feature selection: A data perspective", "abstract": null, "year": 2017, "venue": "ACM computing surveys (CSUR)", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Deep reinforcement learningwithdoubleq-learning", "abstract": null, "year": 2016, "venue": "ProceedingsoftheAAAIconferenceonartificial intelligence", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "An Introduction to Variable and Feature Selection", "abstract": "Variable and feature selection have become the focus of much research in areas of application for which datasets with tens or hundreds of thousands of variables are available. 
These areas include text processing of internet documents, gene expression array analysis, and combinatorial chemistry. The objective of variable selection is three-fold: improving the prediction performance of the predictors, providing faster and more cost-effective predictors, and providing a better understanding of the underlying process that generated the data. The contributions of this special issue cover a wide range of aspects of such problems: providing a better definition of the objective function, feature construction, feature ranking, multivariate feature selection, efficient search methods, and feature validity assessment methods.", "year": 2003, "venue": "Journal of machine learning research", "authors": [ "Isabelle M Guyon", "A. Elisseeff" ], "externalIds": { "DBLP": "journals/jmlr/GuyonE03", "CorpusId": 379259 }, "url": "https://www.semanticscholar.org/paper/d8384f7ef288d2d5cb267128471c5427fc98b54b", "referenceCount": 51, "citationCount": 15229, "influentialCitationCount": 759, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Latent Dirichlet allocation", "abstract": null, "year": 2003, "venue": "Journal of machine Learning research", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Policy gradient methods for reinforcement learning with function approximation", "abstract": null, "year": 1999, "venue": "Advances in neural information processing systems", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Reinforcement Learning: An Introduction", "abstract": "Reinforcement learning, one of the most active research areas in artificial intelligence, is a computational approach to learning whereby an agent tries to maximize the total amount of reward it receives when interacting with a complex, uncertain environment. In Reinforcement Learning, Richard Sutton and Andrew Barto provide a clear and simple account of the key ideas and algorithms of reinforcement learning. Their discussion ranges from the history of the field's intellectual foundations to the most recent developments and applications. The only necessary mathematical background is familiarity with elementary concepts of probability. The book is divided into three parts. Part I defines the reinforcement learning problem in terms of Markov decision processes. Part II provides basic solution methods: dynamic programming, Monte Carlo methods, and temporal-difference learning. Part III presents a unified view of the solution methods and incorporates artificial neural networks, eligibility traces, and planning; the two final chapters present case studies and consider the future of reinforcement learning.", "year": 1998, "venue": "IEEE Trans. Neural Networks", "authors": [ "R. S. Sutton", "A. 
Barto" ], "externalIds": { "DBLP": "journals/tnn/SuttonB98", "MAG": "2121863487", "DOI": "10.1109/TNN.1998.712192", "CorpusId": 60035920 }, "url": "https://www.semanticscholar.org/paper/97efafdb4a3942ab3efba53ded7413199f79c054", "referenceCount": 456, "citationCount": 33767, "influentialCitationCount": 5249, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Aprobabilisticapproachtofeatureselection-a filter solution", "abstract": null, "year": 1996, "venue": "ICML", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Regressionshrinkageandselectionviathelasso", "abstract": null, "year": 1996, "venue": "Journal of the Royal Statistical Society Series B: Statistical Methodology", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Exploringhighthermalconductivitypolymersviainterpretablemachinelearningwithphysicaldescriptors", "abstract": null, "year": null, "venue": "arXiv", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2022. Group-wisereinforcementfeaturegenerationforoptimalandexplainablerepresentationspacereconstruction", "abstract": null, "year": null, "venue": "Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Boosting Urban Prediction via Addressing Spatial-TemporalDistributionShift", "abstract": null, "year": null, "venue": "2023IEEEInternationalConferenceonDataMining (ICDM)", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. Feature SelectionasDeepSequentialGenerativeLearning", "abstract": null, "year": null, "venue": "arXivpreprintarXiv", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "CauSkelNet: Causal Representation Learning for Human Behaviour Analysis": { "paper_title": "CauSkelNet: Causal Representation Learning for Human Behaviour Analysis", "arxiv_id": "2409.15564v2", "keyword": "representation learning", "authors": [ "Xingrui Gu", "Chuyi Jiang", "Erte Wang", "Zekun Wu", "Qiang Cui", "Leimin Tian", "Lianlong Wu", "Siyang Song", "Chuang Yu" ], "references": [ { "title": "Multi-Rater Consensus Learning for Modeling Multiple Sparse Ratings of Affective Behaviour", "abstract": "The use of multiple raters to label datasets is an established practice in affective computing. The principal goal is to reduce unwanted subjective bias in the labelling process. Unfortunately, this leads to the key problem of identifying a ground truth for training the affect recognition system. This problem becomes more relevant in a sparsely-crossed annotation where each rater only labels a portion of the full dataset to ensure a manageable workload per rater. 
In this article, we introduce a Multi-Rater Consensus Learning (MRCL) method which learns a representative affect recognition model that accounts for each rater's agreement with the other raters. MRCL combines a multitask learning (MTL) regularizer and a consensus loss. Unlike standard MTL, this approach allows the model to learn to predict each rater's label while explicitly accounting for the consensus among raters. We evaluated our approach on two different datasets based on spontaneous affective body movement expressions for pain behaviour detection and laughter type recognition respectively. The two naturalistic datasets were chosen for the different forms of labelling (different in affect, observation stimuli, and raters) that they together offer for evaluating our approach. Empirical results demonstrate that MRCL is effective for modelling affect from datasets with sparsely-crossed multi-rater annotation.", "year": 2024, "venue": "IEEE Transactions on Affective Computing", "authors": [ "L. Romeo", "Temitayo A. Olugbade", "M. Pontil", "N. Bianchi-Berthouze" ], "externalIds": { "DBLP": "journals/taffco/RomeoOPB24", "DOI": "10.1109/TAFFC.2023.3297270", "CorpusId": 260026834 }, "url": "https://www.semanticscholar.org/paper/e5c4cb0d9fb9f188699f67b0c81300da8035e6db", "referenceCount": 41, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Movement Representation Learning for Pain Level Classification", "abstract": "Self-supervised learning has shown value for uncovering informative movement features for human activity recognition. However, there has been minimal exploration of this approach for affect recognition where availability of large labelled datasets is particularly limited. In this paper, we propose a P-STEMR (Parallel Space-Time Encoding Movement Representation) architecture with the aim of addressing this gap and specifically leveraging the higher availability of human activity recognition datasets for pain-level classification. We evaluated and analyzed the architecture using three different datasets across four sets of experiments. We found statistically significant increase in average F1 score to 0.84 for pain level classification with two classes based on the architecture compared with the use of hand-crafted features. This suggests that it is capable of learning movement representations and transferring these from activity recognition based on data captured in lab settings to classification of pain levels with messier real-world data. We further found that the efficacy of transfer between datasets can be undermined by dissimilarities in population groups due to impairments that affect movement behaviour and in motion primitives (e.g. rotation versus flexion). Future work should investigate how the effect of these differences could be minimized so that data from healthy people can be more valuable for transfer learning.", "year": 2024, "venue": "IEEE Transactions on Affective Computing", "authors": [ "Temitayo A. Olugbade", "A. C. D. C. 
Williams", "Nicolas Gold", "Nadia Bianchi-Berthouze" ], "externalIds": { "DBLP": "journals/taffco/OlugbadeWGB24", "DOI": "10.1109/TAFFC.2023.3334522", "CorpusId": 265342130 }, "url": "https://www.semanticscholar.org/paper/4a832262812b36e0fe77119d761bce855bc6de4e", "referenceCount": 36, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Advancing Multimodal Data Fusion in Pain Recognition: A Strategy Leveraging Statistical Correlation and Human-Centered Perspectives", "abstract": "This research presents a novel multimodal data fusion methodology for pain behavior recognition, integrating statistical correlation analysis with human-centered insights. Our approach introduces two key innovations: 1) integrating data-driven statistical relevance weights into the fusion strategy to effectively utilize complementary information from heterogeneous modalities, and 2) incorporating human-centric movement characteristics into multimodal representation learning for detailed modeling of pain behaviors. Validated across various deep learning architectures, our method demonstrates superior performance and broad applicability. We propose a customizable framework that aligns each modality with a suitable classifier based on statistical significance, advancing personalized and effective multimodal fusion. Furthermore, our methodology provides explainable analysis of multimodal data, contributing to interpretable and explainable AI in healthcare. By highlighting the importance of data diversity and modality-specific representations, we enhance traditional fusion techniques and set new standards for recognizing complex pain behaviors. Our findings have significant implications for promoting patient-centered healthcare interventions and supporting explainable clinical decision-making.", "year": 2024, "venue": "arXiv.org", "authors": [ "Xingrui Gu", "Zhixuan Wang", "Irisa Jin", "Zekun Wu" ], "externalIds": { "ArXiv": "2404.00320", "DBLP": "journals/corr/abs-2404-00320", "DOI": "10.48550/arXiv.2404.00320", "CorpusId": 268820179 }, "url": "https://www.semanticscholar.org/paper/6335596fb3e0097d74ad2b48ba7f363d45ae2681", "referenceCount": 29, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Exploring Multimodal Fusion for Continuous Protective Behavior Detection", "abstract": "Chronic pain is a prevalent condition that affects everyday life of people around the world. Protective behaviors (strategies that are naturally but unhelpfully adopted by people with chronic pain to cope with fear of pain in executing harmless everyday movements) can lead to further disability over time if not recognized and addressed appropriately. In this paper, we build on previous work on unimodal, activity-independent, time-continuous protective behavior detection (PBD) by focusing on the fusion of muscle activity and body movement modalities for characterizing both protective behavior and its physical activity context. We explore different fusion strategies based on consideration of the manner in which protective behavior influences muscle activity and overt body movement as well as the relationship between the two modalities. We evaluate the various strategies on the multimodal EmoPain dataset containing data from people with and without chronic pain engaged in physical activities that reflect everyday challenges for those with chronic pain. 
Our results show that a central (model-level) fusion approach leads to better PBD performance than input- and decision-level fusions, or unimodal approaches. We also show that additional use of attention mechanism, typifying shifts in attention characteristic of protective behavior, further improves the sensitivity of the model, i.e. detection of the positive class (which is the minority class). We analyze these results and suggest that fusion in modelling a motor condition should consider how emotional responses (fear of movement and pain in this case) triggered by a condition affect each of the given modalities and hence their contributions to the modelling task.", "year": 2022, "venue": "Affective Computing and Intelligent Interaction", "authors": [ "Guanting Cen", "Chongyang Wang", "Temitayo A. Olugbade", "A. Williams", "N. Bianchi-Berthouze" ], "externalIds": { "DBLP": "conf/acii/CenWOWB22", "DOI": "10.1109/ACII55700.2022.9953851", "CorpusId": 253881531 }, "url": "https://www.semanticscholar.org/paper/869f51818b3a901057052a9707461934573278e9", "referenceCount": 34, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Big Data, Data Science, and Causal Inference: A Primer for Clinicians", "abstract": "Clinicians handle a growing amount of clinical, biometric, and biomarker data. In this “big data” era, there is an emerging faith that the answer to all clinical and scientific questions reside in “big data” and that data will transform medicine into precision medicine. However, data by themselves are useless. It is the algorithms encoding causal reasoning and domain (e.g., clinical and biological) knowledge that prove transformative. The recent introduction of (health) data science presents an opportunity to re-think this data-centric view. For example, while precision medicine seeks to provide the right prevention and treatment strategy to the right patients at the right time, its realization cannot be achieved by algorithms that operate exclusively in data-driven prediction modes, as do most machine learning algorithms. Better understanding of data science and its tasks is vital to interpret findings and translate new discoveries into clinical practice. In this review, we first discuss the principles and major tasks of data science by organizing it into three defining tasks: (1) association and prediction, (2) intervention, and (3) counterfactual causal inference. Second, we review commonly-used data science tools with examples in the medical literature. Lastly, we outline current challenges and future directions in the fields of medicine, elaborating on how data science can enhance clinical effectiveness and inform medical practice. As machine learning algorithms become ubiquitous tools to handle quantitatively “big data,” their integration with causal reasoning and domain knowledge is instrumental to qualitatively transform medicine, which will, in turn, improve health outcomes of patients.", "year": 2021, "venue": "Frontiers in Medicine", "authors": [ "Y. Raita", "C. Camargo", "L. Liang", "K. 
Hasegawa" ], "externalIds": { "PubMedCentral": "8290071", "DOI": "10.3389/fmed.2021.678047", "CorpusId": 235736422, "PubMed": "34295910" }, "url": "https://www.semanticscholar.org/paper/4fcc7f6085b407d777d39cd2be62f70071107db4", "referenceCount": 79, "citationCount": 28, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Leveraging Activity Recognition to Enable Protective Behavior Detection in Continuous Data", "abstract": "Protective behavior exhibited by people with chronic pain (CP) during physical activities is very informative to understanding their physical and emotional states. Existing automatic protective behavior detection (PBD) methods rely on pre-segmentation of activities predefined by users. However, in real life, people perform activities casually. Therefore, where those activities present difficulties for people with CP, technology-enabled support should be delivered continuously and automatically adapted to activity type and occurrence of protective behavior. Hence, to facilitate ubiquitous CP management, it becomes critical to enable accurate PBD over continuous data. In this paper, we propose to integrate human activity recognition (HAR) with PBD via a novel hierarchical HAR-PBD architecture comprising graph-convolution and long short-term memory (GC-LSTM) networks, and alleviate class imbalances using a class-balanced focal categorical cross-entropy (CFCC) loss. Through in-depth evaluation of the approach using a CP patients' dataset, we show that the leveraging of HAR, GC-LSTM networks, and CFCC loss leads to clear increase in PBD performance against the baseline (macro F1 score of 0.81 vs. 0.66 and precision-recall area-under-the-curve (PR-AUC) of 0.60 vs. 0.44). We conclude by discussing possible use cases of the hierarchical architecture in CP management and beyond. We also discuss current limitations and ways forward.", "year": 2020, "venue": "Proceedings of the ACM on Interactive Mobile Wearable and Ubiquitous Technologies", "authors": [ "Chongyang Wang", "Yuan Gao", "Akhil Mathur", "A. Williams", "N. Lane", "N. Bianchi-Berthouze" ], "externalIds": { "DBLP": "journals/corr/abs-2011-01776", "MAG": "3096783252", "ArXiv": "2011.01776", "DOI": "10.1145/3463508", "CorpusId": 226237050 }, "url": "https://www.semanticscholar.org/paper/7456f493443db4a66774e8037ac3774f133489b8", "referenceCount": 105, "citationCount": 29, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Role of machine learning in gait analysis: a review", "abstract": "Abstract Human biomechanics and gait form an integral part of life. The gait analysis involves a large number of interdependent parameters that were difficult to interpret due to a vast amount of data and their inter-relations. To simplify evaluation, the integration of machine learning (ML) with biomechanics is a promising solution. The purpose of this review is to familiarise the readers with key directions of implementation of ML techniques for gait analysis and gait rehabilitation. An extensive literature survey was based on research articles from nine databases published from 1980 to 2019. With over 943 studies identified, finally, 43 studies met the inclusion criteria. The outcome reported illustrates that supervised ML techniques showed accuracies above 90% in the identified gait analysis domain. 
The statistical results revealed support vector machine (SVM) as the best classifier (mean-score = 0.87 ± 0.07) with remarkable generalisation capability even on small to medium datasets. It has also been analysed that the control strategies for gait rehabilitation are benefitted from reinforcement learning and (deep) neural-networks due to their ability to capture participants’ variability. This review paper shows the success of ML techniques in detecting disorders, predicting rehabilitation length, and control of rehabilitation devices which make them suitable for clinical diagnosis.", "year": 2020, "venue": "Journal of Medical Engineering & Technology", "authors": [ "Preeti Khera", "Neelesh Kumar" ], "externalIds": { "MAG": "3092746285", "DOI": "10.1080/03091902.2020.1822940", "CorpusId": 224809990, "PubMed": "33078988" }, "url": "https://www.semanticscholar.org/paper/8d268c65e269be9dbbe103adb1fb6014ac129c24", "referenceCount": 57, "citationCount": 87, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "The relationship between guarding, pain, and emotion", "abstract": "Supplemental Digital Content is Available in the Text.", "year": 2019, "venue": "PAIN Reports", "authors": [ "Temitayo A. Olugbade", "N. Bianchi-Berthouze", "A. Williams" ], "externalIds": { "PubMedCentral": "6728010", "MAG": "2964304750", "DOI": "10.1097/PR9.0000000000000770", "CorpusId": 200107025, "PubMed": "31579861" }, "url": "https://www.semanticscholar.org/paper/179420c19cd620e67d6dbb9c6a48c12b5a4ac3f1", "referenceCount": 53, "citationCount": 29, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "The Automatic Detection of Chronic Pain-Related Expression: Requirements, Challenges and the Multimodal EmoPain Dataset", "abstract": "Pain-related emotions are a major barrier to effective self rehabilitation in chronic pain. Automated coaching systems capable of detecting these emotions are a potential solution. This paper lays the foundation for the development of such systems by making three contributions. First, through literature reviews, an overview of how pain is expressed in chronic pain and the motivation for detecting it in physical rehabilitation is provided. Second, a fully labelled multimodal dataset (named `EmoPain') containing high resolution multiple-view face videos, head mounted and room audio signals, full body 3D motion capture and electromyographic signals from back muscles is supplied. Natural unconstrained pain related facial expressions and body movement behaviours were elicited from people with chronic pain carrying out physical exercises. Both instructed and non-instructed exercises were considered to reflect traditional scenarios of physiotherapist directed therapy and home-based self-directed therapy. Two sets of labels were assigned: level of pain from facial expressions annotated by eight raters and the occurrence of six pain-related body behaviours segmented by four experts. Third, through exploratory experiments grounded in the data, the factors and challenges in the automated recognition of such expressions and behaviour are described, the paper concludes by discussing potential avenues in the context of these findings also highlighting differences for the two exercise scenarios addressed.", "year": 2016, "venue": "IEEE Transactions on Affective Computing", "authors": [ "M. Aung", "Sebastian Kaltwang", "Bernardino Romera-Paredes", "Brais Martínez", "Aneesha Singh", "M. 
Cella", "M. Valstar", "Hongying Meng", "A. Kemp", "M. Shafizadeh", "Aaron C. Elkins", "N. Kanakam", "Amschel de Rothschild", "N. Tyler", "P. Watson", "A. C. de C Williams", "M. Pantic", "N. Bianchi-Berthouze" ], "externalIds": { "DBLP": "journals/taffco/AungKRMSCVMKSEK16", "MAG": "2162825184", "DOI": "10.1109/TAFFC.2015.2462830", "CorpusId": 541113, "PubMed": "30906508" }, "url": "https://www.semanticscholar.org/paper/525e86f8c25e6ed4edf6c99dcc2bc0b52638bdf3", "referenceCount": 119, "citationCount": 162, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science", "Psychology" ] }, { "title": "Cognitive neuroscience of human counterfactual reasoning", "abstract": "Counterfactual reasoning is a hallmark of human thought, enabling the capacity to shift from perceiving the immediate environment to an alternative, imagined perspective. Mental representations of counterfactual possibilities (e.g., imagined past events or future outcomes not yet at hand) provide the basis for learning from past experience, enable planning and prediction, support creativity and insight, and give rise to emotions and social attributions (e.g., regret and blame). Yet remarkably little is known about the psychological and neural foundations of counterfactual reasoning. In this review, we survey recent findings from psychology and neuroscience indicating that counterfactual thought depends on an integrative network of systems for affective processing, mental simulation, and cognitive control. We review evidence to elucidate how these mechanisms are systematically altered through psychiatric illness and neurological disease. We propose that counterfactual thinking depends on the coordination of multiple information processing systems that together enable adaptive behavior and goal-directed decision making and make recommendations for the study of counterfactual inference in health, aging, and disease.", "year": 2015, "venue": "Frontiers in Human Neuroscience", "authors": [ "Nicole Van Hoeck", "P. Watson", "A. Barbey" ], "externalIds": { "PubMedCentral": "4511878", "MAG": "1524926235", "DOI": "10.3389/fnhum.2015.00420", "CorpusId": 16686929, "PubMed": "26257633" }, "url": "https://www.semanticscholar.org/paper/7222510b94e9ea58e6e493f63674319ef30a8de2", "referenceCount": 272, "citationCount": 90, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Psychology", "Medicine" ] }, { "title": "Comparative abilities of Microsoft Kinect and Vicon 3D motion capture for gait analysis", "abstract": "Abstract Biomechanical analysis is a powerful tool in the evaluation of movement dysfunction in orthopaedic and neurologic populations. Three-dimensional (3D) motion capture systems are widely used, accurate systems, but are costly and not available in many clinical settings. The Microsoft Kinect™ has the potential to be used as an alternative low-cost motion analysis tool. The purpose of this study was to assess concurrent validity of the Kinect™ with Brekel Kinect software in comparison to Vicon Nexus during sagittal plane gait kinematics. Twenty healthy adults (nine male, 11 female) were tracked while walking and jogging at three velocities on a treadmill. Concurrent hip and knee peak flexion and extension and stride timing measurements were compared between Vicon and Kinect™. Although Kinect measurements were representative of normal gait, the Kinect™ generally under-estimated joint flexion and over-estimated extension. 
Kinect™ and Vicon hip angular displacement correlation was very low and error was large. Kinect™ knee measurements were somewhat better than hip, but were not consistent enough for clinical assessment. Correlation between Kinect™ and Vicon stride timing was high and error was fairly small. Variability in Kinect™ measurements was smallest at the slowest velocity. The Kinect™ has basic motion capture capabilities and with some minor adjustments will be an acceptable tool to measure stride timing, but sophisticated advances in software and hardware are necessary to improve Kinect™ sensitivity before it can be implemented for clinical use.", "year": 2014, "venue": "Journal of Medical Engineering & Technology", "authors": [ "A. Pfister", "A. West", "S. Bronner", "J. A. Noah" ], "externalIds": { "MAG": "2165744315", "DOI": "10.3109/03091902.2014.909540", "CorpusId": 10067554, "PubMed": "24878252" }, "url": "https://www.semanticscholar.org/paper/fe76c167920898330a66e3f2509ff1cd41e231bc", "referenceCount": 38, "citationCount": 445, "influentialCitationCount": 19, "isOpenAccess": false, "fieldsOfStudy": [ "Engineering", "Medicine" ] }, { "title": "What is the role of fear and escape/avoidance in chronic pain? Models, structural analysis and future directions.", "abstract": "SUMMARY The fear-avoidance model of chronic pain has stimulated extensive research and the development of a variety of interventions focused on reducing pain-related fear, avoidance behavior and functional disability in individuals with chronic pain. Although there is considerable evidence supporting the model, oversight of potentially important factors and inconsistent findings regarding postulated pathways have led to proposed model revisions. The purpose of this brief narrative review is to provide an overview of the original model, highlight key model revisions and review existing research that directly analyzed the pathways proposed in the original and revised models. The growing body of evidence emerging from analysis of proposed fear-avoidance pathways using structural equation modeling is generally supportive of the original model, as well as various proposed revisions. Additional evaluation using structural equation modeling may lead to a formulation of the fear-avoidance model that balances parsimony with heuristic value. Clinical implications and areas for future research are discussed.", "year": 2012, "venue": "Pain Management", "authors": [ "G. Asmundson", "Holly A Parkerson", "M. Petter", "M. Noel" ], "externalIds": { "MAG": "2068613946", "DOI": "10.2217/pmt.12.15", "CorpusId": 19949433, "PubMed": "24654671" }, "url": "https://www.semanticscholar.org/paper/2f9033fb17c6701a23e0c9d0269160ea915707cf", "referenceCount": 40, "citationCount": 42, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Causal Inference", "abstract": "Throughout POL572 and 573, we will learn how to use various statistical methods in order to make causal inference , which is a main goal of social science research. We all know the mantra “correlation is not causation.” The difficulty of inferring causality arises from the fact that we do not observe counterfactual outcomes, which are required to estimate causal effects. To formalize this intuition, we begin by describing the potential outcomes framework of causal inference.", "year": 2011, "venue": "Twenty-one Mental Models That Can Change Policing", "authors": [ "James M. 
Robins" ], "externalIds": { "MAG": "3003220710", "DOI": "10.4135/9781506326139.n102", "CorpusId": 3057769 }, "url": "https://www.semanticscholar.org/paper/7062f0be4e6ae4bbb96ba0dd0d726763dde875c6", "referenceCount": 54, "citationCount": 1429, "influentialCitationCount": 107, "isOpenAccess": true, "fieldsOfStudy": [ "Psychology" ] }, { "title": "Moving differently in pain: A new theory to explain the adaptation to pain", "abstract": null, "year": 2011, "venue": "Pain", "authors": [ "P. Hodges", "K. Tucker" ], "externalIds": { "MAG": "2062818347", "DOI": "10.1016/j.pain.2010.10.020", "CorpusId": 14332195, "PubMed": "21087823" }, "url": "https://www.semanticscholar.org/paper/75756c2a039eef0572a6869b2733c18a3aceaa7d", "referenceCount": 108, "citationCount": 830, "influentialCitationCount": 41, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Psychology" ] }, { "title": "Effects of two neuromuscular fatigue protocols on landing performance.", "abstract": null, "year": 2010, "venue": "Journal of Electromyography & Kinesiology", "authors": [ "C. Roger James", "Barry W. Scheuermann", "Michael P. Smith" ], "externalIds": { "MAG": "2064783233", "DOI": "10.1016/j.jelekin.2009.10.007", "CorpusId": 1977483, "PubMed": "20006522" }, "url": "https://www.semanticscholar.org/paper/0ed0eaf415a96b2fceed4ae0c9827fa8c0dfa614", "referenceCount": 39, "citationCount": 66, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Changes in the degree of motor variability associated with experimental and chronic neck–shoulder pain during a standardised repetitive arm movement", "abstract": null, "year": 2008, "venue": "Experimental Brain Research", "authors": [ "P. Madeleine", "S. Mathiassen", "L. Arendt-Nielsen" ], "externalIds": { "MAG": "2057126442", "DOI": "10.1007/s00221-007-1199-2", "CorpusId": 12756856, "PubMed": "18030457" }, "url": "https://www.semanticscholar.org/paper/1ce37f1c6d61d325dd7bbee26b6298adbece0325", "referenceCount": 39, "citationCount": 199, "influentialCitationCount": 13, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "The max-min hill-climbing Bayesian network structure learning algorithm", "abstract": null, "year": 2006, "venue": "Machine-mediated learning", "authors": [ "I. Tsamardinos", "Laura E. Brown", "C. Aliferis" ], "externalIds": { "DBLP": "journals/ml/TsamardinosBA06", "MAG": "2165190832", "DOI": "10.1007/s10994-006-6889-7", "CorpusId": 6887420 }, "url": "https://www.semanticscholar.org/paper/40414d71c9706806960f6551a1ff53cc87488899", "referenceCount": 88, "citationCount": 1872, "influentialCitationCount": 274, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Adjacency-Faithfulness and Conservative Causal Inference", "abstract": "Most causal inference algorithms in the literature (e.g., Pearl (2000), Spirtes et al. (2000), Heckerman et al. (1999)) exploit an assumption usually referred to as the causal Faithfulness or Stability condition. In this paper, we highlight two components of the condition used in constraint-based algorithms, which we call \"Adjacency-Faithfulness\" and \"Orientation-Faithfulness\". We point out that assuming Adjacency-Faithfulness is true, it is in principle possible to test the validity of Orientation-Faithfulness. Based on this observation, we explore the consequence of making only the Adjacency-Faithfulness assumption. 
We show that the familiar PC algorithm has to be modified to be (asymptotically) correct under the weaker, Adjacency-Faithfulness assumption. Roughly the modified algorithm, called Conservative PC (CPC), checks whether Orientation-Faithfulness holds in the orientation phase, and if not, avoids drawing certain causal conclusions the PC algorithm would draw. However, if the stronger, standard causal Faithfulness condition actually obtains, the CPC algorithm is shown to output the same pattern as the PC algorithm does in the large sample limit. We also present a simulation study showing that the CPC algorithm runs almost as fast as the PC algorithm, and outputs significantly fewer false causal arrowheads than the PC algorithm does on realistic sample sizes. We end our paper by discussing how score-based algorithms such as GES perform when the Adjacency-Faithfulness but not the standard causal Faithfulness condition holds, and how to extend our work to the FCI algorithm, which allows for the possibility of latent variables.", "year": 2006, "venue": "Conference on Uncertainty in Artificial Intelligence", "authors": [ "J. Ramsey", "Jiji Zhang", "P. Spirtes" ], "externalIds": { "DBLP": "conf/uai/RamseyZS06", "MAG": "2963429013", "ArXiv": "1206.6843", "CorpusId": 1985686 }, "url": "https://www.semanticscholar.org/paper/ccd84f15d3320ce1a324e515c5536a85b9169883", "referenceCount": 13, "citationCount": 262, "influentialCitationCount": 31, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Causation, Prediction, and Search", "abstract": "The writing is not uniformly polished and is scattered with long, awkward sentences that require some effort to unravel. I wonder if this is the result of infelicitous translation from the original German version (Wellek 1994). There are also numerous small typographical errors. More careful editing could have solved these problems before publication. There are no exercises, and so I would hesitate to use the book as a text (although it should be noted that this is not one of the author’s stated aims). Although Testing Statistical Hypotheses of Equivalence has some weaknesses, it is a useful reference for those interested in the question of equivalence testing, particularly in biological applications.", "year": 2003, "venue": "Technometrics", "authors": [ "T. Burr" ], "externalIds": { "MAG": "2063081613", "DBLP": "journals/technometrics/Burr03", "DOI": "10.1198/tech.2003.s776", "CorpusId": 10562706 }, "url": "https://www.semanticscholar.org/paper/1c6356a688a273889fb71bab17973ed10483d97f", "referenceCount": 5, "citationCount": 4669, "influentialCitationCount": 779, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Theoretical perspectives on the relation between catastrophizing and pain.", "abstract": "The tendency to \"catastrophize\" during painful stimulation contributes to more intense pain experience and increased emotional distress. Catastrophizing has been broadly conceived as an exaggerated negative \"mental set\" brought to bear during painful experiences. Although findings have been consistent in showing a relation between catastrophizing and pain, research in this area has proceeded in the relative absence of a guiding theoretical framework. 
This article reviews the literature on the relation between catastrophizing and pain and examines the relative strengths and limitations of different theoretical models that could be advanced to account for the pattern of available findings. The article evaluates the explanatory power of a schema activation model, an appraisal model, an attention model, and a communal coping model of pain perception. It is suggested that catastrophizing might best be viewed from the perspective of hierarchical levels of analysis, where social factors and social goals may play a role in the development and maintenance of catastrophizing, whereas appraisal-related processes may point to the mechanisms that link catastrophizing to pain experience. Directions for future research are suggested.", "year": 2001, "venue": "The Clinical Journal of Pain", "authors": [ "M. Sullivan", "B. Thorn", "J. Haythornthwaite", "F. Keefe", "Michelle Y Martin", "L. Bradley", "J. Lefebvre" ], "externalIds": { "MAG": "2099493839", "DOI": "10.1097/00002508-200103000-00008", "CorpusId": 9578844, "PubMed": "11289089" }, "url": "https://www.semanticscholar.org/paper/082f7a02a6e8af4e3cebbb09398db91d5051d6c1", "referenceCount": 123, "citationCount": 2362, "influentialCitationCount": 142, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Psychology" ] }, { "title": "Fear-avoidance and its consequences in chronic musculoskeletal pain: a state of the art", "abstract": null, "year": 2000, "venue": "Pain", "authors": [ "J. Vlaeyen", "S. Linton" ], "externalIds": { "MAG": "2099093791", "DOI": "10.1016/S0304-3959(99)00242-0", "CorpusId": 14486753, "PubMed": "10781906" }, "url": "https://www.semanticscholar.org/paper/3d86fb7608f3cee070a646a820ad8e0a6226dd5d", "referenceCount": 148, "citationCount": 4338, "influentialCitationCount": 183, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Psychology" ] }, { "title": "The use of artificial intelligence in the analysis of sports performance: a review of applications in human gait analysis and future directions for sports biomechanics.", "abstract": "Computers have played an important supporting role in the development of experimental and theoretical sports biomechanics. The role of the computer now extends from data capture and data processing through to mathematical and statistical modelling and simulation and optimization. This paper seeks to demonstrate that elevation of the role of the computer to involvement in the decision-making process, through the use of artificial intelligence techniques, would be a potentially rewarding future direction for the discipline. In the absence of significant previous work in this area, this paper reviews experiences in a parallel field of medical informatics, namely gait analysis. Research into the application of expert systems and neural networks to gait analysis is reviewed, observations made and comparisons drawn with the biomechanical analysis of sports performance. Brief explanations of the artificial intelligence techniques discussed in the paper are provided. The paper concludes that the creation of an expert system for a specific well-defined sports technique would represent a significant advance in the development of sports biomechanics.", "year": 1995, "venue": "Jurnal sport science", "authors": [ "A. Lapham", "R. 
Bartlett" ], "externalIds": { "MAG": "2096123577", "DOI": "10.1080/02640419508732232", "CorpusId": 26030822, "PubMed": "7563290" }, "url": "https://www.semanticscholar.org/paper/5533766a24728ed19353deda023261b4e3433a43", "referenceCount": 32, "citationCount": 66, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Psychology" ] }, { "title": "Catastrophizing, depression and the sensory, affective and evaluative aspects of chronic pain", "abstract": null, "year": 1994, "venue": "Pain", "authors": [ "M. Geisser", "M. Robinson", "F. Keefe", "M. L. Weiner" ], "externalIds": { "MAG": "2122861239", "DOI": "10.1016/0304-3959(94)90050-7", "CorpusId": 16773833, "PubMed": "7854806" }, "url": "https://www.semanticscholar.org/paper/0ea7a282e7cf966d6467689c181ea65fb38f8610", "referenceCount": 37, "citationCount": 286, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology", "Medicine" ] }, { "title": "The future of performance-related sports biomechanics research.", "abstract": "An overview of performance-related research in sports biomechanics is presented describing the relevant techniques of data analysis and data processing together with the methods used in experimental and theoretical studies. Advances in data collection and processing techniques which are necessary for the future development of sports biomechanics research are identified. The difficulties associated with experimental studies in sports biomechanics are described with examples of the different approaches that have been used. The strengths and weaknesses of theoretical studies are discussed with examples drawn from a number of sports. It is concluded that progress in performance-related research will result from the application of a suitable combination of theoretical and experimental approaches to those sports in which technique is the primary requirement for success.", "year": 1994, "venue": "Jurnal sport science", "authors": [ "M. R. Yeadon", "J. Challis" ], "externalIds": { "MAG": "2038011343", "DOI": "10.1080/02640419408732156", "CorpusId": 6364274, "PubMed": "8158746" }, "url": "https://www.semanticscholar.org/paper/6ecd71c40547e3bdfad547ceffb2a4b409ffbeeb", "referenceCount": 167, "citationCount": 98, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "The EmoPain@Home Dataset: Capturing Pain Level and Activity Recognition for People with Chronic Pain in Their Homes", "abstract": "—Chronic pain is a prevalent condition where fear of movement and pain interfere with everyday functioning. Yet, there is no open body movement dataset for people with chronic pain in everyday settings. Our EmoPain@Home dataset addresses this with capture from 18 people with and without chronic pain in their homes, while they performed their routine activities. The data includes labels for pain, worry, and movement confidence continuously recorded for activity instances for the people with chronic pain. We explored baseline two-level pain detection based on this dataset and obtained 0.62 mean F1 score. However, extension of the dataset led to deterioration in performance confirming high variability in pain expressions for real world settings. We investigated baseline activity recognition for this setting as a first step in exploring the use of the activity label as contextual information for improving pain level classification performance. 
We obtained mean F1 score of 0.43 for 9 activity types, highlighting its feasibility. Further exploration, however, showed that data from healthy people cannot be easily leveraged for improving performance because worry and low confidence alter activity strategies for people with chronic pain. Our dataset and findings lay critical groundwork for automatic assessment of pain experience and behaviour in the wild.", "year": 2024, "venue": "IEEE Transactions on Affective Computing", "authors": [ "Temitayo A. Olugbade", "R. Buono", "Kyrill Potapov", "Alex Bujorianu", "A. C. D. C. Williams", "Santiago De Ossorno Garcia", "Nicolas Gold", "Catherine Holloway", "Nadia Bianchi-Berthouze" ], "externalIds": { "DOI": "10.1109/taffc.2024.3390837", "CorpusId": 269248785 }, "url": "https://www.semanticscholar.org/paper/9f7663a64b76b72485affc7fe4ae3366e4e0a4da", "referenceCount": 78, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Unifying system identification and biomechanical formulations for the estimation of muscle, tendon and joint stiffness during human movement", "abstract": "In vivo joint stiffness estimation during time-varying conditions remains an open challenge. Multiple communities, e.g. system identification and biomechanics, have tackled the problem from different perspectives and using different methods, each of which entailing advantages and limitations, often complementary. System identification formulations provide data-driven estimates of stiffness at the joint level, while biomechanics often relies on musculoskeletal models to estimate stiffness at multiple levels, i.e. joint, muscle, and tendon. Collaboration across these two scientific communities seems to be a logical step toward a reliable multi-level understanding of joint stiffness. However, differences at the theoretical, computational, and experimental levels have limited inter-community interaction. In this article we present a roadmap to achieve a unified framework for the estimation of time-varying stiffness in the composite human neuromusculoskeletal system during movement. We present our perspective on future developments to obtain data-driven system identification and musculoskeletal models that are compatible at the theoretical, computational, and experimental levels. Moreover, we propose a novel combined closed-loop paradigm, in which reference estimates of joint stiffness via system identification are decomposed into underlying muscle and tendon contribution via high-density-electromyography-driven musculoskeletal modeling. We highlight the need for aligning experimental requirements to be able to compare both joint stiffness formulations. Unifying both biomechanics’ and system identification’s formulations is a necessary step for truly generalizing stiffness estimation across individuals, movement conditions, training and impairment levels. From an application point of view, this is central for enabling patient-specific neurorehabilitation therapies, as well as biomimetic control of assistive robotic technologies. The roadmap we propose could serve as an inspiration for future collaborations across broadly different scientific communities to truly understand joint stiffness bio- and neuromechanics. Video Abstract: Unifying system identification and biomechanical formulations for the estimation of muscle, tendon and joint stiffness during human movement", "year": 2021, "venue": "Progress in Biomedical Engineering", "authors": [ "Christopher P. Cop", "G. 
Cavallo", "R. C. van 't Veld", "Bart FJM Koopman", "J. Lataire", "A. Schouten", "Massimo Sartori" ], "externalIds": { "DOI": "10.1088/2516-1091/ac12c4", "CorpusId": 236508945 }, "url": "https://www.semanticscholar.org/paper/0d514d9b683d9fb33bbc0230bd2d6db6dacc84b9", "referenceCount": 115, "citationCount": 9, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Physics" ] }, { "title": "Understanding Foot Function During Stance Phase by Bayesian Network Based Causal Inference", "abstract": null, "year": 2014, "venue": "Machine Learning in Healthcare Informatics", "authors": [ "Myagmarbayar Nergui", "J. Inoue", "C. Murai", "Wenwei Yu", "U. Acharya" ], "externalIds": { "MAG": "124776883", "DBLP": "series/isrl/NerguiIMYA14", "DOI": "10.1007/978-3-642-40017-9_6", "CorpusId": 46157949 }, "url": "https://www.semanticscholar.org/paper/45db000a938b39fd3bbc6ac43fa56d052dbd3dfc", "referenceCount": 20, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Kullback-Leibler Divergence", "abstract": null, "year": 2011, "venue": "International Encyclopedia of Statistical Science", "authors": [ "James M. Joyce" ], "externalIds": { "MAG": "107619411", "DBLP": "reference/stat/Joyce11", "DOI": "10.1007/978-3-642-04898-2_327", "CorpusId": 37718089 }, "url": "https://www.semanticscholar.org/paper/3c7ab18a86779b2bfbb6c1fc86e9948acfb428d4", "referenceCount": 23, "citationCount": 565, "influentialCitationCount": 15, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Pain and Behavior", "abstract": null, "year": null, "venue": "", "authors": [ "Edmond L. Truelove" ], "externalIds": { "CorpusId": 267935380 }, "url": "https://www.semanticscholar.org/paper/17ba9b7c44357bfbb7872ed0c67eac50fdf3cff0", "referenceCount": 11, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Bmc Medical Research Methodology Open Access Identification of Candidate Categories of the International Classification of Functioning Disability and Health (icf) for a Generic Icf Core Set Based on Regression Modelling", "abstract": "Background: The International Classification of Functioning, Disability and Health (ICF) is the framework developed by WHO to describe functioning and disability at both the individual and population levels.", "year": null, "venue": "", "authors": [ "A. Cieza", "S. Geyh", "S. Chatterji", "N. Kostanjsek", "Bedirhan Üstün", "G. 
Stucki", "Int", "Int ; Bedirhan" ], "externalIds": { "CorpusId": 16653180 }, "url": "https://www.semanticscholar.org/paper/cebca87093802e9799ea9eb8e6ee3f5495eac24f", "referenceCount": 35, "citationCount": 134, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Edge Weight Assignment: Assign weights to the edges in the DAG based on the computed KL divergence values, reflecting the strength of the causal influence", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Adaptive Learning on User Segmentation: Universal to Specific Representation via Bipartite Neural Interaction": { "paper_title": "Adaptive Learning on User Segmentation: Universal to Specific Representation via Bipartite Neural Interaction", "arxiv_id": "2409.14945v1", "keyword": "representation learning", "authors": [ "Xiaoyu Tan", "Yongxin Deng", "Chao Qu", "Siqiao Xue", "Xiaoming Shi", "James Zhang", "Xihe Qiu" ], "references": [ { "title": "PaddlePaddle: A Production-Oriented Deep Learning Platform Facilitating the Competency of Enterprises", "abstract": "As a thriving branch of Artificial Intelligence (AI), deep learning has empowered industries through cost reduction and efficiency improvement. Enterprise decision-makers need skilled employees who are familiar with their business and deep learning. As a result, they are placing increasing emphasis on deep learning technology. However, mastering mathematics, probability theory, and programming is a prerequisite for deep learning studies, which is a high threshold for business employees, especially for those who are not computer science majors. To solve this problem, we created a production-oriented deep learning platform-PaddlePaddle, to meet the needs of employees from various technological backgrounds. PaddlePaddle is a four-tiered software architecture, which consists of Core Framework, Algorithm Suites, Scene Suites, and Visual Interface. Four components are inter-compatible and gradually progressive. It also proposes a new model of cultivating employees to start using the Visual Interface, then use the Core Framework with the improvement of technology. In this paper, we report the architecture and functions of PaddlePaddle in detail, with a special emphasis on its novel aspects:", "year": 2022, "venue": "2022 IEEE 24th Int Conf on High Performance Computing & Communications; 8th Int Conf on Data Science & Systems; 20th Int Conf on Smart City; 8th Int Conf on Dependability in Sensor, Cloud & Big Data Systems & Application (HPCC/DSS/SmartCity/DependSys)", "authors": [ "Ran Bi", "Tongtong Xu", "Mingxue Xu", "Enhong Chen" ], "externalIds": { "DBLP": "conf/hpcc/BiXXC22", "DOI": "10.1109/HPCC-DSS-SmartCity-DependSys57074.2022.00046", "CorpusId": 257809259 }, "url": "https://www.semanticscholar.org/paper/e1398f9b08a73e39a4f9ccd338342c8b83146495", "referenceCount": 33, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "HYPRO: A Hybridly Normalized Probabilistic Model for Long-Horizon Prediction of Event Sequences", "abstract": "In this paper, we tackle the important yet under-investigated problem of making long-horizon prediction of event sequences. Existing state-of-the-art models do not perform well at this task due to their autoregressive structure. 
We propose HYPRO, a hybridly normalized probabilistic model that naturally fits this task: its first part is an autoregressive base model that learns to propose predictions; its second part is an energy function that learns to reweight the proposals such that more realistic predictions end up with higher probabilities. We also propose efficient training and inference algorithms for this model. Experiments on multiple real-world datasets demonstrate that our proposed HYPRO model can significantly outperform previous models at making long-horizon predictions of future events. We also conduct a range of ablation studies to investigate the effectiveness of each component of our proposed methods.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Siqiao Xue", "X. Shi", "James Zhang", "Hongyuan Mei" ], "externalIds": { "DBLP": "journals/corr/abs-2210-01753", "ArXiv": "2210.01753", "DOI": "10.48550/arXiv.2210.01753", "CorpusId": 252692943 }, "url": "https://www.semanticscholar.org/paper/f2c44f9193dd087f2c2a3ecfb8c7dc166f6c6e1f", "referenceCount": 57, "citationCount": 23, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Large-scale Universal User Representation with Sparse Mixture of Experts", "abstract": "Learning user sequence behaviour embedding is very sophisticated and challenging due to the complicated feature interactions over time and high dimensions of user features. Recent emerging foundation models, e.g., BERT and its variants, encourage a large body of researchers to investigate in this field. However, unlike natural language processing (NLP) tasks, the parameters of user behaviour model come mostly from user embedding layer, which makes most existing works fail in training a universal user embedding of large scale. Furthermore, user representations are learned from multiple downstream tasks, and the past research work do not address the seesaw phenomenon. In this paper, we propose SUPERMOE, a generic framework to obtain high quality user representation from multiple tasks. Specifically, the user behaviour sequences are encoded by MoE transformer, and we can thus increase the model capacity to billions of parameters, or even to trillions of parameters. In order to deal with seesaw phenomenon when learning across multiple tasks, we design a new loss function with task indicators. We perform extensive offline experiments on public datasets and online experiments on private real-world business scenarios. Our approach achieves the best performance over state-of-the-art models, and the results demonstrate the effectiveness of our framework.", "year": 2022, "venue": "arXiv.org", "authors": [ "Caigao Jiang", "Siqiao Xue", "James Zhang", "Lingyue Liu", "Zhibo Zhu", "Hongyan Hao" ], "externalIds": { "ArXiv": "2207.04648", "DBLP": "journals/corr/abs-2207-04648", "DOI": "10.48550/arXiv.2207.04648", "CorpusId": 250426537 }, "url": "https://www.semanticscholar.org/paper/58e4eb1967c38840b0ecf24f26e8f3f1a13ab8a7", "referenceCount": 25, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Meta Reinforcement Learning Approach for Predictive Autoscaling in the Cloud", "abstract": "Predictive autoscaling (autoscaling with workload forecasting) is an important mechanism that supports autonomous adjustment of computing resources in accordance with fluctuating workload demands in the Cloud. 
In recent works, Reinforcement Learning (RL) has been introduced as a promising approach to learn the resource management policies to guide the scaling actions under the dynamic and uncertain cloud environment. However, RL methods face the following challenges in steering predictive autoscaling, such as lack of accuracy in decision-making, inefficient sampling and significant variability in workload patterns that may cause policies to fail at test time. To this end, we propose an end-to-end predictive meta model-based RL algorithm, aiming to optimally allocate resource to maintain a stable CPU utilization level, which incorporates a specially-designed deep periodic workload prediction model as the input and embeds the Neural Process [11, 16] to guide the learning of the optimal scaling actions over numerous application services in the Cloud. Our algorithm not only ensures the predictability and accuracy of the scaling strategy, but also enables the scaling decisions to adapt to the changing workloads with high sample efficiency. Our method has achieved significant performance improvement compared to the existing algorithms and has been deployed online at Alipay, supporting the autoscaling of applications for the world-leading payment platform.", "year": 2022, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Siqiao Xue", "C. Qu", "X. Shi", "Cong Liao", "Shiyi Zhu", "Xiaoyu Tan", "Lintao Ma", "Shiyu Wang", "Shijun Wang", "Yun Hu", "Lei Lei", "Yang Zheng", "Jianguo Li", "James Zhang" ], "externalIds": { "DBLP": "journals/corr/abs-2205-15795", "ArXiv": "2205.15795", "DOI": "10.1145/3534678.3539063", "CorpusId": 249209701 }, "url": "https://www.semanticscholar.org/paper/8f502a85ed14fecab7c04d3523ef01458e5e8d1d", "referenceCount": 33, "citationCount": 28, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Causal Representation Learning for Out-of-Distribution Recommendation", "abstract": "Modern recommender systems learn user representations from historical interactions, which suffer from the problem of user feature shifts, such as an income increase. Historical interactions will inject out-of-date information into the representation in conflict with the latest user feature, leading to improper recommendations. In this work, we consider the Out-Of-Distribution (OOD) recommendation problem in an OOD environment with user feature shifts. To pursue high fidelity, we set additional objectives for representation learning as: 1) strong OOD generalization and 2) fast OOD adaptation. This work formulates and solves the problem from a causal view. We formulate the user feature shift as an intervention and OOD recommendation as post-intervention inference of the interaction probability. Towards the learning objectives, we embrace causal modeling of the generation procedure from user features to interactions. However, the unobserved user features cannot be ignored, which make the estimation of the interaction probability intractable. We thus devise a new Variational Auto-Encoder for causal modeling by incorporating an encoder to infer unobserved user features from historical interactions. We further perform counterfactual inference to mitigate the effect of out-of-date interactions. Moreover, a decoder is used to model the interaction generation procedure and perform post-intervention inference. Fast adaptation is inherent owing to the reuse of partial user representations. 
Lastly, we devise an extension to encode fine-grained causal relationships from user features to preference. Empirical results on three datasets validate the strong OOD generalization and fast adaptation abilities of the proposed method.", "year": 2022, "venue": "The Web Conference", "authors": [ "Wenjie Wang", "Xinyu Lin", "Fuli Feng", "Xiangnan He", "Min Lin", "Tat-seng Chua" ], "externalIds": { "DBLP": "conf/www/WangLF0LC22", "DOI": "10.1145/3485447.3512251", "CorpusId": 248367478 }, "url": "https://www.semanticscholar.org/paper/7045126a2829e23e2294ca600b2fad1c2ba00b22", "referenceCount": 70, "citationCount": 82, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "TDR-CL: Targeted Doubly Robust Collaborative Learning for Debiased Recommendations", "abstract": "Bias is a common problem inherent in recommender systems, which is entangled with users' preferences and poses a great challenge to unbiased learning. For debiasing tasks, the doubly robust (DR) method and its variants show superior performance due to the double robustness property, that is, DR is unbiased when either imputed errors or learned propensities are accurate. However, our theoretical analysis reveals that DR usually has a large variance. Meanwhile, DR would suffer unexpectedly large bias and poor generalization caused by inaccurate imputed errors and learned propensities, which usually occur in practice. In this paper, we propose a principled approach that can effectively reduce bias and variance simultaneously for existing DR approaches when the error imputation model is misspecified. In addition, we further propose a novel semi-parametric collaborative learning approach that decomposes imputed errors into parametric and nonparametric parts and updates them collaboratively, resulting in more accurate predictions. Both theoretical analysis and experiments demonstrate the superiority of the proposed methods compared with existing debiasing methods.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Haoxuan Li", "Yan Lyu", "Chunyuan Zheng", "Peng Wu" ], "externalIds": { "DBLP": "conf/iclr/LiLZ023", "ArXiv": "2203.10258", "CorpusId": 257279766 }, "url": "https://www.semanticscholar.org/paper/84b9802042f29f4490eb36c2ac69a8ce9cdb2196", "referenceCount": 50, "citationCount": 26, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Click-through rate prediction in online advertising: A literature review", "abstract": null, "year": 2022, "venue": "Information Processing & Management", "authors": [ "Yanwu Yang", "Panyu Zhai" ], "externalIds": { "DBLP": "journals/corr/abs-2202-10462", "ArXiv": "2202.10462", "DOI": "10.1016/j.ipm.2021.102853", "CorpusId": 245795343 }, "url": "https://www.semanticscholar.org/paper/16c03a239600e64b2a5ca2f73764b43b4df2d147", "referenceCount": 169, "citationCount": 63, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Graph Neural Networks for Recommender System", "abstract": "Recently, graph neural network (GNN) has become the new state-of-the-art approach in many recommendation problems, with its strong ability to handle structured data and to explore high-order information. However, as the recommendation tasks are diverse and various in the real world, it is quite challenging to design proper GNN methods for specific problems. 
In this tutorial, we focus on the critical challenges of GNN-based recommendation and the potential solutions. Specifically, we start from an extensive background of recommender systems and graph neural networks. Then we fully discuss why GNNs are required in recommender systems and the four parts of challenges, including graph construction, network design, optimization, and computation efficiency. Then, we discuss how to address these challenges by elaborating on the recent advances of GNN-based recommendation models, with a systematic taxonomy from four critical perspectives: stages, scenarios, objectives, and applications. Last, we finalize this tutorial with conclusions and discuss important future directions.", "year": 2022, "venue": "Web Search and Data Mining", "authors": [ "Chen Gao", "Xiang Wang", "Xiangnan He", "Yong Li" ], "externalIds": { "DBLP": "conf/wsdm/GaoW0022", "DOI": "10.1145/3488560.3501396", "CorpusId": 246828791 }, "url": "https://www.semanticscholar.org/paper/4fa31616b834c377c4995c346a2b17464f25692a", "referenceCount": 25, "citationCount": 158, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Bellman Meets Hawkes: Model-Based Reinforcement Learning via Temporal Point Processes", "abstract": "We consider a sequential decision making problem where the agent faces the environment characterized by the stochastic discrete events and seeks an optimal intervention policy such that its long-term reward is maximized. This problem exists ubiquitously in social media, finance and health informatics but is rarely investigated by the conventional research in reinforcement learning. To this end, we present a novel framework of the model-based reinforcement learning where the agent's actions and observations are asynchronous stochastic discrete events occurring in continuous-time. We model the dynamics of the environment by Hawkes process with external intervention control term and develop an algorithm to embed such process in the Bellman equation which guides the direction of the value gradient. We demonstrate the superiority of our method in both synthetic simulator and real-data experiments.", "year": 2022, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "C. Qu", "Xiaoyu Tan", "Siqiao Xue", "X. Shi", "James Zhang", "Hongyuan Mei" ], "externalIds": { "ArXiv": "2201.12569", "DBLP": "conf/aaai/QuTXSZM23", "DOI": "10.1609/aaai.v37i8.26142", "CorpusId": 246431011 }, "url": "https://www.semanticscholar.org/paper/7cf779d889dbf155e089289bab1495be2b186b11", "referenceCount": 56, "citationCount": 16, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Survey of Recommendation Systems: Recommendation Models, Techniques, and Application Fields", "abstract": "This paper reviews the research trends that link the advanced technical aspects of recommendation systems that are used in various service areas and the business aspects of these services. First, for a reliable analysis of recommendation models for recommendation systems, data mining technology, and related research by application service, more than 135 top-ranking articles and top-tier conferences published in Google Scholar between 2010 and 2021 were collected and reviewed. Based on this, studies on recommendation system models and the technology used in recommendation systems were systematized, and research trends by year were analyzed. 
In addition, the application service fields where recommendation systems were used were classified, and research on the recommendation system model and recommendation technique used in each field was analyzed. Furthermore, vast amounts of application service-related data used by recommendation systems were collected from 2010 to 2021 without taking the journal ranking into consideration and reviewed along with various recommendation system studies, as well as applied service field industry data. As a result of this study, it was found that the flow and quantitative growth of various detailed studies of recommendation systems interact with the business growth of the actual applied service field. While providing a comprehensive summary of recommendation systems, this study provides insight to many researchers interested in recommendation systems through the analysis of its various technologies and trends in the service field to which recommendation systems are applied.", "year": 2022, "venue": "Electronics", "authors": [ "Hyeyoung Ko", "Suyeon Lee", "Yoonseo Park", "Anna Choi" ], "externalIds": { "DOI": "10.3390/electronics11010141", "CorpusId": 245712068 }, "url": "https://www.semanticscholar.org/paper/afbab8f863abb220ae1812d642da23f8d1ea002d", "referenceCount": 121, "citationCount": 227, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "A systematic review of ontology use in E-Learning recommender system", "abstract": null, "year": 2022, "venue": "Computers and Education: Artificial Intelligence", "authors": [ "N. Rahayu", "R. Ferdiana", "S. Kusumawardani" ], "externalIds": { "DBLP": "journals/caeai/RahayuFK22", "DOI": "10.1016/j.caeai.2022.100047", "CorpusId": 245960921 }, "url": "https://www.semanticscholar.org/paper/70fc3cdf6743cd9a43bb05d5ca58f328932baeb6", "referenceCount": 79, "citationCount": 77, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "AntOpt: A Multi-functional Large-scale Decision Optimization Platform", "abstract": "The orderly operation and development of any system are indivisible from decision optimization. Several issues in life are applicable to the thought of optimization problems to resolve. In this digital age, the size of information and data is obtaining larger and the potency of problem determination is changing into more demanding. Though there're many solvers for specific optimization problems, in the face of large-scale scenarios, there's no single platform that concurrently addresses usability, solvers' uniformity, and computing efficiency. In this demo, we present AntOpt, a decision optimization platform that integrates large-scale distributed computing engines, optimization algorithm solvers and productized services.", "year": 2021, "venue": "International Conference on Information and Knowledge Management", "authors": [ "Jun Zhou", "Yang Bao", "Hua Wu", "Zhigang Hua" ], "externalIds": { "DBLP": "conf/cikm/ZhouBWH21", "DOI": "10.1145/3459637.3481996", "CorpusId": 240230611 }, "url": "https://www.semanticscholar.org/paper/a0685a1d93cd2e52904a625dff2189e05cb3b338", "referenceCount": 11, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Signed Bipartite Graph Neural Networks", "abstract": "Signed networks are such social networks having both positive and negative links. A lot of theories and algorithms have been developed to model such networks (e.g., balance theory). 
However, previous work mainly focuses on the unipartite signed networks where the nodes have the same type. Signed bipartite networks are different from classical signed networks, which contain two different node sets and signed links between two node sets. Signed bipartite networks can be commonly found in many fields including business, politics, and academics, but have been less studied. In this work, we firstly define the signed relationship of the same set of nodes and provide a new perspective for analyzing signed bipartite networks. Then we do some comprehensive analysis of balance theory from two perspectives on several real-world datasets. Specifically, in the peer review dataset, we find that the ratio of balanced isomorphism in signed bipartite networks increased after rebuttal phases. Guided by these two perspectives, we propose a novel Signed Bipartite Graph Neural Networks (SBGNNs) to learn node embeddings for signed bipartite networks. SBGNNs follow most GNNs message-passing scheme, but we design new message functions, aggregation functions, and update functions for signed bipartite networks. We validate the effectiveness of our model on four real-world datasets on Link Sign Prediction task, which is the main machine learning task for signed networks. Experimental results show that our SBGNN model achieves significant improvement compared with strong baseline methods, including feature-based methods and network embedding methods.", "year": 2021, "venue": "International Conference on Information and Knowledge Management", "authors": [ "Junjie Huang", "Huawei Shen", "Qi Cao", "Shuchang Tao", "Xueqi Cheng" ], "externalIds": { "DBLP": "journals/corr/abs-2108-09638", "ArXiv": "2108.09638", "DOI": "10.1145/3459637.3482392", "CorpusId": 237267162 }, "url": "https://www.semanticscholar.org/paper/1fad5ac8623abb6940c91a8c12812fabb1c1b3e3", "referenceCount": 52, "citationCount": 30, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "User Retention: A Causal Approach with Triple Task Modeling", "abstract": "For many Internet companies, it has been an important focus to improve user retention rate. To achieve this goal, we need to recommend proper services in order to meet the demands of users. Unlike conventional click-through rate (CTR) estimation, there are lots of noise in the collected data when modeling retention, caused by two major issues: 1) implicit impression-revisit effect: users could revisit the APP even if they do not explicitly interact with the recommender system; 2) selection bias: recommender system suffers from selection bias caused by user's self-selection. To address the above challenges, we propose a novel method named UR-IPW (User Retention Modeling with Inverse Propensity Weighting), which 1) makes full use of both explicit and implicit interactions in the observed data. 2) models revisit rate estimation from a causal perspective accounting for the selection bias problem. The experiments on both offline and online environments from different scenarios demonstrate the superiority of UR-IPW over previous methods. To the best of our knowledge, this is the first work to model user retention by estimating the revisit rate from a causal perspective.", "year": 2021, "venue": "International Joint Conference on Artificial Intelligence", "authors": [ "Yang Zhang", "Dong Wang", "Qiang Li", "Yue Shen", "Ziqi Liu", "Xiaodong Zeng", "Zhiqiang Zhang", "Jinjie Gu", "Derek F. 
Wong" ], "externalIds": { "DBLP": "conf/ijcai/ZhangWLSLZZGW21", "DOI": "10.24963/ijcai.2021/468", "CorpusId": 237100524 }, "url": "https://www.semanticscholar.org/paper/e6d30095c1532ebc69ddc1cc84fe2d96fca72675", "referenceCount": 33, "citationCount": 7, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Conditional Neural Relational Inference for Interacting Systems", "abstract": null, "year": 2021, "venue": "ECML/PKDD", "authors": [ "J. Ramos", "Lionel Blondé", "S. Armand", "Alexandros Kalousis" ], "externalIds": { "DBLP": "conf/pkdd/RamosBAK21", "ArXiv": "2106.11083", "DOI": "10.1007/978-3-030-86517-7_12", "CorpusId": 235490557 }, "url": "https://www.semanticscholar.org/paper/18ce00b9262a96990d1e4f395ab7ab282bdbf90e", "referenceCount": 33, "citationCount": 55, "influentialCitationCount": 14, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Long-tail Session-based Recommendation", "abstract": "Session-based recommendation focuses on the prediction of user actions based on anonymous sessions and is a necessary method in the lack of user historical data. However, none of the existing session-based recommendation methods explicitly takes the long-tail recommendation into consideration, which plays an important role in improving the diversity of recommendation and producing the serendipity. As the distribution of items with long-tail is prevalent in session-based recommendation scenarios (e.g., e-commerce, music, and TV program recommendations), more attention should be put on the long-tail session-based recommendation. In this paper, we propose a novel network architecture, namely TailNet, to improve long-tail recommendation performance, while maintaining competitive accuracy performance compared with other methods. We start by classifying items into short-head (popular) and long-tail (niche) items based on click frequency. Then a novel preference mechanism is proposed and applied in TailNet to determine user preference between two types of items, so as to softly adjust and personalize recommendations. Extensive experiments on two real-world datasets verify the superiority of our method compared with state-of-the-art works.", "year": 2020, "venue": "ACM Conference on Recommender Systems", "authors": [ "Siyi Liu", "Yujia Zheng" ], "externalIds": { "MAG": "3044694072", "ArXiv": "2007.12329", "DBLP": "journals/corr/abs-2007-12329", "DOI": "10.1145/3383313.3412222", "CorpusId": 220768631 }, "url": "https://www.semanticscholar.org/paper/e96d683754e33b11a81549404389131c97d6edbb", "referenceCount": 27, "citationCount": 76, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "NVAE: A Deep Hierarchical Variational Autoencoder", "abstract": "Normalizing flows, autoregressive models, variational autoencoders (VAEs), and deep energy-based models are among competing likelihood-based frameworks for deep generative learning. Among them, VAEs have the advantage of fast and tractable sampling and easy-to-access encoding networks. However, they are currently outperformed by other models such as normalizing flows and autoregressive models. While the majority of the research in VAEs is focused on the statistical challenges, we explore the orthogonal direction of carefully designing neural architectures for hierarchical VAEs. We propose Nouveau VAE (NVAE), a deep hierarchical VAE built for image generation using depth-wise separable convolutions and batch normalization. 
NVAE is equipped with a residual parameterization of Normal distributions and its training is stabilized by spectral regularization. We show that NVAE achieves state-of-the-art results among non-autoregressive likelihood-based models on the MNIST, CIFAR-10, CelebA 64, and CelebA HQ datasets and it provides a strong baseline on FFHQ. For example, on CIFAR-10, NVAE pushes the state-of-the-art from 2.98 to 2.91 bits per dimension, and it produces high-quality images on CelebA HQ. To the best of our knowledge, NVAE is the first successful VAE applied to natural images as large as 256$\\times$256 pixels. The source code is available at this https URL .", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Arash Vahdat", "J. Kautz" ], "externalIds": { "ArXiv": "2007.03898", "MAG": "3041956526", "DBLP": "conf/nips/VahdatK20", "CorpusId": 220403493 }, "url": "https://www.semanticscholar.org/paper/f6d32ed0eee5fb3f6ac518f3aebc8ceff2aae397", "referenceCount": 82, "citationCount": 758, "influentialCitationCount": 92, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Meta-learning on Heterogeneous Information Networks for Cold-start Recommendation", "abstract": "Cold-start recommendation has been a challenging problem due to sparse user-item interactions for new users or items. Existing efforts have alleviated the cold-start issue to some extent, most of which approach the problem at the data level. Earlier methods often incorporate auxiliary data as user or item features, while more recent methods leverage heterogeneous information networks (HIN) to capture richer semantics via higher-order graph structures. On the other hand, recent meta-learning paradigm sheds light on addressing cold-start recommendation at the model level, given its ability to rapidly adapt to new tasks with scarce labeled data, or in the context of cold-start recommendation, new users and items with very few interactions. Thus, we are inspired to develop a novel meta-learning approach named MetaHIN to address cold-start recommendation on HINs, to exploit the power of meta-learning at the model level and HINs at the data level simultaneously. The solution is non-trivial, for how to capture HIN-based semantics in the meta-learning setting, and how to learn the general knowledge that can be easily adapted to multifaceted semantics, remain open questions. In MetaHIN, we propose a novel semantic-enhanced tasks constructor and a co-adaptation meta-learner to address the two questions. Extensive experiments demonstrate that MetaHIN significantly outperforms the state of the arts in various cold-start scenarios. (Code and dataset are available at https://github.com/rootlu/MetaHIN.)", "year": 2020, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Yuanfu Lu", "Yuan Fang", "C. Shi" ], "externalIds": { "MAG": "3043239945", "DBLP": "conf/kdd/Lu0S20", "DOI": "10.1145/3394486.3403207", "CorpusId": 221191103 }, "url": "https://www.semanticscholar.org/paper/cbf914243ff5051fff0d17333aae6459ade0b4f9", "referenceCount": 35, "citationCount": 211, "influentialCitationCount": 20, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An Attention-based Model for Conversion Rate Prediction with Delayed Feedback via Post-click Calibration", "abstract": "Conversion rate (CVR) prediction is becoming increasingly important in the multi-billion dollar online display advertising industry. 
It has two major challenges: firstly, the scarce user history data is very complicated and non-linear; secondly, the time delay between the clicks and the corresponding conversions can be very large, e.g., ranging from seconds to weeks. Existing models usually suffer from such scarce and delayed conversion behaviors. In this paper, we propose a novel deep learning framework to tackle the two challenges. Specifically, we extract the pre-trained embedding from impressions/clicks to assist in conversion models and propose an inner/self-attention mechanism to capture the fine-grained personalized product purchase interests from the sequential click data. Besides, to overcome the time-delay issue, we calibrate the delay model by learning dynamic hazard function with the abundant post-click data more in line with the real distribution. Empirical experiments with real-world user behavior data prove the effectiveness of the proposed method.", "year": 2020, "venue": "International Joint Conference on Artificial Intelligence", "authors": [ "Yumin Su", "Liang Zhang", "Quanyu Dai", "Bo Zhang", "Jinyao Yan", "Dan Wang", "Yongjun Bao", "Sulong Xu", "Yang He", "Weipeng P. Yan" ], "externalIds": { "DBLP": "conf/ijcai/SuZDZYWBXHY20", "MAG": "3034260007", "DOI": "10.24963/ijcai.2020/487", "CorpusId": 265039019 }, "url": "https://www.semanticscholar.org/paper/43286f2d8dff3cde0beefd0540cb632048ea7231", "referenceCount": 27, "citationCount": 28, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fi-GNN: Modeling Feature Interactions via Graph Neural Networks for CTR Prediction", "abstract": "Click-through rate (CTR) prediction is an essential task in web applications such as online advertising and recommender systems, whose features are usually in multi-field form. The key of this task is to model feature interactions among different feature fields. Recently proposed deep learning based models follow a general paradigm: raw sparse input multi-field features are first mapped into dense field embedding vectors, and then simply concatenated together to feed into deep neural networks (DNN) or other specifically designed networks to learn high-order feature interactions. However, the simple unstructured combination of feature fields will inevitably limit the capability to model sophisticated interactions among different fields in a sufficiently flexible and explicit fashion. In this work, we propose to represent the multi-field features in a graph structure intuitively, where each node corresponds to a feature field and different fields can interact through edges. The task of modeling feature interactions can be thus converted to modeling node interactions on the corresponding graph. To this end, we design a novel model Feature Interaction Graph Neural Networks (Fi-GNN). Taking advantage of the strong representative power of graphs, our proposed model can not only model sophisticated feature interactions in a flexible and explicit fashion, but also provide good model explanations for CTR prediction. 
Experimental results on two real-world datasets show its superiority over the state-of-the-arts.", "year": 2019, "venue": "International Conference on Information and Knowledge Management", "authors": [ "Zekun Li", "Zeyu Cui", "Shu Wu", "Xiaoyu Zhang", "Liang Wang" ], "externalIds": { "DBLP": "conf/cikm/LiCWZW19", "MAG": "3104439459", "ArXiv": "1910.05552", "DOI": "10.1145/3357384.3357951", "CorpusId": 204512528 }, "url": "https://www.semanticscholar.org/paper/3718a4b0443c294c78fd2140b258985175822aa9", "referenceCount": 42, "citationCount": 186, "influentialCitationCount": 21, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MeLU: Meta-Learned User Preference Estimator for Cold-Start Recommendation", "abstract": "This paper proposes a recommender system to alleviate the cold-start problem that can estimate user preferences based on only a small number of items. To identify a user's preference in the cold state, existing recommender systems, such as Netflix, initially provide items to a user; we call those items evidence candidates. Recommendations are then made based on the items selected by the user. Previous recommendation studies have two limitations: (1) the users who consumed a few items have poor recommendations and (2) inadequate evidence candidates are used to identify user preferences. We propose a meta-learning-based recommender system called MeLU to overcome these two limitations. From meta-learning, which can rapidly adopt new task with a few examples, MeLU can estimate new user's preferences with a few consumed items. In addition, we provide an evidence candidate selection strategy that determines distinguishing items for customized preference estimation. We validate MeLU with two benchmark datasets, and the proposed model reduces at least 5.92% mean absolute error than two comparative models on the datasets. We also conduct a user study experiment to verify the evidence selection strategy.", "year": 2019, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Hoyeop Lee", "Jinbae Im", "Seongwon Jang", "Hyunsouk Cho", "Sehee Chung" ], "externalIds": { "MAG": "2964983698", "DBLP": "journals/corr/abs-1908-00413", "ArXiv": "1908.00413", "DOI": "10.1145/3292500.3330859", "CorpusId": 198952436 }, "url": "https://www.semanticscholar.org/paper/27d2e30bb3ddbed3023340a5ef4d25d4a10bcc70", "referenceCount": 32, "citationCount": 313, "influentialCitationCount": 55, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FiBiNET: combining feature importance and bilinear feature interaction for click-through rate prediction", "abstract": "Advertising and feed ranking are essential to many Internet companies such as Facebook and Sina Weibo. Among many real-world advertising and feed ranking systems, click through rate (CTR) prediction plays a central role. There are many proposed models in this field such as logistic regression, tree based models, factorization machine based models and deep learning based CTR models. However, many current works calculate the feature interactions in a simple way such as Hadamard product and inner product and they care less about the importance of features. In this paper, a new model named FiBiNET as an abbreviation for Feature Importance and Bilinear feature Interaction NETwork is proposed to dynamically learn the feature importance and fine-grained feature interactions. 
On the one hand, the FiBiNET can dynamically learn the importance of features via the Squeeze-Excitation network (SENET) mechanism; on the other hand, it is able to effectively learn the feature interactions via bilinear function. We conduct extensive experiments on two real-world datasets and show that our shallow model outperforms other shallow models such as factorization machine(FM) and field-aware factorization machine(FFM). In order to improve performance further, we combine a classical deep neural network(DNN) component with the shallow model to be a deep model. The deep FiBiNET consistently outperforms the other state-of-the-art deep models such as DeepFM and extreme deep factorization machine(XdeepFM).", "year": 2019, "venue": "ACM Conference on Recommender Systems", "authors": [ "Tongwen Huang", "Zhiqi Zhang", "Junlin Zhang" ], "externalIds": { "ArXiv": "1905.09433", "DBLP": "journals/corr/abs-1905-09433", "MAG": "2973085388", "DOI": "10.1145/3298689.3347043", "CorpusId": 162184358 }, "url": "https://www.semanticscholar.org/paper/37908e8b2a6ff6d6592c9ee98afe824cf4100eaa", "referenceCount": 27, "citationCount": 239, "influentialCitationCount": 34, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "BERT4Rec: Sequential Recommendation with Bidirectional Encoder Representations from Transformer", "abstract": "Modeling users' dynamic preferences from their historical behaviors is challenging and crucial for recommendation systems. Previous methods employ sequential neural networks to encode users' historical interactions from left to right into hidden representations for making recommendations. Despite their effectiveness, we argue that such left-to-right unidirectional models are sub-optimal due to the limitations including: (a) unidirectional architectures restrict the power of hidden representation in users' behavior sequences; and (b) they often assume a rigidly ordered sequence which is not always practical. To address these limitations, we proposed a sequential recommendation model called BERT4Rec, which employs the deep bidirectional self-attention to model user behavior sequences. To avoid the information leakage and efficiently train the bidirectional model, we adopt the Cloze objective to sequential recommendation, predicting the random masked items in the sequence by jointly conditioning on their left and right context. In this way, we learn a bidirectional representation model to make recommendations by allowing each item in user historical behaviors to fuse information from both left and right sides. 
Extensive experiments on four benchmark datasets show that our model outperforms various state-of-the-art sequential models consistently.", "year": 2019, "venue": "International Conference on Information and Knowledge Management", "authors": [ "Fei Sun", "Jun Liu", "Jian Wu", "Changhua Pei", "Xiao Lin", "Wenwu Ou", "Peng Jiang" ], "externalIds": { "MAG": "2984100107", "DBLP": "conf/cikm/SunLWPLOJ19", "ArXiv": "1904.06690", "DOI": "10.1145/3357384.3357895", "CorpusId": 119181611 }, "url": "https://www.semanticscholar.org/paper/690edf44e8739fd80bdfb76f40c9a4a222f3bba8", "referenceCount": 61, "citationCount": 1666, "influentialCitationCount": 255, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Operation-aware Neural Networks for User Response Prediction", "abstract": null, "year": 2019, "venue": "Neural Networks", "authors": [ "Yi Yang", "Baile Xu", "S. Furao", "Jian Zhao" ], "externalIds": { "DBLP": "journals/nn/YangXSSZ20", "MAG": "2977032157", "ArXiv": "1904.12579", "DOI": "10.1016/j.neunet.2019.09.020", "CorpusId": 139106012, "PubMed": "31563699" }, "url": "https://www.semanticscholar.org/paper/7a97791ec6fdd6b8411942c8de2aa2089ee17ff6", "referenceCount": 24, "citationCount": 61, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine", "Mathematics" ] }, { "title": "AutoInt: Automatic Feature Interaction Learning via Self-Attentive Neural Networks", "abstract": "Click-through rate (CTR) prediction, which aims to predict the probability of a user clicking on an ad or an item, is critical to many online applications such as online advertising and recommender systems. The problem is very challenging since (1) the input features (e.g., the user id, user age, item id, item category) are usually sparse and high-dimensional, and (2) an effective prediction relies on high-order combinatorial features (a.k.a. cross features), which are very time-consuming to hand-craft by domain experts and are impossible to be enumerated. Therefore, there have been efforts in finding low-dimensional representations of the sparse and high-dimensional raw features and their meaningful combinations. In this paper, we propose an effective and efficient method called the AutoInt to automatically learn the high-order feature interactions of input features. Our proposed algorithm is very general, which can be applied to both numerical and categorical input features. Specifically, we map both the numerical and categorical features into the same low-dimensional space. Afterwards, a multi-head self-attentive neural network with residual connections is proposed to explicitly model the feature interactions in the low-dimensional space. With different layers of the multi-head self-attentive neural networks, different orders of feature combinations of input features can be modeled. The whole model can be efficiently fit on large-scale raw data in an end-to-end fashion. Experimental results on four real-world datasets show that our proposed approach not only outperforms existing state-of-the-art approaches for prediction but also offers good explainability. 
Code is available at: https://github.com/DeepGraphLearning/RecommenderSystems.", "year": 2018, "venue": "International Conference on Information and Knowledge Management", "authors": [ "Weiping Song", "Chence Shi", "Zhiping Xiao", "Zhijian Duan", "Yewen Xu", "Ming Zhang", "Jian Tang" ], "externalIds": { "DBLP": "conf/cikm/SongS0DX0T19", "ArXiv": "1810.11921", "MAG": "2898085636", "DOI": "10.1145/3357384.3357925", "CorpusId": 53100214 }, "url": "https://www.semanticscholar.org/paper/08588a4e596b02f22ac77dc8300aaabc27cb66b4", "referenceCount": 45, "citationCount": 701, "influentialCitationCount": 107, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "xDeepFM: Combining Explicit and Implicit Feature Interactions for Recommender Systems", "abstract": "Combinatorial features are essential for the success of many commercial models. Manually crafting these features usually comes with high cost due to the variety, volume and velocity of raw data in web-scale systems. Factorization based models, which measure interactions in terms of vector product, can learn patterns of combinatorial features automatically and generalize to unseen features as well. With the great success of deep neural networks (DNNs) in various fields, recently researchers have proposed several DNN-based factorization model to learn both low- and high-order feature interactions. Despite the powerful ability of learning an arbitrary function from data, plain DNNs generate feature interactions implicitly and at the bit-wise level. In this paper, we propose a novel Compressed Interaction Network (CIN), which aims to generate feature interactions in an explicit fashion and at the vector-wise level. We show that the CIN share some functionalities with convolutional neural networks (CNNs) and recurrent neural networks (RNNs). We further combine a CIN and a classical DNN into one unified model, and named this new model eXtreme Deep Factorization Machine (xDeepFM). On one hand, the xDeepFM is able to learn certain bounded-degree feature interactions explicitly; on the other hand, it can learn arbitrary low- and high-order feature interactions implicitly. We conduct comprehensive experiments on three real-world datasets. Our results demonstrate that xDeepFM outperforms state-of-the-art models. We have released the source code of xDeepFM at https://github.com/Leavingseason/xDeepFM.", "year": 2018, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Jianxun Lian", "Xiaohuan Zhou", "Fuzheng Zhang", "Zhongxia Chen", "Xing Xie", "Guangzhong Sun" ], "externalIds": { "DBLP": "conf/kdd/LianZZCXS18", "MAG": "2964129278", "ArXiv": "1803.05170", "DOI": "10.1145/3219819.3220023", "CorpusId": 3930042 }, "url": "https://www.semanticscholar.org/paper/3448e6a5039417dc1ae890efeca3bef5390ace7c", "referenceCount": 49, "citationCount": 934, "influentialCitationCount": 92, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Deep & Cross Network for Ad Click Predictions", "abstract": "Feature engineering has been the key to the success of many prediction models. However, the process is nontrivial and often requires manual feature engineering or exhaustive searching. DNNs are able to automatically learn feature interactions; however, they generate all the interactions implicitly, and are not necessarily efficient in learning all types of cross features. 
In this paper, we propose the Deep & Cross Network (DCN) which keeps the benefits of a DNN model, and beyond that, it introduces a novel cross network that is more efficient in learning certain bounded-degree feature interactions. In particular, DCN explicitly applies feature crossing at each layer, requires no manual feature engineering, and adds negligible extra complexity to the DNN model. Our experimental results have demonstrated its superiority over the state-of-art algorithms on the CTR prediction dataset and dense classification dataset, in terms of both model accuracy and memory usage.", "year": 2017, "venue": "ADKDD@KDD", "authors": [ "Ruoxi Wang", "Bin Fu", "G. Fu", "Mingliang Wang" ], "externalIds": { "ArXiv": "1708.05123", "MAG": "2750075801", "DBLP": "conf/kdd/WangFFW17", "DOI": "10.1145/3124749.3124754", "CorpusId": 6011288 }, "url": "https://www.semanticscholar.org/paper/ca870d3dbfdddaef4a92e60f53d2077dbe0fdd0a", "referenceCount": 18, "citationCount": 1045, "influentialCitationCount": 169, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Variational Graph Auto-Encoders", "abstract": "We introduce the variational graph auto-encoder (VGAE), a framework for unsupervised learning on graph-structured data based on the variational auto-encoder (VAE). This model makes use of latent variables and is capable of learning interpretable latent representations for undirected graphs. We demonstrate this model using a graph convolutional network (GCN) encoder and a simple inner product decoder. Our model achieves competitive results on a link prediction task in citation networks. In contrast to most existing models for unsupervised learning on graph-structured data and link prediction, our model can naturally incorporate node features, which significantly improves predictive performance on a number of benchmark datasets.", "year": 2016, "venue": "arXiv.org", "authors": [ "Thomas Kipf", "M. Welling" ], "externalIds": { "DBLP": "journals/corr/KipfW16a", "ArXiv": "1611.07308", "MAG": "2554952599", "CorpusId": 14249137 }, "url": "https://www.semanticscholar.org/paper/54906484f42e871f7c47bbfe784a358b1448231f", "referenceCount": 12, "citationCount": 3081, "influentialCitationCount": 715, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Variational Autoencoder for Deep Learning of Images, Labels and Captions", "abstract": "A novel variational autoencoder is developed to model images, as well as associated labels or captions. The Deep Generative Deconvolutional Network (DGDN) is used as a decoder of the latent image features, and a deep Convolutional Neural Network (CNN) is used as an image encoder; the CNN is used to approximate a distribution for the latent DGDN features/code. The latent code is also linked to generative models for labels (Bayesian support vector machine) or captions (recurrent neural network). When predicting a label/caption for a new image at test, averaging is performed across the distribution of latent codes; this is computationally efficient as a consequence of the learned CNN-based encoder. 
Since the framework is capable of modeling the image in the presence/absence of associated labels/captions, a new semi-supervised setting is manifested for CNN learning with images; the framework even allows unsupervised CNN learning, based on images alone.", "year": 2016, "venue": "Neural Information Processing Systems", "authors": [ "Yunchen Pu", "Zhe Gan", "Ricardo Henao", "Xin Yuan", "Chunyuan Li", "Andrew Stevens", "L. Carin" ], "externalIds": { "DBLP": "journals/corr/PuGHYLSC16", "MAG": "2527569769", "ArXiv": "1609.08976", "CorpusId": 2665144 }, "url": "https://www.semanticscholar.org/paper/f4c5d13a8e9e80edcd4f69f0eab0b4434364c6dd", "referenceCount": 37, "citationCount": 685, "influentialCitationCount": 23, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Using neural word embeddings to model user behavior and detect user segments", "abstract": null, "year": 2016, "venue": "Knowledge-Based Systems", "authors": [ "Ludovico Boratto", "S. Carta", "G. Fenu", "Roberto Saia" ], "externalIds": { "DBLP": "journals/kbs/BorattoCFS16", "MAG": "2390196958", "DOI": "10.1016/j.knosys.2016.05.002", "CorpusId": 206712806 }, "url": "https://www.semanticscholar.org/paper/89c21926a1c9c592be2a5c9dd42032046ba663aa", "referenceCount": 41, "citationCount": 27, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Predicting clicks: CTR estimation of advertisements using Logistic Regression classifier", "abstract": "Search engine advertising in the present day is a pronounced component of the Web. Choosing the appropriate and relevant ad for a particular query and positioning of the ad critically impacts the probability of being noticed and clicked. It also strategically impacts the revenue, the search engine shall generate from a particular Ad. Needless to say, showing the user an Ad that is relevant to his/her need greatly improves users satisfaction. For all the aforesaid reasons, its of utmost importance to correctly determine the click-through rate (CTR) of ads in a system. For frequently appearing ads, CTR is empirically measurable, but for the new ads, other means have to be devised. In this paper we propose and establish a model to predict the CTRs of advertisements adopting Logistic Regression as the effective framework for representing and constructing conditions and vulnerabilities among variables. Logistic Regression is a type of probabilistic statistical classification model that predicts a binary response from a binary predictor, based on one or more predictor variables. Advertisements that have the most elevated to be clicked are chosen using supervised machine learning calculation. We tested Logistic Regression algorithm on a one week advertisement data of size around 25 GB by considering position and impression as predictor variables. Using this prescribed model we were able to achieve around 90% accuracy for CTR estimation.", "year": 2015, "venue": "IEEE International Advance Computing Conference", "authors": [ "R. Kumar", "Sneha Naik", "V. D. Naik", "Smita Shiralli", "Sunil V.G", "M. 
Husain" ], "externalIds": { "MAG": "1570168312", "DOI": "10.1109/IADCC.2015.7154880", "CorpusId": 15407106 }, "url": "https://www.semanticscholar.org/paper/6299162e781e0bd81674525479b034a779e553ac", "referenceCount": 24, "citationCount": 34, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "E-Commerce Strategy", "abstract": null, "year": 2014, "venue": "Springer Berlin Heidelberg", "authors": [ "Zheng Qin", "Yang Chang", "Shundong Li", "Fengxiang Li" ], "externalIds": { "MAG": "656930997", "DBLP": "books/sp/QinCLL14", "DOI": "10.1007/978-3-642-39414-0", "CorpusId": 31792113 }, "url": "https://www.semanticscholar.org/paper/1ca14f6d5393b493745ea4d126194510e6bc6743", "referenceCount": 0, "citationCount": 56, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Business", "Computer Science" ] }, { "title": "Predicting CTR of new ads via click prediction", "abstract": "Predicting CTR of ads on the search result page is an urgent topic. The reason for this is that choosing the right advertisement greatly affects revenue of the search engine and advertisers and user's satisfaction. For ads with the large click history it is quite clear how to predict CTR by utilizing statistical data. But for new ads with a poor click history such approach is not robust and reliable. We suggest a model for predicting CTR of such new ads. Contrary to the previous models of predicting CTR of new ads, our model uses events - clicks and skips1 instead of the observed CTR. In addition we have implemented several novel features, that resulted into the increase of the performance of our model. Offline and online experiments on the real search engine system demonstrated that our model outperforms the baseline and the approaches suggested in previous papers.", "year": 2012, "venue": "International Conference on Information and Knowledge Management", "authors": [ "Alexander Kolesnikov", "Yury Logachev", "V. Topinskiy" ], "externalIds": { "MAG": "2088046516", "DBLP": "conf/cikm/KolesnikovLT12", "DOI": "10.1145/2396761.2398688", "CorpusId": 16623917 }, "url": "https://www.semanticscholar.org/paper/135e3f458f49e55edfc04a359cf91e43a0685384", "referenceCount": 2, "citationCount": 8, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A new collaborative filtering metric that improves the behavior of recommender systems", "abstract": null, "year": 2010, "venue": "Knowledge-Based Systems", "authors": [ "Jesús Bobadilla", "F. Serradilla", "J. Bernal" ], "externalIds": { "MAG": "2039353303", "DBLP": "journals/kbs/BobadillaSB10", "DOI": "10.1016/j.knosys.2010.03.009", "CorpusId": 8199573 }, "url": "https://www.semanticscholar.org/paper/aa72b5074430fe383cde29242b678e0b161a98b5", "referenceCount": 35, "citationCount": 299, "influentialCitationCount": 20, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Probabilistic latent semantic user segmentation for behavioral targeted advertising", "abstract": "Behavioral Targeting (BT), which aims to deliver the most appropriate advertisements to the most appropriate users, is attracting much attention in online advertising market. A key challenge of BT is how to automatically segment users for ads delivery, and good user segmentation may significantly improve the ad click-through rate (CTR). 
Different from classical user segmentation strategies, which rarely take the semantics of user behaviors into consideration, we propose in this paper a novel user segmentation algorithm named Probabilistic Latent Semantic User Segmentation (PLSUS). PLSUS adopts the probabilistic latent semantic analysis to mine the relationship between users and their behaviors so as to segment users in a semantic manner. We perform experiments on the real world ad click through log of a commercial search engine. Comparing with the other two classical clustering algorithms, K-Means and CLUTO, PLSUS can further improve the ads CTR up to 100%. To our best knowledge, this work is an early semantic user segmentation study for BT in academia.", "year": 2009, "venue": "KDD Workshop on Data Mining and Audience Intelligence for Advertising", "authors": [ "Xiaohui Wu", "Jun Yan", "Ning Liu", "Shuicheng Yan", "Ying Chen", "Zheng Chen" ], "externalIds": { "MAG": "2160741926", "DBLP": "conf/kdd/WuYLYCC09", "DOI": "10.1145/1592748.1592751", "CorpusId": 8593813 }, "url": "https://www.semanticscholar.org/paper/8383802d1d392a732280522ade83de0b9017d0b0", "referenceCount": 23, "citationCount": 51, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An Efficient Hybrid Music Recommender System Using an Incrementally Trainable Probabilistic Generative Model", "abstract": "This paper presents a hybrid music recommender system that ranks musical pieces while efficiently maintaining collaborative and content-based data, i.e., rating scores given by users and acoustic features of audio signals. This hybrid approach overcomes the conventional tradeoff between recommendation accuracy and variety of recommended artists. Collaborative filtering, which is used on e-commerce sites, cannot recommend nonrated pieces and provides a narrow variety of artists. Content-based filtering does not have satisfactory accuracy because it is based on the heuristics that the user's favorite pieces will have similar musical content despite there being exceptions. To attain a higher recommendation accuracy along with a wider variety of artists, we use a probabilistic generative model that unifies the collaborative and content-based data in a principled way. This model can explain the generative mechanism of the observed data in the probability theory. The probability distribution over users, pieces, and features is decomposed into three conditionally independent ones by introducing latent variables. This decomposition enables us to efficiently and incrementally adapt the model for increasing numbers of users and rating scores. We evaluated our system by using audio signals of commercial CDs and their corresponding rating scores obtained from an e-commerce site. The results revealed that our system accurately recommended pieces including nonrated ones from a wide variety of artists and maintained a high degree of accuracy even when new users and rating scores were added.", "year": 2008, "venue": "IEEE Transactions on Audio, Speech, and Language Processing", "authors": [ "Kazuyoshi Yoshii", "Masataka Goto", "K. Komatani", "T. Ogata", "Hiroshi G. 
Okuno" ], "externalIds": { "MAG": "2161937612", "DBLP": "journals/taslp/YoshiiGKOO08", "DOI": "10.1109/TASL.2007.911503", "CorpusId": 14690059 }, "url": "https://www.semanticscholar.org/paper/38351d2e2f09190cadb643fa8d3c3ae3963ec682", "referenceCount": 48, "citationCount": 149, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "AUTO-ENCODING VARIATIONAL BAYES", "abstract": "To make decisions based on a model fit by Auto-Encoding Variational Bayes (AEVB), practitioners typically use importance sampling to estimate a functional of the posterior distribution. The variational distribution found by AEVB serves as the proposal distribution for importance sampling. However, this proposal distribution may give unreliable (high variance) importance sampling estimates, thus leading to poor decisions. We explore how changing the objective function for learning the variational distribution, while continuing to learn the generative model based on the ELBO, affects the quality of downstream decisions. For a particular model, we characterize the error of importance sampling as a function of posterior variance and show that proposal distributions learned with evidence upper bounds are better. Motivated by these theoretical results, we propose a novel variant of the VAE. In addition to experimenting with MNIST, we present a full-fledged application of the proposed method to single-cell RNA sequencing. In this challenging instance of multiple hypothesis testing, the proposed method surpasses the current state of the art.", "year": 2020, "venue": "", "authors": [ "Romain Lopez", "Pierre Boyeau", "N. Yosef", "Michael I. Jordan", "J. Regier" ], "externalIds": { "CorpusId": 211146177 }, "url": "https://www.semanticscholar.org/paper/ef4f5a50837a7c1b3e87b9300ffc7ba00d461a0f", "referenceCount": 53, "citationCount": 11952, "influentialCitationCount": 1706, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "AnOpen-SourceDeepLearningPlatformOriginatedfromIndustrial Practice, PaddlePaddle is dedicated to facilitating innovations and applications of deep learning", "abstract": null, "year": 2020, "venue": "github", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Continualunsupervisedrepresentationlearning", "abstract": null, "year": 2019, "venue": "Advances in Neural Information Processing Systems", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "DeepFM:afactorization-machinebasedneuralnetworkforCTRprediction", "abstract": null, "year": 2017, "venue": "arXiv", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Abaselinefordetectingmisclassifiedandout-of-distributionexamplesinneuralnetworks", "abstract": null, "year": 2016, "venue": "arXiv", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "DisplayAdvertisingChallengePredictclick-throughratesondisplay ads", "abstract": null, "year": 2014, "venue": "www", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, 
"influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "NEWER: A system for NEuro-fuzzy WEb Recommendation", "abstract": null, "year": 2011, "venue": "Applied Soft Computing", "authors": [ "Giovanna Castellano", "A. Fanelli", "M. Torsello" ], "externalIds": { "DBLP": "journals/asc/CastellanoFT11", "MAG": "2088524577", "DOI": "10.1016/j.asoc.2009.12.040", "CorpusId": 28972758 }, "url": "https://www.semanticscholar.org/paper/c1de8386df34050b0865e8e2d5446674a0a30a81", "referenceCount": 23, "citationCount": 51, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Sem-Fit: A semantic based expertsystemtoproviderecommendationsinthetourismdomain", "abstract": null, "year": 2011, "venue": "Expertsystems with applications", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "A multi-disciplinarrecommendersystemtoadviceresearchresourcesinuniversitydigitallibraries", "abstract": null, "year": 2009, "venue": "Expert systems with applications", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "SIGIR-AP ’23, November 26–28, 2023, Beijing,", "abstract": null, "year": null, "venue": "China", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2022. Asurveyonpersonality-awarerecommendationsystems", "abstract": null, "year": null, "venue": "Artificial Intelligence Review", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Adaptive Learning on User Segmentation: Universal to Specific Representation via Bipartite Neural Interaction*", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Received 20 February 2007; revised 12 March 2009; accepted 5 June 2009", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2022. LITERATUREREVIEW:COMPETITIVESTRAT-EGY, COMPETITIVE ADVANTAGES, AND MARKETING PERFORMANCE ON E-COMMERCE SHOPEE INDONESIA", "abstract": null, "year": null, "venue": "Dinasti International Journal of Digital Business Management", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Kriformer: A Novel Spatiotemporal Kriging Approach Based on Graph Transformers": { "paper_title": "Kriformer: A Novel Spatiotemporal Kriging Approach Based on Graph Transformers", "arxiv_id": "2409.14906v1", "keyword": "representation learning", "authors": [ "Renbin Pan", "Feng Xiao", "Hegui Zhang", "Minyu Shen" ], "references": [ { "title": "Fifty Years of Kriging", "abstract": null, "year": 2018, "venue": "", "authors": [ "J. Chilès", "N. 
Desassis" ], "externalIds": { "MAG": "2810919783", "DOI": "10.1007/978-3-319-78999-6_29", "CorpusId": 125362741 }, "url": "https://www.semanticscholar.org/paper/03bce278e06514ab8cd9c6505f309bdd0a05a1d0", "referenceCount": 65, "citationCount": 62, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Graph Classification via Deep Learning with Virtual Nodes", "abstract": "Learning representation for graph classification turns a variable-size graph into a fixed-size vector (or matrix). Such a representation works nicely with algebraic manipulations. Here we introduce a simple method to augment an attributed graph with a virtual node that is bidirectionally connected to all existing nodes. The virtual node represents the latent aspects of the graph, which are not immediately available from the attributes and local connectivity structures. The expanded graph is then put through any node representation method. The representation of the virtual node is then the representation of the entire graph. In this paper, we use the recently introduced Column Network for the expanded graph, resulting in a new end-to-end graph classification model dubbed Virtual Column Network (VCN). The model is validated on two tasks: (i) predicting bio-activity of chemical compounds, and (ii) finding software vulnerability from source code. Results demonstrate that VCN is competitive against well-established rivals.", "year": 2017, "venue": "arXiv.org", "authors": [ "Trang Pham", "T. Tran", "K. Dam", "S. Venkatesh" ], "externalIds": { "DBLP": "journals/corr/abs-1708-04357", "ArXiv": "1708.04357", "MAG": "2748414854", "CorpusId": 22370828 }, "url": "https://www.semanticscholar.org/paper/ba429df663f9976dee9ab77907b5c84101c21d7f", "referenceCount": 31, "citationCount": 39, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Interpolation of Spatial Data: Some Theory for Kriging", "abstract": null, "year": 2000, "venue": "", "authors": [ "G. Heuvelink" ], "externalIds": { "MAG": "2023661375", "DOI": "10.1016/S0016-7061(00)00010-0", "CorpusId": 70687801 }, "url": "https://www.semanticscholar.org/paper/7784e45516fe443458d07b21d4298dbc9f2c4984", "referenceCount": 0, "citationCount": 732, "influentialCitationCount": 100, "isOpenAccess": false, "fieldsOfStudy": [ "Geology" ] } ] }, "Disentanglement with Factor Quantized Variational Autoencoders": { "paper_title": "Disentanglement with Factor Quantized Variational Autoencoders", "arxiv_id": "2409.14851v1", "keyword": "representation learning", "authors": [ "Gulcin Baykal", "Melih Kandemir", "Gozde Unal" ], "references": [ { "title": "FADES: Fair Disentanglement with Sensitive Relevance", "abstract": "Learning fair representation in deep learning is essential to mitigate discriminatory outcomes and enhance trustworthiness. However, previous research has been commonly established on inappropriate assumptions prone to unrealistic counterfactuals and performance degradation. Although some proposed alternative approaches, such as employing correlation-aware causal graphs or proxies for mutual information, these methods are less practical and not applicable in general. In this work, we propose FAir DisEntanglement with Sensitive relevance (FADES), a novel approach that leverages conditional mutual information from the information theory perspective to address these challenges. 
We employ sensitive relevant code to direct correlated information between target labels and sensitive attributes by imposing conditional independence, allowing better separation of the features of interest in the latent space. Utilizing an intuitive disentangling approach, FADES consistently achieves superior performance and fairness both quantitatively and qualitatively with its straightforward structure. Specifically, the proposed method outperforms existing works in downstream classification and counterfactual generations on various benchmarks.", "year": 2024, "venue": "Computer Vision and Pattern Recognition", "authors": [ "T. Jang", "Xiaoqian Wang" ], "externalIds": { "DOI": "10.1109/CVPR52733.2024.01147", "CorpusId": 272724986 }, "url": "https://www.semanticscholar.org/paper/b8180dba2b8a79e1c7fd6c9f4eac066e3170743f", "referenceCount": 58, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Neural Language of Thought Models", "abstract": "The Language of Thought Hypothesis suggests that human cognition operates on a structured, language-like system of mental representations. While neural language models can naturally benefit from the compositional structure inherently and explicitly expressed in language data, learning such representations from non-linguistic general observations, like images, remains a challenge. In this work, we introduce the Neural Language of Thought Model (NLoTM), a novel approach for unsupervised learning of LoTH-inspired representation and generation. NLoTM comprises two key components: (1) the Semantic Vector-Quantized Variational Autoencoder, which learns hierarchical, composable discrete representations aligned with objects and their properties, and (2) the Autoregressive LoT Prior, an autoregressive transformer that learns to generate semantic concept tokens compositionally, capturing the underlying data distribution. We evaluate NLoTM on several 2D and 3D image datasets, demonstrating superior performance in downstream tasks, out-of-distribution generalization, and image generation quality compared to patch-based VQ-VAE and continuous object-centric representations. Our work presents a significant step towards creating neural networks exhibiting more human-like understanding by developing LoT-like representations and offers insights into the intersection of cognitive science and machine learning.", "year": 2024, "venue": "International Conference on Learning Representations", "authors": [ "Yi-Fu Wu", "Minseung Lee", "Sungjin Ahn" ], "externalIds": { "DBLP": "conf/iclr/WuLA24", "ArXiv": "2402.01203", "CorpusId": 267406235 }, "url": "https://www.semanticscholar.org/paper/c561b3f596106a079733399961ea3d3044fb2747", "referenceCount": 67, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Self-Supervised Disentanglement by Leveraging Structure in Data Augmentations", "abstract": "Self-supervised representation learning often uses data augmentations to induce some invariance to\"style\"attributes of the data. However, with downstream tasks generally unknown at training time, it is difficult to deduce a priori which attributes of the data are indeed\"style\"and can be safely discarded. To deal with this, current approaches try to retain some style information by tuning the degree of invariance to some particular task, such as ImageNet object classification. 
However, prior work has shown that such task-specific tuning can lead to significant performance degradation on other tasks that rely on the discarded style. To address this, we introduce a more principled approach that seeks to disentangle style features rather than discard them. The key idea is to add multiple style embedding spaces where: (i) each is invariant to all-but-one augmentation; and (ii) joint entropy is maximized. We formalize our structured data-augmentation procedure from a causal latent-variable-model perspective, and prove identifiability of both content and individual style variables. We empirically demonstrate the benefits of our approach on both synthetic and real-world data.", "year": 2023, "venue": "arXiv.org", "authors": [ "Cian Eastwood", "Julius von Kügelgen", "Linus Ericsson", "Diane Bouchacourt", "Pascal Vincent", "B. Scholkopf", "Mark Ibrahim" ], "externalIds": { "ArXiv": "2311.08815", "DBLP": "journals/corr/abs-2311-08815", "DOI": "10.48550/arXiv.2311.08815", "CorpusId": 265212662 }, "url": "https://www.semanticscholar.org/paper/a8eba0a3356bd24b9690a771b9cb2e48bfb7d1a4", "referenceCount": 62, "citationCount": 7, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Efficient disentangled representation learning for multi-modal finger biometrics", "abstract": null, "year": 2023, "venue": "Pattern Recognition", "authors": [ "Weili Yang", "Junduan Huang", "Dacan Luo", "Wenxiong Kang" ], "externalIds": { "DBLP": "journals/pr/YangHLK24", "DOI": "10.1016/j.patcog.2023.109944", "CorpusId": 261603100 }, "url": "https://www.semanticscholar.org/paper/cfe4ea6bdb4726cab66f6efa14ccde9229b525b2", "referenceCount": 22, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Disentanglement via Latent Quantization", "abstract": "In disentangled representation learning, a model is asked to tease apart a dataset's underlying sources of variation and represent them independently of one another. Since the model is provided with no ground truth information about these sources, inductive biases take a paramount role in enabling disentanglement. In this work, we construct an inductive bias towards encoding to and decoding from an organized latent space. Concretely, we do this by (i) quantizing the latent space into discrete code vectors with a separate learnable scalar codebook per dimension and (ii) applying strong model regularization via an unusually high weight decay. Intuitively, the latent space design forces the encoder to combinatorially construct codes from a small number of distinct scalar values, which in turn enables the decoder to assign a consistent meaning to each value. Regularization then serves to drive the model towards this parsimonious strategy. We demonstrate the broad applicability of this approach by adding it to both basic data-reconstructing (vanilla autoencoder) and latent-reconstructing (InfoGAN) generative models. For reliable evaluation, we also propose InfoMEC, a new set of metrics for disentanglement that is cohesively grounded in information theory and fixes well-established shortcomings in previous metrics. Together with regularization, latent quantization dramatically improves the modularity and explicitness of learned representations on a representative suite of benchmark datasets. 
In particular, our quantized-latent autoencoder (QLAE) consistently outperforms strong methods from prior work in these key disentanglement properties without compromising data reconstruction.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Kyle Hsu", "W. Dorrell", "James C. R. Whittington", "Jiajun Wu", "Chelsea Finn" ], "externalIds": { "ArXiv": "2305.18378", "DBLP": "journals/corr/abs-2305-18378", "DOI": "10.48550/arXiv.2305.18378", "CorpusId": 258967507 }, "url": "https://www.semanticscholar.org/paper/97f13455fb54310159464689b8b35f69c040f382", "referenceCount": 80, "citationCount": 14, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Where you edit is what you get: Text-guided image editing with region-based attention", "abstract": null, "year": 2023, "venue": "Pattern Recognition", "authors": [ "Changming Xiao", "Qi Yang", "Xiaoqiang Xu", "Jianwei Zhang", "Feng Zhou", "Changshui Zhang" ], "externalIds": { "DBLP": "journals/pr/XiaoYXZZZ23", "DOI": "10.1016/j.patcog.2023.109458", "CorpusId": 257212775 }, "url": "https://www.semanticscholar.org/paper/a41b72b0ec4561dbbf8557529ab3bb8423bda29a", "referenceCount": 39, "citationCount": 9, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Disentangled Representation Learning", "abstract": "Disentangled Representation Learning (DRL) aims to learn a model capable of identifying and disentangling the underlying factors hidden in the observable data in representation form. The process of separating underlying factors of variation into variables with semantic meaning benefits in learning explainable representations of data, which imitates the meaningful understanding process of humans when observing an object or relation. As a general learning strategy, DRL has demonstrated its power in improving the model explainability, controlability, robustness, as well as generalization capacity in a wide range of scenarios such as computer vision, natural language processing, and data mining. In this article, we comprehensively investigate DRL from various aspects including motivations, definitions, methodologies, evaluations, applications, and model designs. We first present two well-recognized definitions, i.e., Intuitive Definition and Group Theory Definition for disentangled representation learning. We further categorize the methodologies for DRL into four groups from the following perspectives, the model type, representation structure, supervision signal, and independence assumption. We also analyze principles to design different DRL models that may benefit different tasks in practical applications. Finally, we point out challenges in DRL as well as potential research directions deserving future investigations. 
We believe this work may provide insights for promoting the DRL research in the community.", "year": 2022, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Xin Wang", "Hong Chen", "Siao Tang", "Zihao Wu", "Wenwu Zhu" ], "externalIds": { "DBLP": "journals/corr/abs-2211-11695", "ArXiv": "2211.11695", "DOI": "10.48550/arXiv.2211.11695", "CorpusId": 253735310, "PubMed": "38949944" }, "url": "https://www.semanticscholar.org/paper/31b5eedca669ec6a19c89ae6975cd2963f730667", "referenceCount": 192, "citationCount": 49, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Disentanglement with Biological Constraints: A Theory of Functional Cell Types", "abstract": "Neurons in the brain are often finely tuned for specific task variables. Moreover, such disentangled representations are highly sought after in machine learning. Here we mathematically prove that simple biological constraints on neurons, namely nonnegativity and energy efficiency in both activity and weights, promote such sought after disentangled representations by enforcing neurons to become selective for single factors of task variation. We demonstrate these constraints lead to disentanglement in a variety of tasks and architectures, including variational autoencoders. We also use this theory to explain why the brain partitions its cells into distinct cell types such as grid and object-vector cells, and also explain when the brain instead entangles representations in response to entangled task factors. Overall, this work provides a mathematical understanding of why single neurons in the brain often represent single human-interpretable factors, and steps towards an understanding task structure shapes the structure of brain representation.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "James C. R. Whittington", "W. Dorrell", "S. Ganguli", "T. Behrens" ], "externalIds": { "ArXiv": "2210.01768", "DBLP": "conf/iclr/WhittingtonDGB23", "CorpusId": 252693327 }, "url": "https://www.semanticscholar.org/paper/43694ae3de01d0ebd730485b5890ed84094f7ad1", "referenceCount": 95, "citationCount": 39, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Biology", "Computer Science" ] }, { "title": "Learning Disentangled Representations for Controllable Human Motion Prediction", "abstract": "Generative model-based motion prediction techniques have recently realized predicting controlled human motions, such as predicting multiple upper human body motions with similar lower-body motions. However, to achieve this, the state-of-the-art methods require either subsequently learning mapping functions to seek similar motions or training the model repetitively to enable control over the desired portion of body. In this paper, we propose a novel framework to learn disentangled representations for controllable human motion prediction. Our network involves a conditional variational auto-encoder (CVAE) architecture to model full-body human motion, and an extra CVAE path to learn only the corresponding partial-body (e.g., lower-body) motion. Specifically, the inductive bias imposed by the extra CVAE path encourages two latent variables in two paths to respectively govern separate representations for each partial-body motion. 
With a single training, our model is able to provide two types of controls for the generated human motions: (i) strictly controlling one portion of human body and (ii) adaptively controlling the other portion, by sampling from a pair of latent spaces. Additionally, we extend and adapt a sampling strategy to our trained model to diversify the controllable predictions. Our framework also potentially allows new forms of control by flexibly customizing the input for the extra CVAE path. Extensive experimental results and ablation studies demonstrate that our approach is capable of predicting state-of-the-art controllable human motions both qualitatively and quantitatively.", "year": 2022, "venue": "Pattern Recognition", "authors": [ "Chunzhi Gu", "Junzhou Yu", "Chao Zhang" ], "externalIds": { "DBLP": "journals/pr/GuYZ24", "ArXiv": "2207.01388", "DOI": "10.48550/arXiv.2207.01388", "CorpusId": 250264639 }, "url": "https://www.semanticscholar.org/paper/c2ce93584e6f0dc5cfedbd6262bffb9f7e1ddbf7", "referenceCount": 58, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "High-Resolution Image Synthesis with Latent Diffusion Models", "abstract": "By decomposing the image formation process into a sequential application of denoising autoencoders, diffusion models (DMs) achieve state-of-the-art synthesis results on image data and beyond. Additionally, their formulation allows for a guiding mechanism to control the image generation process without retraining. However, since these models typically operate directly in pixel space, optimization of powerful DMs often consumes hundreds of GPU days and inference is expensive due to sequential evaluations. To enable DM training on limited computational resources while retaining their quality and flexibility, we apply them in the latent space of powerful pretrained autoencoders. In contrast to previous work, training diffusion models on such a representation allows for the first time to reach a near-optimal point between complexity reduction and detail preservation, greatly boosting visual fidelity. By introducing cross-attention layers into the model architecture, we turn diffusion models into powerful and flexible generators for general conditioning inputs such as text or bounding boxes and high-resolution synthesis becomes possible in a convolutional manner. Our latent diffusion models (LDMs) achieve new state of the art scores for image inpainting and class-conditional image synthesis and highly competitive performance on various tasks, including unconditional image generation, text-to-image synthesis, and super-resolution, while significantly reducing computational requirements compared to pixel-based DMs.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Robin Rombach", "A. Blattmann", "Dominik Lorenz", "Patrick Esser", "B. 
Ommer" ], "externalIds": { "ArXiv": "2112.10752", "DBLP": "journals/corr/abs-2112-10752", "DOI": "10.1109/CVPR52688.2022.01042", "CorpusId": 245335280 }, "url": "https://www.semanticscholar.org/paper/c10075b3746a9f3dd5811970e93c8ca3ad39b39d", "referenceCount": 110, "citationCount": 9847, "influentialCitationCount": 2744, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Disentangling Generative Factors in Natural Language with Discrete Variational Autoencoders", "abstract": "The ability of learning disentangled representations represents a major step for interpretable NLP systems as it allows latent linguistic features to be controlled. Most approaches to disentanglement rely on continuous variables, both for images and text. We argue that despite being suitable for image datasets, continuous variables may not be ideal to model features of textual data, due to the fact that most generative factors in text are discrete. We propose a Variational Autoencoder based method which models language features as discrete variables and encourages independence between variables for learning disentangled representations. The proposed model outperforms continuous and discrete baselines on several qualitative and quantitative benchmarks for disentanglement as well as on a text style transfer downstream application.", "year": 2021, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Giangiacomo Mercatali", "A. Freitas" ], "externalIds": { "ArXiv": "2109.07169", "DBLP": "journals/corr/abs-2109-07169", "DOI": "10.18653/v1/2021.findings-emnlp.301", "CorpusId": 237513367 }, "url": "https://www.semanticscholar.org/paper/b19536e8baf580c68f81b62035799c94ca25a58d", "referenceCount": 35, "citationCount": 21, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Zero-Shot Text-to-Image Generation", "abstract": "Text-to-image generation has traditionally focused on finding better modeling assumptions for training on a fixed dataset. These assumptions might involve complex architectures, auxiliary losses, or side information such as object part labels or segmentation masks supplied during training. We describe a simple approach for this task based on a transformer that autoregressively models the text and image tokens as a single stream of data. With sufficient data and scale, our approach is competitive with previous domain-specific models when evaluated in a zero-shot fashion.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "A. Ramesh", "Mikhail Pavlov", "Gabriel Goh", "Scott Gray", "Chelsea Voss", "Alec Radford", "Mark Chen", "I. Sutskever" ], "externalIds": { "DBLP": "journals/corr/abs-2102-12092", "MAG": "3170016573", "ArXiv": "2102.12092", "CorpusId": 232035663 }, "url": "https://www.semanticscholar.org/paper/2cd605106b88c85d7d8b865b1ef0f8c8293debf1", "referenceCount": 64, "citationCount": 3802, "influentialCitationCount": 331, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Taming Transformers for High-Resolution Image Synthesis", "abstract": "Designed to learn long-range interactions on sequential data, transformers continue to show state-of-the-art results on a wide variety of tasks. In contrast to CNNs, they contain no inductive bias that prioritizes local interactions. This makes them expressive, but also computationally infeasible for long sequences, such as high-resolution images. 
We demonstrate how combining the effectiveness of the inductive bias of CNNs with the expressivity of transformers enables them to model and thereby synthesize high-resolution images. We show how to (i) use CNNs to learn a context-rich vocabulary of image constituents, and in turn (ii) utilize transformers to efficiently model their composition within high-resolution images. Our approach is readily applied to conditional synthesis tasks, where both non-spatial information, such as object classes, and spatial information, such as segmentations, can control the generated image. In particular, we present the first results on semantically-guided synthesis of megapixel images with transformers. Project page at https://git.io/JLlvY.", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Patrick Esser", "Robin Rombach", "B. Ommer" ], "externalIds": { "ArXiv": "2012.09841", "MAG": "3111551570", "DBLP": "journals/corr/abs-2012-09841", "DOI": "10.1109/CVPR46437.2021.01268", "CorpusId": 229297973 }, "url": "https://www.semanticscholar.org/paper/47f7ec3d0a5e6e83b6768ece35206a94dc81919c", "referenceCount": 82, "citationCount": 2069, "influentialCitationCount": 389, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Denoising Diffusion Probabilistic Models", "abstract": "We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional CIFAR10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art FID score of 3.17. On 256x256 LSUN, we obtain sample quality similar to ProgressiveGAN. Our implementation is available at this https URL", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Jonathan Ho", "Ajay Jain", "P. Abbeel" ], "externalIds": { "ArXiv": "2006.11239", "MAG": "3100572490", "DBLP": "journals/corr/abs-2006-11239", "CorpusId": 219955663 }, "url": "https://www.semanticscholar.org/paper/5c126ae3421f05768d8edd97ecd44b1364e2c99a", "referenceCount": 73, "citationCount": 10778, "influentialCitationCount": 2337, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Structure by Architecture: Structured Representations without Regularization", "abstract": "We study the problem of self-supervised structured representation learning using autoencoders for downstream tasks such as generative modeling. Unlike most methods which rely on matching an arbitrary, relatively unstructured, prior distribution for sampling, we propose a sampling technique that relies solely on the independence of latent variables, thereby avoiding the trade-off between reconstruction quality and generative performance typically observed in VAEs. We design a novel autoencoder architecture capable of learning a structured representation without the need for aggressive regularization. Our structural decoders learn a hierarchy of latent variables, thereby ordering the information without any additional regularization or supervision. 
We demonstrate how these models learn a representation that improves results in a variety of downstream tasks including generation, disentanglement, and extrapolation using several challenging and natural image datasets.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Felix Leeb", "Giulia Lanzillotta", "Yashas Annadani", "M. Besserve", "Stefan Bauer", "B. Scholkopf" ], "externalIds": { "DBLP": "conf/iclr/LeebLABBS23", "ArXiv": "2006.07796", "CorpusId": 259298578 }, "url": "https://www.semanticscholar.org/paper/e79271b3bf7f9c73f55bdba2e98b4fbe9619655a", "referenceCount": 68, "citationCount": 7, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Jukebox: A Generative Model for Music", "abstract": "We introduce Jukebox, a model that generates music with singing in the raw audio domain. We tackle the long context of raw audio using a multi-scale VQ-VAE to compress it to discrete codes, and modeling those using autoregressive Transformers. We show that the combined model at scale can generate high-fidelity and diverse songs with coherence up to multiple minutes. We can condition on artist and genre to steer the musical and vocal style, and on unaligned lyrics to make the singing more controllable. We are releasing thousands of non cherry-picked samples at this https URL, along with model weights and code at this https URL", "year": 2020, "venue": "arXiv.org", "authors": [ "Prafulla Dhariwal", "Heewoo Jun", "Christine Payne", "Jong Wook Kim", "Alec Radford", "I. Sutskever" ], "externalIds": { "DBLP": "journals/corr/abs-2005-00341", "ArXiv": "2005.00341", "MAG": "3021164770", "CorpusId": 218470180 }, "url": "https://www.semanticscholar.org/paper/67dea28495cab71703993d0d52ca4733b9a66077", "referenceCount": 85, "citationCount": 594, "influentialCitationCount": 76, "isOpenAccess": false, "fieldsOfStudy": [ "Engineering", "Computer Science", "Mathematics" ] }, { "title": "CausalVAE: Disentangled Representation Learning via Neural Structural Causal Models", "abstract": "Learning disentanglement aims at finding a low dimensional representation which consists of multiple explanatory and generative factors of the observational data. The framework of variational autoencoder (VAE) is commonly used to disentangle independent factors from observations. However, in real scenarios, factors with semantics are not necessarily independent. Instead, there might be an underlying causal structure which renders these factors dependent. We thus propose a new VAE based framework named CausalVAE, which includes a Causal Layer to transform independent exogenous factors into causal endogenous ones that correspond to causally related concepts in data. We further analyze the model identifiabitily, showing that the proposed model learned from observations recovers the true one up to a certain degree. Experiments are conducted on various datasets, including synthetic and real word benchmark CelebA. Results show that the causal representations learned by CausalVAE are semantically interpretable, and their causal relationship as a Directed Acyclic Graph (DAG) is identified with good accuracy. 
Furthermore, we demonstrate that the proposed CausalVAE model is able to generate counterfactual data through \"do-operation\" to the causal factors.", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Mengyue Yang", "Furui Liu", "Zhitang Chen", "Xinwei Shen", "Jianye Hao", "Jun Wang" ], "externalIds": { "MAG": "3038522363", "DBLP": "conf/cvpr/YangLCSHW21", "DOI": "10.1109/CVPR46437.2021.00947", "CorpusId": 220280826 }, "url": "https://www.semanticscholar.org/paper/d2599ccb2401198b5e6e1d867c7d0f22b5055f5e", "referenceCount": 43, "citationCount": 213, "influentialCitationCount": 26, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Theory of Usable Information Under Computational Constraints", "abstract": "We propose a new framework for reasoning about information in complex systems. Our foundation is based on a variational extension of Shannon's information theory that takes into account the modeling power and computational constraints of the observer. The resulting \\emph{predictive $\\mathcal{V}$-information} encompasses mutual information and other notions of informativeness such as the coefficient of determination. Unlike Shannon's mutual information and in violation of the data processing inequality, $\\mathcal{V}$-information can be created through computation. This is consistent with deep neural networks extracting hierarchies of progressively more informative features in representation learning. Additionally, we show that by incorporating computational constraints, $\\mathcal{V}$-information can be reliably estimated from data even in high dimensions with PAC-style guarantees. Empirically, we demonstrate predictive $\\mathcal{V}$-information is more effective than mutual information for structure learning and fair representation learning.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Yilun Xu", "Shengjia Zhao", "Jiaming Song", "Russell Stewart", "Stefano Ermon" ], "externalIds": { "MAG": "3006705448", "DBLP": "conf/iclr/XuZSSE20", "ArXiv": "2002.10689", "CorpusId": 211296302 }, "url": "https://www.semanticscholar.org/paper/0e4f7290f9cce44284665ddb399abeea0d72c557", "referenceCount": 48, "citationCount": 150, "influentialCitationCount": 27, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Weakly-Supervised Disentanglement Without Compromises", "abstract": "Intelligent agents should be able to learn useful representations by observing changes in their environment. We model such observations as pairs of non-i.i.d. images sharing at least one of the underlying factors of variation. First, we theoretically show that only knowing how many factors have changed, but not which ones, is sufficient to learn disentangled representations. Second, we provide practical algorithms that learn disentangled representations from pairs of images without requiring annotation of groups, individual factors, or the number of factors that have changed. Third, we perform a large-scale empirical study and show that such pairs of observations are sufficient to reliably learn disentangled representations on several benchmark data sets. Finally, we evaluate our learned representations and find that they are simultaneously useful on a diverse suite of tasks, including generalization under covariate shifts, fairness, and abstract reasoning. 
Overall, our results demonstrate that weak supervision enables learning of useful disentangled representations in realistic scenarios.", "year": 2020, "venue": "International Conference on Machine Learning", "authors": [ "Francesco Locatello", "Ben Poole", "Gunnar Rätsch", "B. Scholkopf", "Olivier Bachem", "M. Tschannen" ], "externalIds": { "ArXiv": "2002.02886", "DBLP": "conf/icml/LocatelloPRSBT20", "MAG": "3005031238", "CorpusId": 211066424 }, "url": "https://www.semanticscholar.org/paper/3538c520244b508945476f0814d2ba1e8f22307e", "referenceCount": 79, "citationCount": 283, "influentialCitationCount": 49, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "On the Transfer of Inductive Bias from Simulation to the Real World: a New Disentanglement Dataset", "abstract": "Learning meaningful and compact representations with disentangled semantic aspects is considered to be of key importance in representation learning. Since real-world data is notoriously costly to collect, many recent state-of-the-art disentanglement models have heavily relied on synthetic toy data-sets. In this paper, we propose a novel data-set which consists of over one million images of physical 3D objects with seven factors of variation, such as object color, shape, size and position. In order to be able to control all the factors of variation precisely, we built an experimental platform where the objects are being moved by a robotic arm. In addition, we provide two more datasets which consist of simulations of the experimental setup. These datasets provide for the first time the possibility to systematically investigate how well different disentanglement methods perform on real data in comparison to simulation, and how simulated data can be leveraged to build better representations of the real world. We provide a first experimental study of these questions and our results indicate that learned models transfer poorly, but that model and hyperparameter selection is an effective means of transferring information to the real world.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Muhammad Waleed Gondal", "Manuel Wüthrich", "Ðorðe Miladinovic", "Francesco Locatello", "M. Breidt", "V. Volchkov", "J. Akpo", "Olivier Bachem", "B. Scholkopf", "Stefan Bauer" ], "externalIds": { "DBLP": "journals/corr/abs-1906-03292", "MAG": "2990376402", "ArXiv": "1906.03292", "CorpusId": 182952649 }, "url": "https://www.semanticscholar.org/paper/a3da5fa82d316513ade2dc355ee058af58487751", "referenceCount": 60, "citationCount": 124, "influentialCitationCount": 23, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Challenging Common Assumptions in the Unsupervised Learning of Disentangled Representations", "abstract": "The key idea behind the unsupervised learning of disentangled representations is that real-world data is generated by a few explanatory factors of variation which can be recovered by unsupervised learning algorithms. In this paper, we provide a sober look at recent progress in the field and challenge some common assumptions. We first theoretically show that the unsupervised learning of disentangled representations is fundamentally impossible without inductive biases on both the models and the data. Then, we train more than 12000 models covering most prominent methods and evaluation metrics in a reproducible large-scale experimental study on seven different data sets. 
We observe that while the different methods successfully enforce properties ``encouraged'' by the corresponding losses, well-disentangled models seemingly cannot be identified without supervision. Furthermore, increased disentanglement does not seem to lead to a decreased sample complexity of learning for downstream tasks. Our results suggest that future work on disentanglement learning should be explicit about the role of inductive biases and (implicit) supervision, investigate concrete benefits of enforcing disentanglement of the learned representations, and consider a reproducible experimental setup covering several data sets.", "year": 2018, "venue": "International Conference on Machine Learning", "authors": [ "Francesco Locatello", "Stefan Bauer", "Mario Lucic", "S. Gelly", "B. Scholkopf", "Olivier Bachem" ], "externalIds": { "ArXiv": "1811.12359", "DBLP": "conf/icml/LocatelloBLRGSB19", "MAG": "3006182312", "CorpusId": 54089884 }, "url": "https://www.semanticscholar.org/paper/9c5c794094fbf5da8c48df5c3242615dc0b1d245", "referenceCount": 68, "citationCount": 1323, "influentialCitationCount": 183, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Fast Decoding in Sequence Models using Discrete Latent Variables", "abstract": "Autoregressive sequence models based on deep neural networks, such as RNNs, Wavenet and the Transformer attain state-of-the-art results on many tasks. However, they are difficult to parallelize and are thus slow at processing long sequences. RNNs lack parallelism both during training and decoding, while architectures like WaveNet and Transformer are much more parallelizable during training, yet still operate sequentially during decoding. \nInspired by [arXiv:1711.00937], we present a method to extend sequence models using discrete latent variables that makes decoding much more parallelizable. We first auto-encode the target sequence into a shorter sequence of discrete latent variables, which at inference time is generated autoregressively, and finally decode the output sequence from this shorter latent sequence in parallel. To this end, we introduce a novel method for constructing a sequence of discrete latent variables and compare it with previously introduced methods. Finally, we evaluate our model end-to-end on the task of neural machine translation, where it is an order of magnitude faster at decoding than comparable autoregressive models. While lower in BLEU than purely autoregressive models, our model achieves higher scores than previously proposed non-autoregressive translation models.", "year": 2018, "venue": "International Conference on Machine Learning", "authors": [ "Łukasz Kaiser", "Aurko Roy", "Ashish Vaswani", "Niki Parmar", "Samy Bengio", "Jakob Uszkoreit", "Noam M. Shazeer" ], "externalIds": { "DBLP": "journals/corr/abs-1803-03382", "MAG": "2789543585", "ArXiv": "1803.03382", "CorpusId": 4720016 }, "url": "https://www.semanticscholar.org/paper/2d08ed53491053d84b6de89aedbf2178b9c8cf84", "referenceCount": 51, "citationCount": 220, "influentialCitationCount": 22, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Disentangling by Factorising", "abstract": "We define and address the problem of unsupervised learning of disentangled representations on data generated from independent factors of variation. We propose FactorVAE, a method that disentangles by encouraging the distribution of representations to be factorial and hence independent across the dimensions. 
We show that it improves upon $\\beta$-VAE by providing a better trade-off between disentanglement and reconstruction quality. Moreover, we highlight the problems of a commonly used disentanglement metric and introduce a new metric that does not suffer from them.", "year": 2018, "venue": "International Conference on Machine Learning", "authors": [ "Hyunjik Kim", "A. Mnih" ], "externalIds": { "MAG": "2949452211", "DBLP": "conf/icml/KimM18", "ArXiv": "1802.05983", "CorpusId": 3380833 }, "url": "https://www.semanticscholar.org/paper/04541599accc47d8174f63345ce9c987ef21685b", "referenceCount": 61, "citationCount": 1239, "influentialCitationCount": 265, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Framework for the Quantitative Evaluation of Disentangled Representations", "abstract": "Recent AI research has emphasised the importance of learning disentangled representations of the explanatory factors behind data. Despite the growing interest in models which can learn such representations, visual inspection remains the standard evaluation metric. While various desiderata have been implied in recent definitions, it is currently unclear what exactly makes one disentangled representation better than another. In this work we propose a framework for the quantitative evaluation of disentangled representations when the ground-truth latent structure is available. Three criteria are explicitly defined and quantified to elucidate the quality of learnt representations and thus compare models on an equal basis. To illustrate the appropriateness of the framework, we employ it to compare quantitatively the representations learned by recent state-of-the-art models.", "year": 2018, "venue": "International Conference on Learning Representations", "authors": [ "Cian Eastwood", "Christopher K. I. Williams" ], "externalIds": { "MAG": "2785961484", "DBLP": "conf/iclr/EastwoodW18", "CorpusId": 19571619 }, "url": "https://www.semanticscholar.org/paper/adf2ac6b99b7d48b6a9c908532ca249de2cec3ae", "referenceCount": 37, "citationCount": 428, "influentialCitationCount": 80, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Neural Discrete Representation Learning", "abstract": "Learning useful representations without supervision remains a key challenge in machine learning. In this paper, we propose a simple yet powerful generative model that learns such discrete representations. Our model, the Vector Quantised-Variational AutoEncoder (VQ-VAE), differs from VAEs in two key ways: the encoder network outputs discrete, rather than continuous, codes; and the prior is learnt rather than static. In order to learn a discrete latent representation, we incorporate ideas from vector quantisation (VQ). Using the VQ method allows the model to circumvent issues of \"posterior collapse\" -- where the latents are ignored when they are paired with a powerful autoregressive decoder -- typically observed in the VAE framework. Pairing these representations with an autoregressive prior, the model can generate high quality images, videos, and speech as well as doing high quality speaker conversion and unsupervised learning of phonemes, providing further evidence of the utility of the learnt representations.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Aäron van den Oord", "O. Vinyals", "K. 
Kavukcuoglu" ], "externalIds": { "MAG": "2752796333", "ArXiv": "1711.00937", "DBLP": "conf/nips/OordVK17", "CorpusId": 20282961 }, "url": "https://www.semanticscholar.org/paper/f466157848d1a7772fb6d02cdac9a7a5e7ef982e", "referenceCount": 43, "citationCount": 3716, "influentialCitationCount": 609, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "beta-VAE: Learning Basic Visual Concepts with a Constrained Variational Framework", "abstract": "an", "year": 2016, "venue": "International Conference on Learning Representations", "authors": [ "I. Higgins", "L. Matthey", "Arka Pal", "Christopher P. Burgess", "Xavier Glorot", "M. Botvinick", "S. Mohamed", "Alexander Lerchner" ], "externalIds": { "MAG": "2753738274", "CorpusId": 46798026 }, "url": "https://www.semanticscholar.org/paper/a90226c41b79f8b06007609f39f82757073641e2", "referenceCount": 34, "citationCount": 4451, "influentialCitationCount": 549, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Categorical Reparameterization with Gumbel-Softmax", "abstract": "Categorical variables are a natural choice for representing discrete structure in the world. However, stochastic neural networks rarely use categorical latent variables due to the inability to backpropagate through samples. In this work, we present an efficient gradient estimator that replaces the non-differentiable sample from a categorical distribution with a differentiable sample from a novel Gumbel-Softmax distribution. This distribution has the essential property that it can be smoothly annealed into a categorical distribution. We show that our Gumbel-Softmax estimator outperforms state-of-the-art gradient estimators on structured output prediction and unsupervised generative modeling tasks with categorical latent variables, and enables large speedups on semi-supervised classification.", "year": 2016, "venue": "International Conference on Learning Representations", "authors": [ "Eric Jang", "S. Gu", "Ben Poole" ], "externalIds": { "MAG": "2950151997", "DBLP": "journals/corr/JangGP16", "ArXiv": "1611.01144", "CorpusId": 2428314 }, "url": "https://www.semanticscholar.org/paper/29e944711a354c396fad71936f536e83025b6ce0", "referenceCount": 33, "citationCount": 4786, "influentialCitationCount": 592, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "The Concrete Distribution: A Continuous Relaxation of Discrete Random Variables", "abstract": "The reparameterization trick enables optimizing large scale stochastic computation graphs via gradient descent. The essence of the trick is to refactor each stochastic node into a differentiable function of its parameters and a random variable with fixed distribution. After refactoring, the gradients of the loss propagated by the chain rule through the graph are low variance unbiased estimators of the gradients of the expected loss. While many continuous random variables have such reparameterizations, discrete random variables lack useful reparameterizations due to the discontinuous nature of discrete states. In this work we introduce Concrete random variables---continuous relaxations of discrete random variables. The Concrete distribution is a new family of distributions with closed form densities and a simple reparameterization. 
Whenever a discrete stochastic node of a computation graph can be refactored into a one-hot bit representation that is treated continuously, Concrete stochastic nodes can be used with automatic differentiation to produce low-variance biased gradients of objectives (including objectives that depend on the log-probability of latent stochastic nodes) on the corresponding discrete graph. We demonstrate the effectiveness of Concrete relaxations on density estimation and structured prediction tasks using neural networks.", "year": 2016, "venue": "International Conference on Learning Representations", "authors": [ "Chris J. Maddison", "A. Mnih", "Y. Teh" ], "externalIds": { "ArXiv": "1611.00712", "DBLP": "conf/iclr/MaddisonMT17", "MAG": "2548228487", "CorpusId": 14307651 }, "url": "https://www.semanticscholar.org/paper/515a21e90117941150923e559729c59f5fdade1c", "referenceCount": 57, "citationCount": 2334, "influentialCitationCount": 241, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Representation Learning: A Review and New Perspectives", "abstract": "The success of machine learning algorithms generally depends on data representation, and we hypothesize that this is because different representations can entangle and hide more or less the different explanatory factors of variation behind the data. Although specific domain knowledge can be used to help design representations, learning with generic priors can also be used, and the quest for AI is motivating the design of more powerful representation-learning algorithms implementing such priors. This paper reviews recent work in the area of unsupervised feature learning and deep learning, covering advances in probabilistic models, autoencoders, manifold learning, and deep networks. This motivates longer term unanswered questions about the appropriate objectives for learning good representations, for computing representations (i.e., inference), and the geometrical connections between representation learning, density estimation, and manifold learning.", "year": 2012, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Yoshua Bengio", "Aaron C. Courville", "Pascal Vincent" ], "externalIds": { "ArXiv": "1206.5538", "MAG": "2952111767", "DBLP": "journals/pami/BengioCV13", "DOI": "10.1109/TPAMI.2013.50", "CorpusId": 393948, "PubMed": "23787338" }, "url": "https://www.semanticscholar.org/paper/184ac0766262312ba76bbdece4e7ffad0aa8180b", "referenceCount": 264, "citationCount": 11612, "influentialCitationCount": 550, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine", "Mathematics" ] }, { "title": "When is Unsupervised Disentanglement Possible?", "abstract": "A common assumption in many domains is that high dimensional data are a smooth nonlinear function of a small number of independent factors. When is it possible to recover the factors from unlabeled data? In the context of deep models this problem is called “disentanglement” and was recently shown to be impossible without additional strong assumptions [17, 19]. In this paper, we show that the assumption of local isometry together with non-Gaussianity of the factors, is sufficient to provably recover disentangled representations from data. We leverage recent advances in deep generative models to construct manifolds of highly realistic images for which the ground truth latent representation is known, and test whether modern and classical methods succeed in recovering the latent factors. 
For many different manifolds, we find that a spectral method that explicitly optimizes local isometry and non-Gaussianity consistently finds the correct latent factors, while baseline deep autoencoders do not. We propose how to encourage deep autoencoders to find encodings that satisfy local isometry and show that this helps them discover disentangled representations. Overall, our results suggest that in some realistic settings, unsupervised disentanglement is provably possible, without any domain-specific assumptions. In this paper we show that the assumptions of local isometry and non-Gaussianity are sufficient to provably recover disentangled representations.", "year": 2021, "venue": "", "authors": [ "Daniel P. Horan", "Eitan Richardson", "Yair Weiss" ], "externalIds": { "CorpusId": 248497880 }, "url": "https://www.semanticscholar.org/paper/db562200ddcdcddb582ad40570a1b015c7d332bc", "referenceCount": 29, "citationCount": 36, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "High resolution disentanglement datasets", "abstract": null, "year": 2019, "venue": "github", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "GENERATIVE ADVERSARIAL NETS", "abstract": "Estimating individualized treatment effects (ITE) is a challenging task due to the need for an individual’s potential outcomes to be learned from biased data and without having access to the counterfactuals. We propose a novel method for inferring ITE based on the Generative Adversarial Nets (GANs) framework. Our method, termed Generative Adversarial Nets for inference of Individualized Treatment Effects (GANITE), is motivated by the possibility that we can capture the uncertainty in the counterfactual distributions by attempting to learn them using a GAN. We generate proxies of the counterfactual outcomes using a counterfactual generator, G, and then pass these proxies to an ITE generator, I, in order to train it. By modeling both of these using the GAN framework, we are able to infer based on the factual data, while still accounting for the unseen counterfactuals. We test our method on three real-world datasets (with both binary and multiple treatments) and show that GANITE outperforms state-of-the-art methods.", "year": 2018, "venue": "", "authors": [ "Individualized Treat", "Jinsung Yoon" ], "externalIds": { "CorpusId": 10319744 }, "url": "https://www.semanticscholar.org/paper/c68796f833a7151f0a63d1d1608dc902b4fdc9b6", "referenceCount": 24, "citationCount": 28002, "influentialCitationCount": 3321, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Isolating sources of disentanglement in VAEs", "abstract": null, "year": 2018, "venue": "Proceedings of the 32nd International Conference on Neural Information Processing Systems", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Continuous Relaxation Training of Discrete Latent Variable Image Models", "abstract": "Despite recent improvements in training methodology, discrete latent variable models have failed to achieve the performance and popularity of their continuous counterparts. 
Here, we evaluate several approaches to training large-scale image models on CIFAR-10 using a probabilistic variant of the recently proposed Vector Quantized VAE architecture. We find that biased estimators such as continuous relaxations provide reliable methods for training these models while unbiased score-function-based estimators like VIMCO struggle in high-dimensional discrete spaces. Furthermore, we observe that the learned discrete codes lie on low-dimensional manifolds, indicating that discrete latent variables can learn to represent continuous latent quantities. Our findings show that continuous relaxation training of discrete latent variable models is a powerful method for learning representations that can flexibly capture both continuous and discrete aspects of natural data.", "year": 2017, "venue": "", "authors": [ "C. Sønderby" ], "externalIds": { "MAG": "2969310783", "CorpusId": 37808701 }, "url": "https://www.semanticscholar.org/paper/4dc3e32445a01be787b13c4f3aa7af848130bec8", "referenceCount": 15, "citationCount": 24, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Measuring disentanglement: A review of metrics", "abstract": null, "year": null, "venue": "IEEE Transactions on Neural Networks and Learning Systems", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "The PyTorch Lightning team", "abstract": null, "year": null, "venue": "PyTorch Lightning", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Are Music Foundation Models Better at Singing Voice Deepfake Detection? Far-Better Fuse them with Speech Foundation Models": { "paper_title": "Are Music Foundation Models Better at Singing Voice Deepfake Detection? Far-Better Fuse them with Speech Foundation Models", "arxiv_id": "2409.14131v1", "keyword": "representation learning", "authors": [ "Orchid Chetia Phukan", "Sarthak Jain", "Swarup Ranjan Behera", "Arun Balaji Buduru", "Rajesh Sharma", "S. R Mahadeva Prasanna" ], "references": [ { "title": "Singing Voice Graph Modeling for SingFake Detection", "abstract": "Detecting singing voice deepfakes, or SingFake, involves determining the authenticity and copyright of a singing voice. Existing models for speech deepfake detection have struggled to adapt to unseen attacks in this unique singing voice domain of human vocalization. To bridge the gap, we present a groundbreaking SingGraph model. The model synergizes the capabilities of the MERT acoustic music understanding model for pitch and rhythm analysis with the wav2vec2.0 model for linguistic analysis of lyrics. Additionally, we advocate for using RawBoost and beat matching techniques grounded in music domain knowledge for singing voice augmentation, thereby enhancing SingFake detection performance. 
Our proposed method achieves new state-of-the-art (SOTA) results within the SingFake dataset, surpassing the previous SOTA model across three distinct scenarios: it improves EER relatively for seen singers by 13.2%, for unseen singers by 24.3%, and unseen singers using different codecs by 37.1%.", "year": 2024, "venue": "Interspeech", "authors": [ "Xuan-Bo Chen", "Haibin Wu", "Jyh-Shing Roger Jang", "Hung-yi Lee" ], "externalIds": { "ArXiv": "2406.03111", "DOI": "10.21437/interspeech.2024-1185", "CorpusId": 270257838 }, "url": "https://www.semanticscholar.org/paper/9f256a7fd8ace1bf4bea7742eb329ed2be4bdf93", "referenceCount": 33, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering" ] }, { "title": "CtrSVDD: A Benchmark Dataset and Baseline Analysis for Controlled Singing Voice Deepfake Detection", "abstract": "Recent singing voice synthesis and conversion advancements necessitate robust singing voice deepfake detection (SVDD) models. Current SVDD datasets face challenges due to limited controllability, diversity in deepfake methods, and licensing restrictions. Addressing these gaps, we introduce CtrSVDD, a large-scale, diverse collection of bonafide and deepfake singing vocals. These vocals are synthesized using state-of-the-art methods from publicly accessible singing voice datasets. CtrSVDD includes 47.64 hours of bonafide and 260.34 hours of deepfake singing vocals, spanning 14 deepfake methods and involving 164 singer identities. We also present a baseline system with flexible front-end features, evaluated against a structured train/dev/eval split. The experiments show the importance of feature selection and highlight a need for generalization towards deepfake methods that deviate further from training distribution. The CtrSVDD dataset and baselines are publicly accessible.", "year": 2024, "venue": "Interspeech", "authors": [ "Yongyi Zang", "Jiatong Shi", "You Zhang", "Ryuichi Yamamoto", "Jionghao Han", "Yuxun Tang", "Shengyuan Xu", "Wenxiao Zhao", "Jing Guo", "T. Toda", "Zhiyao Duan" ], "externalIds": { "ArXiv": "2406.02438", "DBLP": "journals/corr/abs-2406-02438", "DOI": "10.21437/Interspeech.2024-2242", "CorpusId": 270226377 }, "url": "https://www.semanticscholar.org/paper/c0ed3c760fed07d59d0792561d8cd929d0cd0c13", "referenceCount": 34, "citationCount": 6, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Heterogeneity over Homogeneity: Investigating Multilingual Speech Pre-Trained Models for Detecting Audio Deepfake", "abstract": "In this work, we investigate multilingual speech Pre-Trained models (PTMs) for Audio deepfake detection (ADD). We hypothesize that multilingual PTMs trained on large-scale diverse multilingual data gain knowledge about diverse pitches, accents, and tones, during their pre-training phase and making them more robust to variations. As a result, they will be more effective for detecting audio deepfakes. To validate our hypothesis, we extract representations from state-of-the-art (SOTA) PTMs including monolingual, multilingual as well as PTMs trained for speaker and emotion recognition, and evaluated them on ASVSpoof 2019 (ASV), In-the-Wild (ITW), and DECRO benchmark databases. We show that representations from multilingual PTMs, with simple downstream networks, attain the best performance for ADD compared to other PTM representations, which validates our hypothesis. 
We also explore the possibility of fusion of selected PTM representations for further improvements in ADD, and we propose a framework, MiO (Merge into One) for this purpose. With MiO, we achieve SOTA performance on ASV and ITW and comparable performance on DECRO with current SOTA works.", "year": 2024, "venue": "NAACL-HLT", "authors": [ "Orchid Chetia Phukan", "Gautam Siddharth Kashyap", "Arun Balaji Buduru", "Rajesh Sharma" ], "externalIds": { "DBLP": "conf/naacl/PhukanKBS24", "ArXiv": "2404.00809", "DOI": "10.18653/v1/2024.findings-naacl.160", "CorpusId": 268820148 }, "url": "https://www.semanticscholar.org/paper/3c3c8b523a828374d35e40b7262508c8b34e19ba", "referenceCount": 42, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Experimental Study: Enhancing Voice Spoofing Detection Models with wav2vec 2.0", "abstract": "Conventional spoofing detection systems have heavily relied on the use of handcrafted features derived from speech data. However, a notable shift has recently emerged towards the direct utilization of raw speech waveforms, as demonstrated by methods like SincNet filters. This shift underscores the demand for more sophisticated audio sample features. Moreover, the success of deep learning models, particularly those utilizing large pretrained wav2vec 2.0 as a featurization front-end, highlights the importance of refined feature encoders. In response, this research assessed the representational capability of wav2vec 2.0 as an audio feature extractor, modifying the size of its pretrained Transformer layers through two key adjustments: (1) selecting a subset of layers starting from the leftmost one and (2) fine-tuning a portion of the selected layers from the rightmost one. We complemented this analysis with five spoofing detection back-end models, with a primary focus on AASIST, enabling us to pinpoint the optimal configuration for the selection and fine-tuning process. In contrast to conventional handcrafted features, our investigation identified several spoofing detection systems that achieve state-of-the-art performance in the ASVspoof 2019 LA dataset. This comprehensive exploration offers valuable insights into feature selection strategies, advancing the field of spoofing detection.", "year": 2024, "venue": "arXiv.org", "authors": [ "Taein Kang", "Soyul Han", "Sunmook Choi", "Jaejin Seo", "Sanghyeok Chung", "Seungeun Lee", "Seungsang Oh", "Il-Youp Kwak" ], "externalIds": { "ArXiv": "2402.17127", "DBLP": "journals/corr/abs-2402-17127", "DOI": "10.48550/arXiv.2402.17127", "CorpusId": 268037025 }, "url": "https://www.semanticscholar.org/paper/35b81a9ffa080ab29ab047eabca74b6993796533", "referenceCount": 26, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Do Vision and Language Encoders Represent the World Similarly?", "abstract": "Aligned text-image encoders such as CLIP have become the de-facto model for vision-language tasks. Furthermore, modality-specific encoders achieve impressive performances in their respective domains. This raises a central question: does an alignment exist between unimodal vision and language encoders since they fundamentally represent the same physical world? 
Analyzing the latent spaces structure of vision and language models on image-caption benchmarks using the Centered Kernel Alignment (CKA), we find that the representation spaces of unaligned and aligned encoders are semantically similar. In the absence of statistical similarity in aligned encoders like CLIP, we show that a possible matching of unaligned encoders exists without any training. We frame this as a seeded graph-matching problem exploiting the semantic similarity between graphs and propose two methods - a Fast Quadratic Assignment Problem optimization, and a novel localized CKA metric-based matching/retrieval. We demonstrate the effectiveness of this on several downstream tasks including cross-lingual, cross-domain caption matching and image classification. Code available at github.com/mayug/0-shot-llm-vision.", "year": 2024, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Mayug Maniparambil", "Raiymbek Akshulakov", "Y. A. D. Djilali", "Sanath Narayan", "M. Seddik", "K. Mangalam", "Noel E. O'Connor" ], "externalIds": { "ArXiv": "2401.05224", "DBLP": "journals/corr/abs-2401-05224", "DOI": "10.1109/CVPR52733.2024.01359", "CorpusId": 266902604 }, "url": "https://www.semanticscholar.org/paper/50d405ad93f903573fc37d5ecf449181648bc348", "referenceCount": 49, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Investigating the Effectiveness of Speaker Embeddings for Shout Intensity Prediction", "abstract": "The automatic detection of shouted speeches has attracted much research attention as a core technology of audio surveillance systems. A common strategy in the past literature has been to train a binary classifier using labels of shouted or normal speeches. Although it is known that the acoustic properties of shouted speech usually differ among speakers, especially male and female groups, the conventional methods did not pay attention to encoding such personal and gender-related style information. There are recent findings that speaker embeddings, which are produced by a model trained for speaker identification, can improve other tasks such as speech emotion classification. Thus, this paper investigates the effectiveness of such speaker embeddings for a shouted speech detection problem. Specifically, we verify whether x-vector embeddings can work as effective auxiliary features for the target problem compared with a simple gender label of an input speech whose ground truth is generally unavailable in real situations. Our experiments on predicting the shout intensity beyond the traditional binary classification demonstrated the x-vector embeddings achieved performance improvement over the single use of speech features.", "year": 2023, "venue": "Asia-Pacific Signal and Information Processing Association Annual Summit and Conference", "authors": [ "Takahiro Fukumori", "Taito Ishida", "Yoichi Yamashita" ], "externalIds": { "DBLP": "conf/apsipa/FukumoriIY23", "DOI": "10.1109/APSIPAASC58517.2023.10317562", "CorpusId": 265317523 }, "url": "https://www.semanticscholar.org/paper/f387265343c7354d672020e28ebf3da999d59113", "referenceCount": 21, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SingFake: Singing Voice Deepfake Detection", "abstract": "The rise of singing voice synthesis presents critical challenges to artists and industry stakeholders over unauthorized voice usage. 
Unlike synthesized speech, synthesized singing voices are typically released in songs containing strong background music that may hide synthesis artifacts. Additionally, singing voices present different acoustic and linguistic characteristics from speech utterances. These unique properties make singing voice deepfake detection a relevant but significantly different problem from synthetic speech detection. In this work, we propose the singing voice deepfake detection task. We first present SingFake, the first curated in-the-wild dataset consisting of 28.93 hours of bonafide and 29.40 hours of deepfake song clips in five languages from 40 singers. We provide a train/validation/test split where the test sets include various scenarios. We then use SingFake to evaluate four state-of-the-art speech countermeasure systems trained on speech utterances. We find these systems lag significantly behind their performance on speech test data. When trained on SingFake, either using separated vocal tracks or song mixtures, these systems show substantial improvement. However, our evaluations also identify challenges associated with unseen singers, communication codecs, languages, and musical contexts, calling for dedicated research into singing voice deepfake detection. The SingFake dataset and related resources are available1.", "year": 2023, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Yongyi Zang", "You Zhang", "Mojtaba Heydari", "Zhiyao Duan" ], "externalIds": { "DBLP": "conf/icassp/Zang0HD24", "ArXiv": "2309.07525", "DOI": "10.1109/ICASSP48485.2024.10448184", "CorpusId": 261822998 }, "url": "https://www.semanticscholar.org/paper/e6e57d8e5deb0be00305045f274af0b12d878b8b", "referenceCount": 22, "citationCount": 18, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "FSD: An Initial Chinese Dataset for Fake Song Detection", "abstract": "Singing voice synthesis and singing voice conversion have significantly advanced, revolutionizing musical experiences. However, the rise of \"Deepfake Songs\" generated by these technologies raises concerns about authenticity. Unlike Audio DeepFake Detection (ADD), the field of song deepfake detection lacks specialized datasets or methods for song authenticity verification. In this paper, we initially construct a Chinese Fake Song Detection (FSD) dataset to investigate the field of song deepfake detection. The fake songs in the FSD dataset are generated by five state-of-the-art singing voice synthesis and singing voice conversion methods. Our initial experiments on FSD revealed the ineffectiveness of existing speech-trained ADD models for the task of song deepfake detection. Thus, we employ the FSD dataset for the training of ADD models. We subsequently evaluate these models under two scenarios: one with the original songs and another with separated vocal tracks. 
Experiment results show that song-trained ADD models exhibit a 38.58% reduction in average equal error rate compared to speech-trained ADD models on the FSD test set.", "year": 2023, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Yuankun Xie", "Jingjing Zhou", "Xiaolin Lu", "Zhenghao Jiang", "Yuxin Yang", "Haonan Cheng", "Long Ye" ], "externalIds": { "DBLP": "journals/corr/abs-2309-02232", "ArXiv": "2309.02232", "DOI": "10.1109/ICASSP48485.2024.10446271", "CorpusId": 261556677 }, "url": "https://www.semanticscholar.org/paper/6c59296e3986826d816a6860bc90b6b34ce43539", "referenceCount": 26, "citationCount": 11, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Audio Deepfake Detection: A Survey", "abstract": "Audio deepfake detection is an emerging active topic. A growing number of literatures have aimed to study deepfake detection algorithms and achieved effective performance, the problem of which is far from being solved. Although there are some review literatures, there has been no comprehensive survey that provides researchers with a systematic overview of these developments with a unified evaluation. Accordingly, in this survey paper, we first highlight the key differences across various types of deepfake audio, then outline and analyse competitions, datasets, features, classifications, and evaluation of state-of-the-art approaches. For each aspect, the basic techniques, advanced developments and major challenges are discussed. In addition, we perform a unified comparison of representative features and classifiers on ASVspoof 2021, ADD 2023 and In-the-Wild datasets for audio deepfake detection, respectively. The survey shows that future research should address the lack of large scale datasets in the wild, poor generalization of existing detection methods to unknown fake attacks, as well as interpretability of detection results.", "year": 2023, "venue": "arXiv.org", "authors": [ "Jiangyan Yi", "Chenglong Wang", "J. Tao", "Xiaohui Zhang", "Chu Yuan Zhang", "Yan Zhao" ], "externalIds": { "ArXiv": "2308.14970", "DBLP": "journals/corr/abs-2308-14970", "DOI": "10.48550/arXiv.2308.14970", "CorpusId": 261276979 }, "url": "https://www.semanticscholar.org/paper/79ba04e381b5e6538767d74e599a08e0f048779c", "referenceCount": 173, "citationCount": 21, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "The Singing Voice Conversion Challenge 2023", "abstract": "We present the latest iteration of the voice conversion challenge (VCC) series, a bi-annual scientific event aiming to compare and understand different voice conversion (VC) systems based on a common dataset. This year we shifted our focus to singing voice conversion (SVC), thus named the challenge the Singing Voice Conversion Challenge (SVCC). A new database was constructed for two tasks, namely in-domain and cross-domain SVC. The challenge was run for two months, and in total we received 26 submissions, including 2 baselines. Through a large-scale crowd-sourced listening test, we observed that for both tasks, although human-level naturalness was achieved by the top system, no team was able to obtain a similarity score as high as the target speakers. Also, as expected, cross-domain SVC is harder than in-domain SVC, especially in the similarity aspect. 
We also investigated whether existing objective measurements were able to predict perceptual performance, and found that only few of them could reach a significant correlation.", "year": 2023, "venue": "Automatic Speech Recognition & Understanding", "authors": [ "Wen-Chin Huang", "Lester Phillip Violeta", "Songxiang Liu", "Jiatong Shi", "Yusuke Yasuda", "T. Toda" ], "externalIds": { "DBLP": "journals/corr/abs-2306-14422", "ArXiv": "2306.14422", "DOI": "10.1109/ASRU57964.2023.10389671", "CorpusId": 259251521 }, "url": "https://www.semanticscholar.org/paper/d7b71afea8a94ab772249bbb19202ca59c38740b", "referenceCount": 63, "citationCount": 33, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "MERT: Acoustic Music Understanding Model with Large-Scale Self-supervised Training", "abstract": "Self-supervised learning (SSL) has recently emerged as a promising paradigm for training generalisable models on large-scale data in the fields of vision, text, and speech. Although SSL has been proven effective in speech and audio, its application to music audio has yet to be thoroughly explored. This is partially due to the distinctive challenges associated with modelling musical knowledge, particularly tonal and pitched characteristics of music. To address this research gap, we propose an acoustic Music undERstanding model with large-scale self-supervised Training (MERT), which incorporates teacher models to provide pseudo labels in the masked language modelling (MLM) style acoustic pre-training. In our exploration, we identified an effective combination of teacher models, which outperforms conventional speech and audio approaches in terms of performance. This combination includes an acoustic teacher based on Residual Vector Quantisation - Variational AutoEncoder (RVQ-VAE) and a musical teacher based on the Constant-Q Transform (CQT). Furthermore, we explore a wide range of settings to overcome the instability in acoustic language model pre-training, which allows our designed paradigm to scale from 95M to 330M parameters. Experimental results indicate that our model can generalise and perform well on 14 music understanding tasks and attain state-of-the-art (SOTA) overall scores.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Yizhi Li", "Ruibin Yuan", "Ge Zhang", "Yi Ma", "Xingran Chen", "Hanzhi Yin", "Chen-Li Lin", "A. Ragni", "Emmanouil Benetos", "N. Gyenge", "R. Dannenberg", "Ruibo Liu", "Wenhu Chen", "Gus G. Xia", "Yemin Shi", "Wen-Fen Huang", "Yi-Ting Guo", "Jie Fu" ], "externalIds": { "ArXiv": "2306.00107", "DBLP": "journals/corr/abs-2306-00107", "DOI": "10.48550/arXiv.2306.00107", "CorpusId": 258999603 }, "url": "https://www.semanticscholar.org/paper/fb08b08916caab4b22c60fa96753f6b9a5886d75", "referenceCount": 64, "citationCount": 59, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Transforming the Embeddings: A Lightweight Technique for Speech Emotion Recognition Tasks", "abstract": "Speech emotion recognition (SER) is a field that has drawn a lot of attention due to its applications in diverse fields. A current trend in methods used for SER is to leverage embeddings from pre-trained models (PTMs) as input features to downstream models. However, the use of embeddings from speaker recognition PTMs hasn't garnered much focus in comparison to other PTM embeddings. 
To fill this gap and in order to understand the efficacy of speaker recognition PTM embeddings, we perform a comparative analysis of five PTM embeddings. Among all, x-vector embeddings performed the best possibly due to its training for speaker recognition leading to capturing various components of speech such as tone, pitch, etc. Our modeling approach which utilizes x-vector embeddings and mel-frequency cepstral coefficients (MFCC) as input features is the most lightweight approach while achieving comparable accuracy to previous state-of-the-art (SOTA) methods in the CREMA-D benchmark.", "year": 2023, "venue": "Interspeech", "authors": [ "Orchid Chetia Phukan", "Arun Balaji Buduru", "Rajesh Sharma" ], "externalIds": { "ArXiv": "2305.18640", "DBLP": "conf/interspeech/PhukanB023", "DOI": "10.21437/interspeech.2023-2561", "CorpusId": 258967416 }, "url": "https://www.semanticscholar.org/paper/4cbf260c8dd0a401b1577ce22d0d9a5a92f0e50d", "referenceCount": 41, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Self-Supervised Representations for Singing Voice Conversion", "abstract": "A singing voice conversion model converts a song in the voice of an arbitrary source singer to the voice of a target singer. Recently, methods that leverage self-supervised audio representations such as HuBERT and Wav2Vec 2.0 have helped further the state-of-the-art. Though these methods produce more natural and melodic singing outputs, they often rely on confusion and disentanglement losses to render the self-supervised representations speaker and pitch-invariant. In this paper, we circumvent disentanglement training and propose a new model that leverages ASR fine-tuned self-supervised representations as inputs to a HiFi-GAN neural vocoder for singing voice conversion. We experiment with different f0 encoding schemes and show that an f0 harmonic generation module that uses a parallel bank of transposed convolutions (PBTC) alongside ASR fine-tuned Wav2Vec 2.0 features results in the best singing voice conversion quality. Additionally, the model is capable of making a spoken voice sing. We also show that a simple f0 shifting scheme during inference helps retain singer identity and bolsters the performance of our singing voice conversion model. Our results are backed up by extensive MOS studies that compare different ablations and baselines.", "year": 2023, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Tejas Jayashankar", "Jilong Wu", "Leda Sari", "David Kant", "Vimal Manohar", "Qing He" ], "externalIds": { "ArXiv": "2303.12197", "DBLP": "conf/icassp/JayashankarWSKMH23", "DOI": "10.1109/ICASSP49357.2023.10097147", "CorpusId": 257663367 }, "url": "https://www.semanticscholar.org/paper/6528ec4d54f75607d45bf73d187c2df5f7209e06", "referenceCount": 26, "citationCount": 13, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Learning From Yourself: A Self-Distillation Method For Fake Speech Detection", "abstract": "In this paper, we propose a novel self-distillation method for fake speech detection (FSD), which can significantly improve the performance of FSD without increasing the model complexity. For FSD, some fine-grained information is very important, such as spectrogram defects, mute segments, and so on, which are often perceived by shallow networks. 
However, shallow networks have much noise, which can not capture this very well. To address this problem, we propose using the deepest network instruct shallow network for enhancing shallow networks. Specifically, the networks of FSD are divided into several segments, the deepest network being used as the teacher model, and all shallow networks become multiple student models by adding classifiers. Meanwhile, the distillation path between the deepest network feature and shallow network features is used to reduce the feature difference. A series of experimental results on the ASVspoof 2019 LA and PA datasets show the effectiveness of the proposed method, with significant improvements compared to the baseline.", "year": 2023, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Jun Xue", "Cunhang Fan", "Jiangyan Yi", "Chenglong Wang", "Zhengqi Wen", "Dan Zhang", "Zhao Lv" ], "externalIds": { "DBLP": "journals/corr/abs-2303-01211", "ArXiv": "2303.01211", "DOI": "10.1109/ICASSP49357.2023.10096837", "CorpusId": 257280391 }, "url": "https://www.semanticscholar.org/paper/437478a8b73532936d53f5c96cc84ec1dc5db0b6", "referenceCount": 19, "citationCount": 26, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "MAP-Music2Vec: A Simple and Effective Baseline for Self-Supervised Music Audio Representation Learning", "abstract": "The deep learning community has witnessed an exponentially growing interest in self-supervised learning (SSL). However, it still remains unexplored how to build a framework for learning useful representations of raw music waveforms in a self-supervised manner. In this work, we design Music2Vec, a framework exploring different SSL algorithmic components and tricks for music audio recordings. Our model achieves comparable results to the state-of-the-art (SOTA) music SSL model Jukebox, despite being significantly smaller with less than 2% of parameters of the latter. The model will be released on Huggingface(Please refer to: https://huggingface.co/m-a-p/music2vec-v1)", "year": 2022, "venue": "arXiv.org", "authors": [ "Yizhi Li", "Ruibin Yuan", "Ge Zhang", "Yi Ma", "Chenghua Lin", "Xingran Chen", "A. Ragni", "Hanzhi Yin", "Zhijie Hu", "Haoyu He", "Emmanouil Benetos", "Norbert Gyenge", "Ruibo Liu", "Jie Fu" ], "externalIds": { "DBLP": "journals/corr/abs-2212-02508", "ArXiv": "2212.02508", "DOI": "10.48550/arXiv.2212.02508", "CorpusId": 254275123 }, "url": "https://www.semanticscholar.org/paper/ac36557d477dec06d80c76772094acdc84c60a17", "referenceCount": 15, "citationCount": 15, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "SAMO: Speaker Attractor Multi-Center One-Class Learning For Voice Anti-Spoofing", "abstract": "Voice anti-spoofing systems are crucial auxiliaries for automatic speaker verification (ASV) systems. A major challenge is caused by unseen attacks empowered by advanced speech synthesis technologies. Our previous research on one-class learning has improved the generalization ability to unseen attacks by compacting the bona fide speech in the embedding space. However, such compactness lacks consideration of the diversity of speakers. In this work, we propose speaker attractor multi-center one-class learning (SAMO), which clusters bona fide speech around a number of speaker attractors and pushes away spoofing attacks from all the attractors in a high-dimensional embedding space. 
For training, we propose an algorithm for the co-optimization of bona fide speech clustering and bona fide/spoof classification. For inference, we propose strategies to enable anti-spoofing for speakers without enrollment. Our proposed system outperforms existing state-of-the-art single systems with a relative improvement of 38% on equal error rate (EER) on the ASVspoof2019 LA evaluation set.", "year": 2022, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Sivan Ding", "You Zhang", "Z. Duan" ], "externalIds": { "DBLP": "journals/corr/abs-2211-02718", "ArXiv": "2211.02718", "DOI": "10.1109/ICASSP49357.2023.10094704", "CorpusId": 253384255 }, "url": "https://www.semanticscholar.org/paper/8eda6d50a54bd1f292808d6564a1b2d65e7245c2", "referenceCount": 35, "citationCount": 19, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Reliability of CKA as a Similarity Measure in Deep Learning", "abstract": "Comparing learned neural representations in neural networks is a challenging but important problem, which has been approached in different ways. The Centered Kernel Alignment (CKA) similarity metric, particularly its linear variant, has recently become a popular approach and has been widely used to compare representations of a network's different layers, of architecturally similar networks trained differently, or of models with different architectures trained on the same data. A wide variety of conclusions about similarity and dissimilarity of these various representations have been made using CKA. In this work we present analysis that formally characterizes CKA sensitivity to a large class of simple transformations, which can naturally occur in the context of modern machine learning. This provides a concrete explanation of CKA sensitivity to outliers, which has been observed in past works, and to transformations that preserve the linear separability of the data, an important generalization attribute. We empirically investigate several weaknesses of the CKA similarity metric, demonstrating situations in which it gives unexpected or counter-intuitive results. Finally we study approaches for modifying representations to maintain functional behaviour while changing the CKA value. Our results illustrate that, in many cases, the CKA value can be easily manipulated without substantial changes to the functional behaviour of the models, and call for caution when leveraging activation alignment metrics.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Mohammad-Javad Davari", "Stefan Horoi", "A. Natik", "Guillaume Lajoie", "Guy Wolf", "Eugene Belilovsky" ], "externalIds": { "ArXiv": "2210.16156", "DBLP": "journals/corr/abs-2210-16156", "DOI": "10.48550/arXiv.2210.16156", "CorpusId": 253224118 }, "url": "https://www.semanticscholar.org/paper/dfb149bfbfb81de225dbad69aac41ffa617141e8", "referenceCount": 67, "citationCount": 25, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing", "abstract": "Self-supervised learning (SSL) achieves great success in speech recognition, while limited exploration has been attempted for other speech processing tasks. 
As speech signal contains multi-faceted information including speaker identity, paralinguistics, spoken content, etc., learning universal representations for all speech tasks is challenging. To tackle the problem, we propose a new pre-trained model, WavLM, to solve full-stack downstream speech tasks. WavLM jointly learns masked speech prediction and denoising in pre-training. By this means, WavLM does not only keep the speech content modeling capability by the masked speech prediction, but also improves the potential to non-ASR tasks by the speech denoising. In addition, WavLM employs gated relative position bias for the Transformer structure to better capture the sequence ordering of input speech. We also scale up the training dataset from 60 k hours to 94 k hours. WavLM Large achieves state-of-the-art performance on the SUPERB benchmark, and brings significant improvements for various speech processing tasks on their representative benchmarks.", "year": 2021, "venue": "IEEE Journal on Selected Topics in Signal Processing", "authors": [ "Sanyuan Chen", "Chengyi Wang", "Zhengyang Chen", "Yu Wu", "Shujie Liu", "Zhuo Chen", "Jinyu Li", "Naoyuki Kanda", "Takuya Yoshioka", "Xiong Xiao", "Jian Wu", "Long Zhou", "Shuo Ren", "Y. Qian", "Yao Qian", "Micheal Zeng", "Furu Wei" ], "externalIds": { "DBLP": "journals/jstsp/ChenWCWLCLKYXWZ22", "ArXiv": "2110.13900", "DOI": "10.1109/JSTSP.2022.3188113", "CorpusId": 239885872 }, "url": "https://www.semanticscholar.org/paper/416dab850fda842b13a4f28164514d98f836fff7", "referenceCount": 94, "citationCount": 1246, "influentialCitationCount": 294, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "VISinger: Variational Inference with Adversarial Learning for End-to-End Singing Voice Synthesis", "abstract": "In this paper, we propose VISinger, a complete end-to-end high-quality singing voice synthesis (SVS) system that directly generates singing audio from lyrics and musical score. Our approach is inspired by VITS [1], an end-to-end speech generation model which adopts VAE-based posterior encoder augmented with normalizing flow based prior encoder and adversarial decoder. VISinger follows the main architecture of VITS, but makes substantial improvements to the prior encoder according to the characteristics of singing. First, instead of using phoneme-level mean and variance of acoustic features, we introduce a length regulator and a frame prior network to get the frame-level mean and variance on acoustic features, modeling the rich acoustic variation in singing. Second, we further introduce an F0 predictor to guide the frame prior network, leading to stabler singing performance. Finally, to improve the singing rhythm, we modify the duration predictor to specifically predict the phoneme to note duration ratio, helped with singing note normalization. 
Experiments on a professional Mandarin singing corpus show that VISinger significantly outperforms FastSpeech+Neural-Vocoder two-stage approach and the oracle VITS; ablation study demonstrates the effectiveness of different contributions.", "year": 2021, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Yongmao Zhang", "Jian Cong", "Heyang Xue", "Lei Xie", "Pengcheng Zhu", "Mengxiao Bi" ], "externalIds": { "DBLP": "conf/icassp/ZhangCXXZB22", "ArXiv": "2110.08813", "DOI": "10.1109/icassp43922.2022.9747664", "CorpusId": 239016924 }, "url": "https://www.semanticscholar.org/paper/7353bcb7ef870c53a0318d3bf7d5b42c1d58b8d8", "referenceCount": 24, "citationCount": 62, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Unispeech-Sat: Universal Speech Representation Learning With Speaker Aware Pre-Training", "abstract": "Self-supervised learning (SSL) is a long-standing goal for speech processing, since it utilizes large-scale unlabeled data and avoids extensive human labeling. Recent years have witnessed great successes in applying self-supervised learning in speech recognition, while limited exploration was attempted in applying SSL for modeling speaker characteristics. In this paper, we aim to improve the existing SSL framework for speaker representation learning. Two methods are introduced for enhancing the unsupervised speaker information extraction. First, we apply multi-task learning to the current SSL framework, where we integrate utterance-wise contrastive loss with the SSL objective function. Second, for better speaker discrimination, we propose an utterance mixing strategy for data augmentation, where additional overlapped utterances are created unsupervisely and incorporated during training. We integrate the proposed methods into the HuBERT framework. Experiment results on the SUPERB benchmark show that the proposed system achieves state-of-the-art performance in universal representation learning, especially for speaker identification oriented tasks. An ablation study is performed verifying the efficacy of each proposed method. Finally, we scale up the training dataset to 94 thousand hours of public audio data and achieve further performance improvement in all SUPERB tasks.", "year": 2021, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Sanyuan Chen", "Yu Wu", "Chengyi Wang", "Zhengyang Chen", "Zhuo Chen", "Shujie Liu", "Jian Wu", "Yao Qian", "Furu Wei", "Jinyu Li", "Xiangzhan Yu" ], "externalIds": { "ArXiv": "2110.05752", "DBLP": "journals/corr/abs-2110-05752", "DOI": "10.1109/ICASSP43922.2022.9747077", "CorpusId": 238634641 }, "url": "https://www.semanticscholar.org/paper/e871d035824f24f2f82be58f636451214d5b5a71", "referenceCount": 21, "citationCount": 71, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "AASIST: Audio Anti-Spoofing Using Integrated Spectro-Temporal Graph Attention Networks", "abstract": "Artefacts that differentiate spoofed from bona-fide utterances can reside in specific temporal or spectral intervals. Their reliable detection usually depends upon computationally demanding ensemble systems where each subsystem is tuned to some specific artefacts. We seek to develop an efficient, single system that can detect a broad range of different spoofing attacks without score-level ensembles. 
We propose a novel heterogeneous stacking graph attention layer that models artefacts spanning heterogeneous temporal and spectral intervals with a heterogeneous attention mechanism and a stack node. With a new max graph operation that involves a competitive mechanism and a new readout scheme, our approach, named AASIST, outperforms the current state-of-the-art by 20% relative. Even a lightweight variant, AASIST-L, with only 85k parameters, outperforms all competing systems.", "year": 2021, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Jee-weon Jung", "Hee-Soo Heo", "Hemlata Tak", "Hye-jin Shim", "Joon Son Chung", "Bong-Jin Lee", "Ha-jin Yu", "N. Evans" ], "externalIds": { "DBLP": "conf/icassp/JungHTSCLYE22", "ArXiv": "2110.01200", "DOI": "10.1109/icassp43922.2022.9747766", "CorpusId": 238259153 }, "url": "https://www.semanticscholar.org/paper/83ed100306b892da76a7468e6eca6f99074fbd02", "referenceCount": 42, "citationCount": 205, "influentialCitationCount": 57, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Do Vision Transformers See Like Convolutional Neural Networks?", "abstract": "Convolutional neural networks (CNNs) have so far been the de-facto model for visual data. Recent work has shown that (Vision) Transformer models (ViT) can achieve comparable or even superior performance on image classification tasks. This raises a central question: how are Vision Transformers solving these tasks? Are they acting like convolutional networks, or learning entirely different visual representations? Analyzing the internal representation structure of ViTs and CNNs on image classification benchmarks, we find striking differences between the two architectures, such as ViT having more uniform representations across all layers. We explore how these differences arise, finding crucial roles played by self-attention, which enables early aggregation of global information, and ViT residual connections, which strongly propagate features from lower to higher layers. We study the ramifications for spatial localization, demonstrating ViTs successfully preserve input spatial information, with noticeable effects from different classification methods. Finally, we study the effect of (pretraining) dataset scale on intermediate features and transfer learning, and conclude with a discussion on connections to new architectures such as the MLP-Mixer.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "M. Raghu", "Thomas Unterthiner", "Simon Kornblith", "Chiyuan Zhang", "Alexey Dosovitskiy" ], "externalIds": { "DBLP": "journals/corr/abs-2108-08810", "ArXiv": "2108.08810", "CorpusId": 237213700 }, "url": "https://www.semanticscholar.org/paper/39b492db00faead70bc3f4fb4b0364d94398ffdb", "referenceCount": 61, "citationCount": 738, "influentialCitationCount": 54, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "DiffSinger: Singing Voice Synthesis via Shallow Diffusion Mechanism", "abstract": "Singing voice synthesis (SVS) systems are built to synthesize high-quality and expressive singing voice, in which the acoustic model generates the acoustic features (e.g., mel-spectrogram) given a music score. 
Previous singing acoustic models adopt a simple loss (e.g., L1 and L2) or generative adversarial network (GAN) to reconstruct the acoustic features, while they suffer from over-smoothing and unstable training issues respectively, which hinder the naturalness of synthesized singing. \nIn this work, we propose DiffSinger, an acoustic model for SVS based on the diffusion probabilistic model. DiffSinger is a parameterized Markov chain that iteratively converts the noise into mel-spectrogram conditioned on the music score. By implicitly optimizing variational bound, DiffSinger can be stably trained and generate realistic outputs. \nTo further improve the voice quality and speed up inference, we introduce a shallow diffusion mechanism to make better use of the prior knowledge learned by the simple loss. Specifically, DiffSinger starts generation at a shallow step smaller than the total number of diffusion steps, according to the intersection of the diffusion trajectories of the ground-truth mel-spectrogram and the one predicted by a simple mel-spectrogram decoder. Besides, we propose boundary prediction methods to locate the intersection and determine the shallow step adaptively.\nThe evaluations conducted on a Chinese singing dataset demonstrate that DiffSinger outperforms state-of-the-art SVS work. Extensional experiments also prove the generalization of our methods on text-to-speech task (DiffSpeech). Audio samples: https://diffsinger.github.io. Codes: https://github.com/MoonInTheRiver/DiffSinger.", "year": 2021, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Jinglin Liu", "Chengxi Li", "Yi Ren", "Feiyang Chen", "Zhou Zhao" ], "externalIds": { "DBLP": "conf/aaai/Liu00CZ22", "ArXiv": "2105.02446", "DOI": "10.1609/aaai.v36i10.21350", "CorpusId": 235262772 }, "url": "https://www.semanticscholar.org/paper/fe92f3f7ceec008118842d42b578dc25bcba63f9", "referenceCount": 43, "citationCount": 208, "influentialCitationCount": 21, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "Emotion Recognition from Speech Using Wav2vec 2.0 Embeddings", "abstract": "Emotion recognition datasets are relatively small, making the use of the more sophisticated deep learning approaches challenging. In this work, we propose a transfer learning method for speech emotion recognition where features extracted from pre-trained wav2vec 2.0 models are modeled using simple neural networks. We propose to combine the output of several layers from the pre-trained model using trainable weights which are learned jointly with the downstream model. Further, we compare performance using two different wav2vec 2.0 models, with and without finetuning for speech recognition. We evaluate our proposed approaches on two standard emotion databases IEMOCAP and RAVDESS, showing superior performance compared to results in the literature.", "year": 2021, "venue": "Interspeech", "authors": [ "Leonardo Pepino", "P. Riera", "Luciana Ferrer" ], "externalIds": { "ArXiv": "2104.03502", "DBLP": "journals/corr/abs-2104-03502", "DOI": "10.21437/interspeech.2021-703", "CorpusId": 233181984 }, "url": "https://www.semanticscholar.org/paper/52a69f1e4bcf5043b51b79fddb6ae0b285e5d7c1", "referenceCount": 38, "citationCount": 295, "influentialCitationCount": 73, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Do Wide and Deep Networks Learn the Same Things? 
Uncovering How Neural Network Representations Vary with Width and Depth", "abstract": "A key factor in the success of deep neural networks is the ability to scale models to improve performance by varying the architecture depth and width. This simple property of neural network design has resulted in highly effective architectures for a variety of tasks. Nevertheless, there is limited understanding of effects of depth and width on the learned representations. In this paper, we study this fundamental question. We begin by investigating how varying depth and width affects model hidden representations, finding a characteristic block structure in the hidden representations of larger capacity (wider or deeper) models. We demonstrate that this block structure arises when model capacity is large relative to the size of the training set, and is indicative of the underlying layers preserving and propagating the dominant principal component of their representations. This discovery has important ramifications for features learned by different models, namely, representations outside the block structure are often similar across architectures with varying widths and depths, but the block structure is unique to each model. We analyze the output predictions of different model architectures, finding that even when the overall accuracy is similar, wide and deep models exhibit distinctive error patterns and variations across classes.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Thao Nguyen", "M. Raghu", "Simon Kornblith" ], "externalIds": { "ArXiv": "2010.15327", "DBLP": "journals/corr/abs-2010-15327", "MAG": "3095872829", "CorpusId": 225103395 }, "url": "https://www.semanticscholar.org/paper/d21806115a79c960298cfca45a49b24682cac71a", "referenceCount": 47, "citationCount": 229, "influentialCitationCount": 30, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "HiFiSinger: Towards High-Fidelity Neural Singing Voice Synthesis", "abstract": "High-fidelity singing voices usually require higher sampling rate (e.g., 48kHz) to convey expression and emotion. However, higher sampling rate causes the wider frequency band and longer waveform sequences and throws challenges for singing voice synthesis (SVS) in both frequency and time domains. Conventional SVS systems that adopt small sampling rate cannot well address the above challenges. In this paper, we develop HiFiSinger, an SVS system towards high-fidelity singing voice. HiFiSinger consists of a FastSpeech based acoustic model and a Parallel WaveGAN based vocoder to ensure fast training and inference and also high voice quality. To tackle the difficulty of singing modeling caused by high sampling rate (wider frequency band and longer waveform), we introduce multi-scale adversarial training in both the acoustic model and vocoder to improve singing modeling. Specifically, 1) To handle the larger range of frequencies caused by higher sampling rate, we propose a novel sub-frequency GAN (SF-GAN) on mel-spectrogram generation, which splits the full 80-dimensional mel-frequency into multiple sub-bands and models each sub-band with a separate discriminator. 2) To model longer waveform sequences caused by higher sampling rate, we propose a multi-length GAN (ML-GAN) for waveform generation to model different lengths of waveform sequences with separate discriminators. 
3) We also introduce several additional designs and findings in HiFiSinger that are crucial for high-fidelity voices, such as adding F0 (pitch) and V/UV (voiced/unvoiced flag) as acoustic features, choosing an appropriate window/hop size for mel-spectrogram, and increasing the receptive field in vocoder for long vowel modeling. Experiment results show that HiFiSinger synthesizes high-fidelity singing voices with much higher quality: 0.32/0.44 MOS gain over 48kHz/24kHz baseline and 0.83 MOS gain over previous SVS systems.", "year": 2020, "venue": "arXiv.org", "authors": [ "Jiawei Chen", "Xu Tan", "Jian Luan", "Tao Qin", "Tie-Yan Liu" ], "externalIds": { "ArXiv": "2009.01776", "MAG": "3082910224", "DBLP": "journals/corr/abs-2009-01776", "CorpusId": 221470340 }, "url": "https://www.semanticscholar.org/paper/5e6e88267d7e6f2f15b778f3d2bba09ab7a6bdde", "referenceCount": 44, "citationCount": 84, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Unsupervised Cross-Domain Singing Voice Conversion", "abstract": "We present a wav-to-wav generative model for the task of singing voice conversion from any identity. Our method utilizes both an acoustic model, trained for the task of automatic speech recognition, together with melody extracted features to drive a waveform-based generator. The proposed generative architecture is invariant to the speaker's identity and can be trained to generate target singers from unlabeled training data, using either speech or singing sources. The model is optimized in an end-to-end fashion without any manual supervision, such as lyrics, musical notes or parallel samples. The proposed approach is fully-convolutional and can generate audio in real-time. Experiments show that our method significantly outperforms the baseline methods while generating convincingly better audio samples than alternative attempts.", "year": 2020, "venue": "Interspeech", "authors": [ "Adam Polyak", "Lior Wolf", "Yossi Adi", "Yaniv Taigman" ], "externalIds": { "DBLP": "journals/corr/abs-2008-02830", "ArXiv": "2008.02830", "MAG": "3095948607", "DOI": "10.21437/interspeech.2020-1862", "CorpusId": 221083376 }, "url": "https://www.semanticscholar.org/paper/49eddc0b2c1c400a93960c0287484078497a07d4", "referenceCount": 53, "citationCount": 42, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "DeepSinger: Singing Voice Synthesis with Data Mined From the Web", "abstract": "In this paper, we develop DeepSinger, a multi-lingual multi-singer singing voice synthesis (SVS) system, which is built from scratch using singing training data mined from music websites. The pipeline of DeepSinger consists of several steps, including data crawling, singing and accompaniment separation, lyrics-to-singing alignment, data filtration, and singing modeling. Specifically, we design a lyrics-to-singing alignment model to automatically extract the duration of each phoneme in lyrics starting from coarse-grained sentence level to fine-grained phoneme level, and further design a multi-lingual multi-singer singing model based on a feed-forward Transformer to directly generate linear-spectrograms from lyrics, and synthesize voices using Griffn-Lim. 
DeepSinger has several advantages over previous SVS systems: 1) to the best of our knowledge, it is the first SVS system that directly mines training data from music websites, 2) the lyrics-to-singing alignment model further avoids any human efforts for alignment labeling and greatly reduces labeling cost, 3) the singing model based on a feed-forward Transformer is simple and efficient, by removing the complicated acoustic feature modeling in parametric synthesis and leveraging a reference encoder to capture the timbre of a singer from noisy singing data, and 4) it can synthesize singing voices in multiple languages and multiple singers. We evaluate DeepSinger on our mined singing dataset that consists of about 92 hours data from 89 singers on three languages (Chinese, Cantonese and English). The results demonstrate that with the singing data purely mined from the Web, DeepSinger can synthesize high-quality singing voices in terms of both pitch accuracy and voice naturalness. Our audio samples are shown in https://speechresearch.github.io/deepsinger/.", "year": 2020, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Yi Ren", "Xu Tan", "Tao Qin", "Jian Luan", "Zhou Zhao", "Tie-Yan Liu" ], "externalIds": { "MAG": "3041199652", "DBLP": "conf/kdd/RenTQLZL20", "ArXiv": "2007.04590", "DOI": "10.1145/3394486.3403249", "CorpusId": 220424540 }, "url": "https://www.semanticscholar.org/paper/f8c73d6dcac8aeb0ac377135db69e6ca6fa13779", "referenceCount": 46, "citationCount": 64, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations", "abstract": "We show for the first time that learning powerful representations from speech audio alone followed by fine-tuning on transcribed speech can outperform the best semi-supervised methods while being conceptually simpler. wav2vec 2.0 masks the speech input in the latent space and solves a contrastive task defined over a quantization of the latent representations which are jointly learned. Experiments using all labeled data of Librispeech achieve 1.8/3.3 WER on the clean/other test sets. When lowering the amount of labeled data to one hour, wav2vec 2.0 outperforms the previous state of the art on the 100 hour subset while using 100 times less labeled data. Using just ten minutes of labeled data and pre-training on 53k hours of unlabeled data still achieves 4.8/8.2 WER. This demonstrates the feasibility of speech recognition with limited amounts of labeled data.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Alexei Baevski", "Henry Zhou", "Abdel-rahman Mohamed", "Michael Auli" ], "externalIds": { "ArXiv": "2006.11477", "DBLP": "conf/nips/BaevskiZMA20", "MAG": "3036601975", "CorpusId": 219966759 }, "url": "https://www.semanticscholar.org/paper/49a049dc85e2380dde80501a984878341dd8efdf", "referenceCount": 62, "citationCount": 4417, "influentialCitationCount": 1383, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "XiaoiceSing: A High-Quality and Integrated Singing Voice Synthesis System", "abstract": "This paper presents XiaoiceSing, a high-quality singing voice synthesis system which employs an integrated network for spectrum, F0 and duration modeling. 
We follow the main architecture of FastSpeech while proposing some singing-specific design: 1) Besides phoneme ID and position encoding, features from musical score (e.g.note pitch and length) are also added. 2) To attenuate off-key issues, we add a residual connection in F0 prediction. 3) In addition to the duration loss of each phoneme, the duration of all the phonemes in a musical note is accumulated to calculate the syllable duration loss for rhythm enhancement. Experiment results show that XiaoiceSing outperforms the baseline system of convolutional neural networks by 1.44 MOS on sound quality, 1.18 on pronunciation accuracy and 1.38 on naturalness respectively. In two A/B tests, the proposed F0 and duration modeling methods achieve 97.3% and 84.3% preference rate over baseline respectively, which demonstrates the overwhelming advantages of XiaoiceSing.", "year": 2020, "venue": "Interspeech", "authors": [ "Peiling Lu", "Jie Wu", "Jian Luan", "Xu Tan", "Li Zhou" ], "externalIds": { "ArXiv": "2006.06261", "DBLP": "conf/interspeech/LuWL0Z20", "MAG": "3097514409", "DOI": "10.21437/interspeech.2020-1410", "CorpusId": 219573847 }, "url": "https://www.semanticscholar.org/paper/ef264beb5ed8b995ada36c67f1d2d20e61e4c0f7", "referenceCount": 23, "citationCount": 83, "influentialCitationCount": 12, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "Pitchnet: Unsupervised Singing Voice Conversion with Pitch Adversarial Network", "abstract": "Singing voice conversion is to convert a singer’s voice to another one’s voice without changing singing content. Recent work shows that unsupervised singing voice conversion can be achieved with an autoencoder-based approach [1]. However, the converted singing voice can be easily out of key, showing that the existing approach cannot model the pitch information precisely. In this paper, we propose to advance the existing unsupervised singing voice conversion method proposed in [1] to achieve more accurate pitch translation and flexible pitch manipulation. Specifically, the proposed Pitch-Net added an adversarially trained pitch regression network to enforce the encoder network to learn pitch invariant phoneme representation, and a separate module to feed pitch extracted from the source audio to the decoder network. Our evaluation shows that the proposed method can greatly improve the quality of the converted singing voice (2.92 vs 3.75 in MOS). We also demonstrate that the pitch of converted singing can be easily controlled during generation by changing the levels of the extracted pitch before passing it to the decoder network.", "year": 2019, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Chen Deng", "Chengzhu Yu", "Heng Lu", "Chao Weng", "Dong Yu" ], "externalIds": { "DBLP": "journals/corr/abs-1912-01852", "MAG": "2994065875", "ArXiv": "1912.01852", "DOI": "10.1109/ICASSP40776.2020.9054199", "CorpusId": 208617331 }, "url": "https://www.semanticscholar.org/paper/e6daa0eca6297d1cef9185aad315f15484e2b159", "referenceCount": 20, "citationCount": 35, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "ASVspoof 2019: A large-scale public database of synthesized, converted and replayed speech", "abstract": null, "year": 2019, "venue": "Computer Speech and Language", "authors": [ "Xin Wang", "J. Yamagishi", "M. Todisco", "Héctor Delgado", "A. Nautsch", "N. Evans", "M. Sahidullah", "Ville Vestman", "T. 
Kinnunen", "Kong Aik LEE", "Lauri Juvela", "P. Alku", "Yu-Huai Peng", "Hsin-Te Hwang", "Yu Tsao", "Hsin-Min Wang", "Sébastien Le Maguer", "Markus Becker", "Zhenhua Ling" ], "externalIds": { "DBLP": "journals/csl/WangYTDNESVKLJA20", "MAG": "3026777299", "DOI": "10.1016/j.csl.2020.101114", "CorpusId": 211532840 }, "url": "https://www.semanticscholar.org/paper/0f79266f1c9f6fe0d0f8abcca138d19073c013da", "referenceCount": 65, "citationCount": 317, "influentialCitationCount": 22, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Similarity of Neural Network Representations Revisited", "abstract": "Recent work has sought to understand the behavior of neural networks by comparing representations between layers and between different trained models. We examine methods for comparing neural network representations based on canonical correlation analysis (CCA). We show that CCA belongs to a family of statistics for measuring multivariate similarity, but that neither CCA nor any other statistic that is invariant to invertible linear transformation can measure meaningful similarities between representations of higher dimension than the number of data points. We introduce a similarity index that measures the relationship between representational similarity matrices and does not suffer from this limitation. This similarity index is equivalent to centered kernel alignment (CKA) and is also closely connected to CCA. Unlike CCA, CKA can reliably identify correspondences between representations in networks trained from different initializations.", "year": 2019, "venue": "International Conference on Machine Learning", "authors": [ "Simon Kornblith", "Mohammad Norouzi", "Honglak Lee", "Geoffrey E. Hinton" ], "externalIds": { "MAG": "2949558627", "ArXiv": "1905.00414", "DBLP": "journals/corr/abs-1905-00414", "CorpusId": 141460329 }, "url": "https://www.semanticscholar.org/paper/726320cdbd04804ffa8f3a78c095bd1b55a2a695", "referenceCount": 61, "citationCount": 1121, "influentialCitationCount": 240, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Biology", "Mathematics" ] }, { "title": "X-Vectors: Robust DNN Embeddings for Speaker Recognition", "abstract": "In this paper, we use data augmentation to improve performance of deep neural network (DNN) embeddings for speaker recognition. The DNN, which is trained to discriminate between speakers, maps variable-length utterances to fixed-dimensional embeddings that we call x-vectors. Prior studies have found that embeddings leverage large-scale training datasets better than i-vectors. However, it can be challenging to collect substantial quantities of labeled data for training. We use data augmentation, consisting of added noise and reverberation, as an inexpensive method to multiply the amount of training data and improve robustness. The x-vectors are compared with i-vector baselines on Speakers in the Wild and NIST SRE 2016 Cantonese. We find that while augmentation is beneficial in the PLDA classifier, it is not helpful in the i-vector extractor. However, the x-vector DNN effectively exploits data augmentation, due to its supervised training. As a result, the x-vectors achieve superior performance on the evaluation datasets.", "year": 2018, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "David Snyder", "D. Garcia-Romero", "Gregory Sell", "Daniel Povey", "S. 
Khudanpur" ], "externalIds": { "DBLP": "conf/icassp/SnyderGSPK18", "MAG": "2890964092", "DOI": "10.1109/ICASSP.2018.8461375", "CorpusId": 46954166 }, "url": "https://www.semanticscholar.org/paper/389cd9824428be98a710f5f4de67121a70c15fd3", "referenceCount": 30, "citationCount": 2444, "influentialCitationCount": 299, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "“An a.i. hit of fake ‘drake’ and ‘the weeknd’ rattles the music world,”", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "FineMolTex: Towards Fine-grained Molecular Graph-Text Pre-training": { "paper_title": "FineMolTex: Towards Fine-grained Molecular Graph-Text Pre-training", "arxiv_id": "2409.14106v1", "keyword": "representation learning", "authors": [ "Yibo Li", "Yuan Fang", "Mengmei Zhang", "Chuan Shi" ], "references": [ { "title": "MolTC: Towards Molecular Relational Modeling In Language Models", "abstract": "Molecular Relational Learning (MRL), aiming to understand interactions between molecular pairs, plays a pivotal role in advancing biochemical research. Recently, the adoption of large language models (LLMs), known for their vast knowledge repositories and advanced logical inference capabilities, has emerged as a promising way for efficient and effective MRL. Despite their potential, these methods predominantly rely on the textual data, thus not fully harnessing the wealth of structural information inherent in molecular graphs. Moreover, the absence of a unified framework exacerbates the issue of information underutilization, as it hinders the sharing of interaction mechanism learned across diverse datasets. To address these challenges, this work proposes a novel LLM-based multi-modal framework for Molecular inTeraction prediction following Chain-of-Thought (CoT) theory, termed MolTC, which effectively integrate graphical information of two molecules in pair. To train MolTC efficiently, we introduce a Multi-hierarchical CoT concept to refine its training paradigm, and conduct a comprehensive Molecular Interactive Instructions dataset for the development of biochemical LLMs involving MRL. Our experiments, conducted across various datasets involving over 4,000,000 molecular pairs, exhibit the superiority of our method over current GNN and LLM-based baselines. Code is available at https://github.com/MangoKiller/MolTC.", "year": 2024, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Junfeng Fang", "Shuai Zhang", "Chang Wu", "Zhengyi Yang", "Zhiyuan Liu", "Sihang Li", "Kun Wang", "Wenjie Du", "Xiang Wang" ], "externalIds": { "DBLP": "conf/acl/FangZW00LWD024", "ArXiv": "2402.03781", "DOI": "10.48550/arXiv.2402.03781", "CorpusId": 267499784 }, "url": "https://www.semanticscholar.org/paper/81448c69a0b900f3721596c635c849987eec1a4b", "referenceCount": 78, "citationCount": 10, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Biology" ] }, { "title": "Towards 3D Molecule-Text Interpretation in Language Models", "abstract": "Language Models (LMs) have greatly influenced diverse domains. However, their inherent limitation in comprehending 3D molecular structures has considerably constrained their potential in the biomolecular domain. 
To bridge this gap, we focus on 3D molecule-text interpretation, and propose 3D-MoLM: 3D-Molecular Language Modeling. Specifically, 3D-MoLM enables an LM to interpret and analyze 3D molecules by equipping the LM with a 3D molecular encoder. This integration is achieved by a 3D molecule-text projector, bridging the 3D molecular encoder's representation space and the LM's input space. Moreover, to enhance 3D-MoLM's ability of cross-modal molecular understanding and instruction following, we meticulously curated a 3D molecule-centric instruction tuning dataset -- 3D-MoIT. Through 3D molecule-text alignment and 3D molecule-centric instruction tuning, 3D-MoLM establishes an integration of 3D molecular encoder and LM. It significantly surpasses existing baselines on downstream tasks, including molecule-text retrieval, molecule captioning, and more challenging open-text molecular QA tasks, especially focusing on 3D-dependent properties. We release our codes and datasets at https://github.com/lsh0520/3D-MoLM.", "year": 2024, "venue": "International Conference on Learning Representations", "authors": [ "Sihang Li", "Zhiyuan Liu", "Yancheng Luo", "Xiang Wang", "Xiangnan He", "Kenji Kawaguchi", "Tat-Seng Chua", "Qi Tian" ], "externalIds": { "ArXiv": "2401.13923", "DBLP": "journals/corr/abs-2401-13923", "DOI": "10.48550/arXiv.2401.13923", "CorpusId": 267211722 }, "url": "https://www.semanticscholar.org/paper/f58a49ea99e9a2754dd050927b9d830e3743f844", "referenceCount": 58, "citationCount": 20, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Biology" ] }, { "title": "CoDet: Co-Occurrence Guided Region-Word Alignment for Open-Vocabulary Object Detection", "abstract": "Deriving reliable region-word alignment from image-text pairs is critical to learn object-level vision-language representations for open-vocabulary object detection. Existing methods typically rely on pre-trained or self-trained vision-language models for alignment, which are prone to limitations in localization accuracy or generalization capabilities. In this paper, we propose CoDet, a novel approach that overcomes the reliance on pre-aligned vision-language space by reformulating region-word alignment as a co-occurring object discovery problem. Intuitively, by grouping images that mention a shared concept in their captions, objects corresponding to the shared concept shall exhibit high co-occurrence among the group. CoDet then leverages visual similarities to discover the co-occurring objects and align them with the shared concept. Extensive experiments demonstrate that CoDet has superior performances and compelling scalability in open-vocabulary detection, e.g., by scaling up the visual backbone, CoDet achieves 37.0 $\\text{AP}^m_{novel}$ and 44.7 $\\text{AP}^m_{all}$ on OV-LVIS, surpassing the previous SoTA by 4.2 $\\text{AP}^m_{novel}$ and 9.8 $\\text{AP}^m_{all}$. 
Code is available at https://github.com/CVMI-Lab/CoDet.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Chuofan Ma", "Yi Jiang", "Xin Wen", "Zehuan Yuan", "Xiaojuan Qi" ], "externalIds": { "ArXiv": "2310.16667", "DBLP": "conf/nips/MaJWYQ23", "DOI": "10.48550/arXiv.2310.16667", "CorpusId": 264451894 }, "url": "https://www.semanticscholar.org/paper/5e112d07da6e6b13d1bd447e3151391aa6ffb810", "referenceCount": 64, "citationCount": 26, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ReLM: Leveraging Language Models for Enhanced Chemical Reaction Prediction", "abstract": "Predicting chemical reactions, a fundamental challenge in chemistry, involves forecasting the resulting products from a given reaction process. Conventional techniques, notably those employing Graph Neural Networks (GNNs), are often limited by insufficient training data and their inability to utilize textual information, undermining their applicability in real-world applications. In this work, we propose ReLM, a novel framework that leverages the chemical knowledge encoded in language models (LMs) to assist GNNs, thereby enhancing the accuracy of real-world chemical reaction predictions. To further enhance the model's robustness and interpretability, we incorporate the confidence score strategy, enabling the LMs to self-assess the reliability of their predictions. Our experimental results demonstrate that ReLM improves the performance of state-of-the-art GNN-based methods across various chemical reaction datasets, especially in out-of-distribution settings. Codes are available at https://github.com/syr-cn/ReLM.", "year": 2023, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Yaorui Shi", "An Zhang", "Enzhi Zhang", "Zhiyuan Liu", "Xiang Wang" ], "externalIds": { "ArXiv": "2310.13590", "DBLP": "conf/emnlp/Shi0ZL023", "DOI": "10.48550/arXiv.2310.13590", "CorpusId": 264406019 }, "url": "https://www.semanticscholar.org/paper/e391d266b0d43475567f59efeaeabc884a48abd0", "referenceCount": 28, "citationCount": 18, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MolCA: Molecular Graph-Language Modeling with Cross-Modal Projector and Uni-Modal Adapter", "abstract": "Language Models (LMs) have demonstrated impressive molecule understanding ability on various 1D text-related tasks. However, they inherently lack 2D graph perception - a critical ability of human professionals in comprehending molecules' topological structures. To bridge this gap, we propose MolCA: Molecular Graph-Language Modeling with Cross-Modal Projector and Uni-Modal Adapter. MolCA enables an LM (e.g., Galactica) to understand both text- and graph-based molecular contents via the cross-modal projector. Specifically, the cross-modal projector is implemented as a Q-Former to connect a graph encoder's representation space and an LM's text space. Further, MolCA employs a uni-modal adapter (i.e., LoRA) for the LM's efficient adaptation to downstream tasks. Unlike previous studies that couple an LM with a graph encoder via cross-modal contrastive learning, MolCA retains the LM's ability of open-ended text generation and augments it with 2D graph information. To showcase its effectiveness, we extensively benchmark MolCA on tasks of molecule captioning, IUPAC name prediction, and molecule-text retrieval, on which MolCA significantly outperforms the baselines. 
Our codes and checkpoints can be found at https://github.com/acharkq/MolCA.", "year": 2023, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Zhiyuan Liu", "Sihang Li", "Yancheng Luo", "Hao Fei", "Yixin Cao", "Kenji Kawaguchi", "Xiang Wang", "Tat-Seng Chua" ], "externalIds": { "ArXiv": "2310.12798", "DBLP": "journals/corr/abs-2310-12798", "DOI": "10.48550/arXiv.2310.12798", "CorpusId": 264306303 }, "url": "https://www.semanticscholar.org/paper/25738c43c0c4788d803981eaf5d397691aba0958", "referenceCount": 51, "citationCount": 43, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GIMLET: A Unified Graph-Text Model for Instruction-Based Molecule Zero-Shot Learning", "abstract": "Molecule property prediction has gained significant attention in recent years. The main bottleneck is the label insufficiency caused by expensive lab experiments. In order to alleviate this issue and to better leverage textual knowledge for tasks, this study investigates the feasibility of employing natural language instructions to accomplish molecule-related tasks in a zero-shot setting. We discover that existing molecule-text models perform poorly in this setting due to inadequate treatment of instructions and limited capacity for graphs. To overcome these issues, we propose GIMLET, which unifies language models for both graph and text data. By adopting generalized position embedding, our model is extended to encode both graph structures and instruction text without additional graph encoding modules. GIMLET also decouples encoding of the graph from tasks instructions in the attention mechanism, enhancing the generalization of graph features across novel tasks. We construct a dataset consisting of more than two thousand molecule tasks with corresponding instructions derived from task descriptions. We pretrain GIMLET on the molecule tasks along with instructions, enabling the model to transfer effectively to a broad range of tasks. Experimental results demonstrate that GIMLET significantly outperforms molecule-text baselines in instruction-based zero-shot learning, even achieving closed results to supervised GNN models on tasks such as toxcast and muv.1", "year": 2023, "venue": "bioRxiv", "authors": [ "Haiteng Zhao", "Shengchao Liu", "Chang Ma", "Hannan Xu", "Jie Fu", "Zhihong Deng", "Lingpeng Kong", "Qi Liu" ], "externalIds": { "ArXiv": "2306.13089", "DBLP": "conf/nips/ZhaoLMXFDKL23", "DOI": "10.1101/2023.05.30.542904", "CorpusId": 259077070 }, "url": "https://www.semanticscholar.org/paper/119a3ed0898499fce0ce6af6958d566d82390ba5", "referenceCount": 89, "citationCount": 39, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Biology", "Computer Science" ] }, { "title": "Region-Aware Pretraining for Open-Vocabulary Object Detection with Vision Transformers", "abstract": "We present Region-aware Open-vocabulary Vision Transformers (RO-ViT) - a contrastive image-text pretraining recipe to bridge the gap between image-level pretraining and open-vocabulary object detection. At the pretraining phase, we propose to randomly crop and resize regions of positional embeddings instead of using the whole image positional embeddings. This better matches the use of positional embeddings at region-level in the detection finetuning phase. In addition, we replace the common softmax cross entropy loss in contrastive learning with focal loss to better learn the informative yet difficult examples. 
Finally, we leverage recent advances in novel object proposals to improve open-vocabulary detection finetuning. We evaluate our full model on the LVIS and COCO open-vocabulary detection benchmarks and zero-shot transfer. RO-ViT achieves a state-of-the-art 32.1 APr on LVIS, surpassing the best existing approach by +5.8 points in addition to competitive zero-shot transfer detection. Surprisingly, RO-ViT improves the image-level representation as well and achieves the state of the art on 9 out of 12 metrics on COCO and Flickr image-text retrieval benchmarks, outperforming competitive approaches with larger models.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Dahun Kim", "A. Angelova", "Weicheng Kuo" ], "externalIds": { "ArXiv": "2305.07011", "DBLP": "journals/corr/abs-2305-07011", "DOI": "10.1109/CVPR52729.2023.01072", "CorpusId": 258615536 }, "url": "https://www.semanticscholar.org/paper/5faee4af70f65e609eafe1f23f26593423f03750", "referenceCount": 70, "citationCount": 48, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DetCLIPv2: Scalable Open-Vocabulary Object Detection Pre-training via Word-Region Alignment", "abstract": "This paper presents DetCLIPv2, an efficient and scalable training framework that incorporates large-scale imagetext pairs to achieve open-vocabulary object detection (OVD). Unlike previous OVD frameworks that typically rely on a pre-trained vision-language model (e.g., CLIP) or exploit image-text pairs via a pseudo labeling process, DetCLIPv2 directly learns the fine-grained word-region alignment from massive image-text pairs in an end-to-end manner. To accomplish this, we employ a maximum word-region similarity between region proposals and textual words to guide the contrastive objective. To enable the model to gain localization capability while learning broad concepts, DetCLIPv2 is trained with a hybrid supervision from detection, grounding and image-text pair data under a unified data formulation. By jointly training with an alternating scheme and adopting low-resolution input for image-text pairs, DetCLIPv2 exploits image-text pair data efficiently and effectively: DetCLIPv2 utilizes 13 × more image-text pairs than DetCLIP with a similar training time and improves performance. With 13M image-text pairs for pre-training, DetCLIPv2 demonstrates superior open-vocabulary detection performance, e.g., DetCLIPv2 with Swin-T backbone achieves 40.4% zero-shot AP on the LVIS benchmark, which outperforms previous works GLIP/GLIPv2/DetCLIP by 14.4/11.4/4.5% AP, respectively, and even beats its fully-supervised counterpart by a large margin.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Lewei Yao", "Jianhua Han", "Xiaodan Liang", "Danqian Xu", "W. 
Zhang", "Zhenguo Li", "Hang Xu" ], "externalIds": { "ArXiv": "2304.04514", "DBLP": "journals/corr/abs-2304-04514", "DOI": "10.1109/CVPR52729.2023.02250", "CorpusId": 258049178 }, "url": "https://www.semanticscholar.org/paper/2394feb97b9ec16f6eb61907b40764bd03672971", "referenceCount": 67, "citationCount": 51, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "E(3) Equivariant Graph Neural Networks for Particle-Based Fluid Mechanics", "abstract": "We contribute to the vastly growing field of machine learning for engineering systems by demonstrating that equivariant graph neural networks have the potential to learn more accurate dynamic-interaction models than their non-equivariant counterparts. We benchmark two well-studied fluid flow systems, namely the 3D decaying Taylor-Green vortex and the 3D reverse Poiseuille flow, and compare equivariant graph neural networks to their non-equivariant counterparts on different performance measures, such as kinetic energy or Sinkhorn distance. Such measures are typically used in engineering to validate numerical solvers. Our main findings are that while being rather slow to train and evaluate, equivariant models learn more physically accurate interactions. This indicates opportunities for future work towards coarse-grained models for turbulent flows, and generalization across system dynamics and parameters.", "year": 2023, "venue": "arXiv.org", "authors": [ "Artur P. Toshev", "G. Galletti", "Johannes Brandstetter", "S. Adami", "N. Adams" ], "externalIds": { "DBLP": "journals/corr/abs-2304-00150", "ArXiv": "2304.00150", "DOI": "10.48550/arXiv.2304.00150", "CorpusId": 259300896 }, "url": "https://www.semanticscholar.org/paper/f9de04c657212334f559c50a21005f9a3fb425ee", "referenceCount": 22, "citationCount": 4, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Physics" ] }, { "title": "Aligning Bag of Regions for Open-Vocabulary Object Detection", "abstract": "Pre-trained vision-language models (VLMs) learn to align vision and language representations on large-scale datasets, where each image-text pair usually contains a bag of semantic concepts. However, existing open-vocabulary object detectors only align region embeddings individually with the corresponding features extracted from the VLMs. Such a design leaves the compositional structure of semantic concepts in a scene under-exploited, although the structure may be implicitly learned by the VLMs. In this work, we propose to align the embedding of bag of regions beyond individual regions. The proposed method groups contextually interrelated regions as a bag. The embeddings of regions in a bag are treated as embeddings of words in a sentence, and they are sent to the text encoder of a VLM to obtain the bag-of-regions embedding, which is learned to be aligned to the corresponding features extracted by a frozen VLM. Applied to the commonly used Faster R-CNN, our approach surpasses the previous best results by 4.6 box AP50 and 2.8 mask AP on novel categories of open-vocabulary COCO and LVIS benchmarks, respectively. 
Code and models are available at https://github.com/wusize/ovdet.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Size Wu", "Wenwei Zhang", "Sheng Jin", "Wentao Liu", "Chen Change Loy" ], "externalIds": { "DBLP": "journals/corr/abs-2302-13996", "ArXiv": "2302.13996", "DOI": "10.1109/CVPR52729.2023.01464", "CorpusId": 257219741 }, "url": "https://www.semanticscholar.org/paper/bce29cc829fab288c41ae5678e1bb5b95bf218d4", "referenceCount": 62, "citationCount": 70, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multi-modal Molecule Structure-text Model for Text-based Retrieval and Editing", "abstract": "There is increasing adoption of artificial intelligence in drug discovery. However, existing studies use machine learning to mainly utilize the chemical structures of molecules but ignore the vast textual knowledge available in chemistry. Incorporating textual knowledge enables us to realize new drug design objectives, adapt to text-based instructions and predict complex biological activities. Here we present a multi-modal molecule structure-text model, MoleculeSTM, by jointly learning molecules' chemical structures and textual descriptions via a contrastive learning strategy. To train MoleculeSTM, we construct a large multi-modal dataset, namely, PubChemSTM, with over 280,000 chemical structure-text pairs. To demonstrate the effectiveness and utility of MoleculeSTM, we design two challenging zero-shot tasks based on text instructions, including structure-text retrieval and molecule editing. MoleculeSTM has two main properties: open vocabulary and compositionality via natural language. In experiments, MoleculeSTM obtains the state-of-the-art generalization ability to novel biochemical concepts across various benchmarks.", "year": 2022, "venue": "Nat. Mac. Intell.", "authors": [ "Shengchao Liu", "Weili Nie", "Chengpeng Wang", "Jiarui Lu", "Zhuoran Qiao", "Ling Liu", "Jian Tang", "Chaowei Xiao", "Anima Anandkumar" ], "externalIds": { "DBLP": "journals/natmi/LiuNWLQLTXA23", "ArXiv": "2212.10789", "DOI": "10.48550/arXiv.2212.10789", "CorpusId": 254926709 }, "url": "https://www.semanticscholar.org/paper/958bb3831589246fe5b6b58cf99e3b65c58d027f", "referenceCount": 84, "citationCount": 93, "influentialCitationCount": 17, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Biology", "Mathematics" ] }, { "title": "Learning Object-Language Alignments for Open-Vocabulary Object Detection", "abstract": "Existing object detection methods are bounded in a fixed-set vocabulary by costly labeled data. When dealing with novel categories, the model has to be retrained with more bounding box annotations. Natural language supervision is an attractive alternative for its annotation-free attributes and broader object concepts. However, learning open-vocabulary object detection from language is challenging since image-text pairs do not contain fine-grained object-language alignments. Previous solutions rely on either expensive grounding annotations or distilling classification-oriented vision models. In this paper, we propose a novel open-vocabulary object detection framework directly learning from image-text pair data. We formulate object-language alignment as a set matching problem between a set of image region features and a set of word embeddings. It enables us to train an open-vocabulary object detector on image-text pairs in a much simple and effective way. 
Extensive experiments on two benchmark datasets, COCO and LVIS, demonstrate our superior performance over the competing approaches on novel categories, e.g. achieving 32.0% mAP on COCO and 21.7% mask mAP on LVIS. Code is available at: https://github.com/clin1223/VLDet.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Chuang Lin", "Pei Sun", "Yi Jiang", "Ping Luo", "Lizhen Qu", "Gholamreza Haffari", "Zehuan Yuan", "Jianfei Cai" ], "externalIds": { "DBLP": "journals/corr/abs-2211-14843", "ArXiv": "2211.14843", "DOI": "10.48550/arXiv.2211.14843", "CorpusId": 254043945 }, "url": "https://www.semanticscholar.org/paper/b2eb28dd5e2340a5e9de8ae82feaca7b6a265b4e", "referenceCount": 63, "citationCount": 66, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "F-VLM: Open-Vocabulary Object Detection upon Frozen Vision and Language Models", "abstract": "We present F-VLM, a simple open-vocabulary object detection method built upon Frozen Vision and Language Models. F-VLM simplifies the current multi-stage training pipeline by eliminating the need for knowledge distillation or detection-tailored pretraining. Surprisingly, we observe that a frozen VLM: 1) retains the locality-sensitive features necessary for detection, and 2) is a strong region classifier. We finetune only the detector head and combine the detector and VLM outputs for each region at inference time. F-VLM shows compelling scaling behavior and achieves +6.5 mask AP improvement over the previous state of the art on novel categories of LVIS open-vocabulary detection benchmark. In addition, we demonstrate very competitive results on COCO open-vocabulary detection benchmark and cross-dataset transfer detection, in addition to significant training speed-up and compute savings. Code will be released at the https://sites.google.com/view/f-vlm/home", "year": 2022, "venue": "arXiv.org", "authors": [ "Weicheng Kuo", "Yin Cui", "Xiuye Gu", "A. Piergiovanni", "A. Angelova" ], "externalIds": { "ArXiv": "2209.15639", "DBLP": "journals/corr/abs-2209-15639", "DOI": "10.48550/arXiv.2209.15639", "CorpusId": 252668578 }, "url": "https://www.semanticscholar.org/paper/83aee45f8afc470f5dbaabc05ccca9304599baf2", "referenceCount": 59, "citationCount": 95, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Molecular Multimodal Foundation Model Associating Molecule Graphs with Natural Language", "abstract": "Although artificial intelligence (AI) has made significant progress in understanding molecules in a wide range of fields, existing models generally acquire the single cognitive ability from the single molecular modality. Since the hierarchy of molecular knowledge is profound, even humans learn from different modalities including both intuitive diagrams and professional texts to assist their understanding. Inspired by this, we propose a molecular multimodal foundation model which is pretrained from molecular graphs and their semantically related textual data (crawled from published Scientific Citation Index papers) via contrastive learning. This AI model represents a critical attempt that directly bridges molecular graphs and natural language. Importantly, through capturing the specific and complementary information of the two modalities, our proposed model can better grasp molecular expertise. 
Experimental results show that our model not only exhibits promising performance in cross-modal tasks such as cross-modal retrieval and molecule caption, but also enhances molecular property prediction and possesses capability to generate meaningful molecular graphs from natural language descriptions. We believe that our model would have a broad impact on AI-empowered fields across disciplines such as biology, chemistry, materials, environment, and medicine, among others.", "year": 2022, "venue": "arXiv.org", "authors": [ "Bing Su", "Dazhao Du", "Zhao-Qing Yang", "Yujie Zhou", "Jiangmeng Li", "Anyi Rao", "Haoran Sun", "Zhiwu Lu", "Ji-rong Wen" ], "externalIds": { "ArXiv": "2209.05481", "DBLP": "journals/corr/abs-2209-05481", "DOI": "10.48550/arXiv.2209.05481", "CorpusId": 252212175 }, "url": "https://www.semanticscholar.org/paper/1c7a4e8d9f4fcf19a5d1caa078c66ca39cb75dd2", "referenceCount": 79, "citationCount": 84, "influentialCitationCount": 25, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Open-Vocabulary DETR with Conditional Matching", "abstract": null, "year": 2022, "venue": "European Conference on Computer Vision", "authors": [ "Yuhang Zang", "Wei Li", "Kaiyang Zhou", "Chen Huang", "Chen Change Loy" ], "externalIds": { "ArXiv": "2203.11876", "DBLP": "conf/eccv/ZangLZHL22", "DOI": "10.1007/978-3-031-20077-9_7", "CorpusId": 247597014 }, "url": "https://www.semanticscholar.org/paper/403ad5d6e78fcf29f1ac526fbc9ff6cbfea555eb", "referenceCount": 43, "citationCount": 147, "influentialCitationCount": 22, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A deep-learning system bridging molecule structure and biomedical text with comprehension comparable to human professionals", "abstract": null, "year": 2022, "venue": "Nature Communications", "authors": [ "Zheni Zeng", "Yuan Yao", "Zhiyuan Liu", "Maosong Sun" ], "externalIds": { "PubMedCentral": "8844428", "DOI": "10.1038/s41467-022-28494-3", "CorpusId": 246815222, "PubMed": "35165275" }, "url": "https://www.semanticscholar.org/paper/6958612fea7f220757b4165b8e12d4b62b4baa80", "referenceCount": 75, "citationCount": 91, "influentialCitationCount": 20, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Deep Molecular Representation Learning via Fusing Physical and Chemical Information", "abstract": "Molecular representation learning is the first yet vital step in combining deep learning and molecular science. To push the boundaries of molecular representation learning, we present PhysChem, a novel neural architecture that learns molecular representations via fusing physical and chemical information of molecules. PhysChem is composed of a physicist network (PhysNet) and a chemist network (ChemNet). PhysNet is a neural physical engine that learns molecular conformations through simulating molecular dynamics with parameterized forces;ChemNet implements geometry-aware deep message-passing to learn chemical / biomedical properties of molecules. Two networks specialize in their own tasks and cooperate by providing expertise to each other. By fusing physical and chemical information, PhysChem achieved state-of-the-art performances on MoleculeNet, a standard molecular machine learning benchmark. 
The effectiveness of PhysChem was further corroborated on cutting-edge datasets of SARS-CoV-2.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Shuwen Yang", "Ziyao Li", "Guojie Song", "Lingsheng Cai" ], "externalIds": { "DBLP": "conf/nips/YangLSC21", "ArXiv": "2112.04624", "CorpusId": 245005761 }, "url": "https://www.semanticscholar.org/paper/028969f5b5135797ba0690b46aaa3042a7720f6a", "referenceCount": 60, "citationCount": 22, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Motif-based Graph Self-Supervised Learning for Molecular Property Prediction", "abstract": "Predicting molecular properties with data-driven methods has drawn much attention in recent years. Particularly, Graph Neural Networks (GNNs) have demonstrated remarkable success in various molecular generation and prediction tasks. In cases where labeled data is scarce, GNNs can be pre-trained on unlabeled molecular data to first learn the general semantic and structural information before being fine-tuned for specific tasks. However, most existing self-supervised pre-training frameworks for GNNs only focus on node-level or graph-level tasks. These approaches cannot capture the rich information in subgraphs or graph motifs. For example, functional groups (frequently-occurred subgraphs in molecular graphs) often carry indicative information about the molecular properties. To bridge this gap, we propose Motif-based Graph Self-supervised Learning (MGSSL) by introducing a novel self-supervised motif generation framework for GNNs. First, for motif extraction from molecular graphs, we design a molecule fragmentation method that leverages a retrosynthesis-based algorithm BRICS and additional rules for controlling the size of motif vocabulary. Second, we design a general motif-based generative pre-training framework in which GNNs are asked to make topological and label predictions. This generative framework can be implemented in two different ways, i.e., breadth-first or depth-first. Finally, to take the multi-scale information in molecular graphs into consideration, we introduce a multi-level self-supervised pre-training. Extensive experiments on various downstream benchmark tasks show that our methods outperform all state-of-the-art baselines.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Zaixin Zhang", "Qi Liu", "Hao Wang", "Chengqiang Lu", "Chee-Kong Lee" ], "externalIds": { "ArXiv": "2110.00987", "DBLP": "conf/nips/ZhangLWLL21", "CorpusId": 238259534 }, "url": "https://www.semanticscholar.org/paper/2ced2ac19a88439b52e519d2e6ce44cccf08e191", "referenceCount": 55, "citationCount": 190, "influentialCitationCount": 18, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Biology" ] }, { "title": "Chemical-Reaction-Aware Molecule Representation Learning", "abstract": "Molecule representation learning (MRL) methods aim to embed molecules into a real vector space. However, existing SMILES-based (Simplified Molecular-Input Line-Entry System) or GNN-based (Graph Neural Networks) MRL methods either take SMILES strings as input that have difficulty in encoding molecule structure information, or over-emphasize the importance of GNN architectures but neglect their generalization ability. Here we propose using chemical reactions to assist learning molecule representation. 
The key idea of our approach is to preserve the equivalence of molecules with respect to chemical reactions in the embedding space, i.e., forcing the sum of reactant embeddings and the sum of product embeddings to be equal for each chemical equation. This constraint is proven effective to 1) keep the embedding space well-organized and 2) improve the generalization ability of molecule embeddings. Moreover, our model can use any GNN as the molecule encoder and is thus agnostic to GNN architectures. Experimental results demonstrate that our method achieves state-of-the-art performance in a variety of downstream tasks, e.g., 17.4% absolute Hit@1 gain in chemical reaction prediction, 2.3% absolute AUC gain in molecule property prediction, and 18.5% relative RMSE gain in graph-edit-distance prediction, respectively, over the best baseline method. The code is available at https://github.com/hwwang55/MolR.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Hongwei Wang", "Weijian Li", "Xiaomeng Jin", "Kyunghyun Cho", "Heng Ji", "Jiawei Han", "M. Burke" ], "externalIds": { "ArXiv": "2109.09888", "DBLP": "journals/corr/abs-2109-09888", "CorpusId": 237581512 }, "url": "https://www.semanticscholar.org/paper/309049d5003f0876a759c983fce4edf510f1b006", "referenceCount": 57, "citationCount": 49, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Physics", "Biology" ] }, { "title": "Geometric deep learning on molecular representations", "abstract": null, "year": 2021, "venue": "Nature Machine Intelligence", "authors": [ "Kenneth Atz", "F. Grisoni", "G. Schneider" ], "externalIds": { "ArXiv": "2107.12375", "DBLP": "journals/corr/abs-2107-12375", "DOI": "10.1038/s42256-021-00418-8", "CorpusId": 236447886 }, "url": "https://www.semanticscholar.org/paper/41c3624512a6b249444b374e5767c108fb240650", "referenceCount": 208, "citationCount": 215, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Physics", "Biology" ] }, { "title": "Property-Aware Relation Networks for Few-Shot Molecular Property Prediction", "abstract": "Molecular property prediction plays a fundamental role in AI-aided drug discovery to identify candidate molecules, which is also essentially a few-shot problem due to lack of labeled data. In this paper, we propose Property-Aware Relation networks (PAR) to handle this problem. We first introduce a property-aware molecular encoder to transform the generic molecular embeddings to property-aware ones. Then, we design a query-dependent relation graph learning module to estimate molecular relation graph and refine molecular embeddings w.r.t. the target property. Thus, the facts that both property-related information and relationships among molecules change across different properties are utilized to better learn and propagate molecular embeddings. Generally, PAR can be regarded as a combination of metric-based and optimization-based few-shot learning method. We further extend PAR to Transferable PAR (T-PAR) to handle the distribution shift, which is common in drug discovery. The keys are joint sampling and relation graph learning schemes, which simultaneously learn molecular embeddings from both source and target domains. Extensive results on benchmark datasets show that PAR and T-PAR consistently outperform existing methods on few-shot and transferable few-shot molecular property prediction tasks, respectively. 
Besides, ablation and case studies are conducted to validate the rationality of our designs in PAR and T-PAR.", "year": 2021, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Yaqing Wang", "Abulikemu Abuduweili", "Quanming Yao", "D. Dou" ], "externalIds": { "ArXiv": "2107.07994", "DBLP": "conf/nips/WangAYD21", "DOI": "10.1109/TPAMI.2024.3368090", "CorpusId": 240420239, "PubMed": "38381636" }, "url": "https://www.semanticscholar.org/paper/fe3187ef8ad0576caf8982f049e5906f92f67230", "referenceCount": 94, "citationCount": 55, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "PubChem in 2021: new data content and improved web interfaces", "abstract": "Abstract PubChem (https://pubchem.ncbi.nlm.nih.gov) is a popular chemical information resource that serves the scientific community as well as the general public, with millions of unique users per month. In the past two years, PubChem made substantial improvements. Data from more than 100 new data sources were added to PubChem, including chemical-literature links from Thieme Chemistry, chemical and physical property links from SpringerMaterials, and patent links from the World Intellectual Properties Organization (WIPO). PubChem's homepage and individual record pages were updated to help users find desired information faster. This update involved a data model change for the data objects used by these pages as well as by programmatic users. Several new services were introduced, including the PubChem Periodic Table and Element pages, Pathway pages, and Knowledge panels. Additionally, in response to the coronavirus disease 2019 (COVID-19) outbreak, PubChem created a special data collection that contains PubChem data related to COVID-19 and the severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2).", "year": 2020, "venue": "Nucleic Acids Res.", "authors": [ "Sunghwan Kim", "Jie Chen", "Tiejun Cheng", "A. Gindulyte", "Jia He", "Siqian He", "Qingliang Li", "Benjamin A. Shoemaker", "P. Thiessen", "Bo Yu", "L. Zaslavsky", "Jian Zhang", "Evan E. Bolton" ], "externalIds": { "PubMedCentral": "7778930", "DBLP": "journals/nar/00020CGHHLSTYZ021", "MAG": "3097145107", "DOI": "10.1093/nar/gkaa971", "CorpusId": 226261325, "PubMed": "33151290" }, "url": "https://www.semanticscholar.org/paper/3ef5f761b1047b4c20533b84f76f4c97ecda03ff", "referenceCount": 65, "citationCount": 2260, "influentialCitationCount": 134, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science", "Biology" ] }, { "title": "Graph Contrastive Learning with Augmentations", "abstract": "Generalizable, transferrable, and robust representation learning on graph-structured data remains a challenge for current graph neural networks (GNNs). Unlike what has been developed for convolutional neural networks (CNNs) for image data, self-supervised learning and pre-training are less explored for GNNs. In this paper, we propose a graph contrastive learning (GraphCL) framework for learning unsupervised representations of graph data. We first design four types of graph augmentations to incorporate various priors. We then systematically study the impact of various combinations of graph augmentations on multiple datasets, in four different settings: semi-supervised, unsupervised, and transfer learning as well as adversarial attacks. 
The results show that, even without tuning augmentation extents nor using sophisticated GNN architectures, our GraphCL framework can produce graph representations of similar or better generalizability, transferrability, and robustness compared to state-of-the-art methods. We also investigate the impact of parameterized graph augmentation extents and patterns, and observe further performance gains in preliminary experiments. Our codes are available at https://github.com/Shen-Lab/GraphCL.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Yuning You", "Tianlong Chen", "Yongduo Sui", "Ting Chen", "Zhangyang Wang", "Yang Shen" ], "externalIds": { "DBLP": "conf/nips/YouCSCWS20", "MAG": "3102419180", "ArXiv": "2010.13902", "CorpusId": 225076220 }, "url": "https://www.semanticscholar.org/paper/2a9fbca9dc6badbeedc591ad829c5c6e0f950fd6", "referenceCount": 68, "citationCount": 1565, "influentialCitationCount": 332, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Self-Supervised Graph Transformer on Large-Scale Molecular Data", "abstract": "How to obtain informative representations of molecules is a crucial prerequisite in AI-driven drug design and discovery. Recent researches abstract molecules as graphs and employ Graph Neural Networks (GNNs) for molecular representation learning. Nevertheless, two issues impede the usage of GNNs in real scenarios: (1) insufficient labeled molecules for supervised training; (2) poor generalization capability to new-synthesized molecules. To address them both, we propose a novel framework, GROVER, which stands for Graph Representation frOm self-superVised mEssage passing tRansformer. With carefully designed self-supervised tasks in node-, edge- and graph-level, GROVER can learn rich structural and semantic information of molecules from enormous unlabelled molecular data. Rather, to encode such complex information, GROVER integrates Message Passing Networks into the Transformer-style architecture to deliver a class of more expressive encoders of molecules. The flexibility of GROVER allows it to be trained efficiently on large-scale molecular dataset without requiring any supervision, thus being immunized to the two issues mentioned above. We pre-train GROVER with 100 million parameters on 10 million unlabelled molecules -- the biggest GNN and the largest training dataset in molecular representation learning. We then leverage the pre-trained GROVER for molecular property prediction followed by task-specific fine-tuning, where we observe a huge improvement (more than 6% on average) from current state-of-the-art methods on 11 challenging benchmarks. The insights we gained are that well-designed self-supervision losses and largely-expressive pre-trained models enjoy the significant potential on performance boosting.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Yu Rong", "Yatao Bian", "Tingyang Xu", "Wei-yang Xie", "Ying Wei", "Wen-bing Huang", "Junzhou Huang" ], "externalIds": { "MAG": "3101620381", "DBLP": "conf/nips/RongBXX0HH20", "CorpusId": 226191736 }, "url": "https://www.semanticscholar.org/paper/a9a4e8e631890a14257539948e1813b5214c60dd", "referenceCount": 67, "citationCount": 608, "influentialCitationCount": 99, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RetroXpert: Decompose Retrosynthesis Prediction like a Chemist", "abstract": "Retrosynthesis is the process of recursively decomposing target molecules into available building blocks. 
It plays an important role in solving problems in organic synthesis planning. To automate the retrosynthesis analysis, many retrosynthesis prediction methods have been proposed. However, most of them are cumbersome and lack interpretability about their predictions. In this paper, we devise a novel template-free algorithm, RetroXpert, for automatic retrosynthetic expansion by automating the procedure that chemists used to do. Our method disassembles retrosynthesis into two steps: i) we identify the potential reaction center within the target molecule through a graph neural network and generate intermediate synthons; and ii) we predict the associated reactants based on the obtained synthons via a reactant generation model. While outperforming the state-of-the-art baselines by a significant margin, our model also provides chemically reasonable interpretation.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Chao-chao Yan", "Qianggang Ding", "P. Zhao", "Shuangjia Zheng", "Jinyu Yang", "Yang Yu", "Junzhou Huang" ], "externalIds": { "ArXiv": "2011.02893", "DBLP": "journals/corr/abs-2011-02893", "MAG": "3101473090", "DOI": "10.26434/chemrxiv.11869692.v3", "CorpusId": 226254245 }, "url": "https://www.semanticscholar.org/paper/f5ef0247f9bf19e1774da660b58bb4ac18108fa4", "referenceCount": 42, "citationCount": 93, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Biology" ] }, { "title": "GEOM: Energy-annotated molecular conformations for property prediction and molecular generation", "abstract": "Machine learning outperforms traditional approaches in many molecular design tasks. Although molecules are often thought of as 2D graphs, they in fact consist of an ensemble of inter-converting 3D structures called conformers. Molecular properties arise from the contribution of many conformers, and in the case of a drug binding a target, may be due mainly to a few distinct members. Molecular representations in machine learning are typically based on either one single 3D conformer or on a 2D graph that strips geometrical information. No reference datasets exist that connect these graph and point cloud ensemble representations. Here, we use first-principles simulations to annotate over 400,000 molecules with the ensemble of geometries they span. The Geometrical Embedding Of Molecules (GEOM) dataset contains over 33 million molecular conformers labeled with their relative energies and statistical probabilities at room temperature. This dataset will assist benchmarking and transfer learning in two classes of tasks: inferring 3D properties from 2D molecular graphs, and developing generative models to sample 3D conformations.", "year": 2020, "venue": "arXiv.org", "authors": [ "Simon Axelrod", "Rafael Gómez-Bombarelli" ], "externalIds": { "MAG": "3034806393", "ArXiv": "2006.05531", "DBLP": "journals/corr/abs-2006-05531", "CorpusId": 219558923 }, "url": "https://www.semanticscholar.org/paper/7e631773b14d7203eb0d85742b2a850f51eb1616", "referenceCount": 132, "citationCount": 159, "influentialCitationCount": 25, "isOpenAccess": false, "fieldsOfStudy": [ "Physics", "Computer Science" ] }, { "title": "Bridging the Gap Between Anchor-Based and Anchor-Free Detection via Adaptive Training Sample Selection", "abstract": "Object detection has been dominated by anchor-based detectors for several years. Recently, anchor-free detectors have become popular due to the proposal of FPN and Focal Loss. 
In this paper, we first point out that the essential difference between anchor-based and anchor-free detection is actually how to define positive and negative training samples, which leads to the performance gap between them. If they adopt the same definition of positive and negative samples during training, there is no obvious difference in the final performance, no matter regressing from a box or a point. This shows that how to select positive and negative training samples is important for current object detectors. Then, we propose an Adaptive Training Sample Selection (ATSS) to automatically select positive and negative samples according to statistical characteristics of object. It significantly improves the performance of anchor-based and anchor-free detectors and bridges the gap between them. Finally, we discuss the necessity of tiling multiple anchors per location on the image to detect objects. Extensive experiments conducted on MS COCO support our aforementioned analysis and conclusions. With the newly introduced ATSS, we improve state-of-the-art detectors by a large margin to 50.7% AP without introducing any overhead. The code is available at https://github.com/sfzhang15/ATSS.", "year": 2019, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Shifeng Zhang", "Cheng Chi", "Yongqiang Yao", "Zhen Lei", "Stan Z. Li" ], "externalIds": { "MAG": "3035396860", "DBLP": "conf/cvpr/ZhangCYLL20", "ArXiv": "1912.02424", "DOI": "10.1109/CVPR42600.2020.00978", "CorpusId": 208637257 }, "url": "https://www.semanticscholar.org/paper/448529da2bf004cf79084401ad3cbd6b511e4969", "referenceCount": 74, "citationCount": 1284, "influentialCitationCount": 224, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "InfoGraph: Unsupervised and Semi-supervised Graph-Level Representation Learning via Mutual Information Maximization", "abstract": "This paper studies learning the representations of whole graphs in both unsupervised and semi-supervised scenarios. Graph-level representations are critical in a variety of real-world applications such as predicting the properties of molecules and community analysis in social networks. Traditional graph kernel based methods are simple, yet effective for obtaining fixed-length representations for graphs but they suffer from poor generalization due to hand-crafted designs. There are also some recent methods based on language models (e.g. graph2vec) but they tend to only consider certain substructures (e.g. subtrees) as graph representatives. Inspired by recent progress of unsupervised representation learning, in this paper we proposed a novel method called InfoGraph for learning graph-level representations. We maximize the mutual information between the graph-level representation and the representations of substructures of different scales (e.g., nodes, edges, triangles). By doing so, the graph-level representations encode aspects of the data that are shared across different scales of substructures. Furthermore, we further propose InfoGraph*, an extension of InfoGraph for semi-supervised scenarios. InfoGraph* maximizes the mutual information between unsupervised graph representations learned by InfoGraph and the representations learned by existing supervised methods. As a result, the supervised encoder learns from unlabeled data while preserving the latent semantic space favored by the current supervised task. 
Experimental results on the tasks of graph classification and molecular property prediction show that InfoGraph is superior to state-of-the-art baselines and InfoGraph* can achieve performance competitive with state-of-the-art semi-supervised models.", "year": 2019, "venue": "International Conference on Learning Representations", "authors": [ "Fan-Yun Sun", "Jordan Hoffmann", "Jian Tang" ], "externalIds": { "MAG": "2965556524", "DBLP": "conf/iclr/SunHV020", "ArXiv": "1908.01000", "CorpusId": 199441876 }, "url": "https://www.semanticscholar.org/paper/2fb59ebe271d6b007bb0429c1701fd1004782d1b", "referenceCount": 75, "citationCount": 738, "influentialCitationCount": 127, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Self-referencing embedded strings (SELFIES): A 100% robust molecular string representation", "abstract": "The discovery of novel materials and functional molecules can help to solve some of society’s most urgent challenges, ranging from efficient energy harvesting and storage to uncovering novel pharmaceutical drug candidates. Traditionally matter engineering–generally denoted as inverse design–was based massively on human intuition and high-throughput virtual screening. The last few years have seen the emergence of significant interest in computer-inspired designs based on evolutionary or deep learning methods. The major challenge here is that the standard strings molecular representation SMILES shows substantial weaknesses in that task because large fractions of strings do not correspond to valid molecules. Here, we solve this problem at a fundamental level and introduce SELFIES (SELF-referencIng Embedded Strings), a string-based representation of molecules which is 100% robust. Every SELFIES string corresponds to a valid molecule, and SELFIES can represent every molecule. SELFIES can be directly applied in arbitrary machine learning models without the adaptation of the models; each of the generated molecule candidates is valid. In our experiments, the model’s internal memory stores two orders of magnitude more diverse molecules than a similar test with SMILES. Furthermore, as all molecules are valid, it allows for explanation and interpretation of the internal working of the generative models.", "year": 2019, "venue": "Machine Learning: Science and Technology", "authors": [ "Mario Krenn", "Florian Hase", "AkshatKumar Nigam", "Pascal Friederich", "Alán Aspuru-Guzik" ], "externalIds": { "DBLP": "journals/mlst/KrennHNFA20", "MAG": "3045928028", "DOI": "10.1088/2632-2153/aba947", "CorpusId": 212415210 }, "url": "https://www.semanticscholar.org/paper/8338a903d8078481ff8af777475f7394d00e9d57", "referenceCount": 65, "citationCount": 545, "influentialCitationCount": 42, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Physics" ] }, { "title": "Strategies for Pre-training Graph Neural Networks", "abstract": "Many applications of machine learning require a model to make accurate pre-dictions on test examples that are distributionally different from training ones, while task-specific labels are scarce during training. An effective approach to this challenge is to pre-train a model on related tasks where data is abundant, and then fine-tune it on a downstream task of interest. While pre-training has been effective in many language and vision domains, it remains an open question how to effectively use pre-training on graph datasets. 
In this paper, we develop a new strategy and self-supervised methods for pre-training Graph Neural Networks (GNNs). The key to the success of our strategy is to pre-train an expressive GNN at the level of individual nodes as well as entire graphs so that the GNN can learn useful local and global representations simultaneously. We systematically study pre-training on multiple graph classification datasets. We find that naive strategies, which pre-train GNNs at the level of either entire graphs or individual nodes, give limited improvement and can even lead to negative transfer on many downstream tasks. In contrast, our strategy avoids negative transfer and improves generalization significantly across downstream tasks, leading up to 9.4% absolute improvements in ROC-AUC over non-pre-trained models and achieving state-of-the-art performance for molecular property prediction and protein function prediction.", "year": 2019, "venue": "International Conference on Learning Representations", "authors": [ "Weihua Hu", "Bowen Liu", "Joseph Gomes", "M. Zitnik", "Percy Liang", "V. Pande", "J. Leskovec" ], "externalIds": { "MAG": "3005552578", "ArXiv": "1905.12265", "DBLP": "conf/iclr/HuLGZLPL20", "CorpusId": 213085920 }, "url": "https://www.semanticscholar.org/paper/789a7069d1a2d02d784e4821685b216cc63e6ec8", "referenceCount": 92, "citationCount": 1152, "influentialCitationCount": 222, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "SciBERT: A Pretrained Language Model for Scientific Text", "abstract": "Obtaining large-scale annotated data for NLP tasks in the scientific domain is challenging and expensive. We release SciBERT, a pretrained language model based on BERT (Devlin et. al., 2018) to address the lack of high-quality, large-scale labeled scientific data. SciBERT leverages unsupervised pretraining on a large multi-domain corpus of scientific publications to improve performance on downstream scientific NLP tasks. We evaluate on a suite of tasks including sequence tagging, sentence classification and dependency parsing, with datasets from a variety of scientific domains. We demonstrate statistically significant improvements over BERT and achieve new state-of-the-art results on several of these tasks. The code and pretrained models are available at https://github.com/allenai/scibert/.", "year": 2019, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Iz Beltagy", "Kyle Lo", "Arman Cohan" ], "externalIds": { "ACL": "D19-1371", "DBLP": "conf/emnlp/BeltagyLC19", "MAG": "2973154071", "ArXiv": "1903.10676", "DOI": "10.18653/v1/D19-1371", "CorpusId": 202558505 }, "url": "https://www.semanticscholar.org/paper/156d217b0a911af97fa1b5a71dc909ccef7a8028", "referenceCount": 32, "citationCount": 2542, "influentialCitationCount": 462, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "N-Gram Graph: Simple Unsupervised Representation for Graphs, with Applications to Molecules", "abstract": "Machine learning techniques have recently been adopted in various applications in medicine, biology, chemistry, and material engineering. An important task is to predict the properties of molecules, which serves as the main subroutine in many downstream applications such as virtual screening and drug design. Despite the increasing interest, the key challenge is to construct proper representations of molecules for learning algorithms. 
This paper introduces the N-gram graph, a simple unsupervised representation for molecules. The method first embeds the vertices in the molecule graph. It then constructs a compact representation for the graph by assembling the vertex embeddings in short walks in the graph, which we show is equivalent to a simple graph neural network that needs no training. The representations can thus be efficiently computed and then used with supervised learning methods for prediction. Experiments on 60 tasks from 10 benchmark datasets demonstrate its advantages over both popular graph neural networks and traditional representation methods. This is complemented by theoretical analysis showing its strong representation and prediction power.", "year": 2018, "venue": "Neural Information Processing Systems", "authors": [ "Shengchao Liu", "M. F. Demirel", "Yingyu Liang" ], "externalIds": { "MAG": "2987522751", "ArXiv": "1806.09206", "DBLP": "conf/nips/LiuDL19", "CorpusId": 207852825 }, "url": "https://www.semanticscholar.org/paper/0eea4193103599933bbc3ee89fcc563c5a519a9d", "referenceCount": 66, "citationCount": 157, "influentialCitationCount": 15, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "DrugBank 5.0: a major update to the DrugBank database for 2018", "abstract": "Abstract DrugBank (www.drugbank.ca) is a web-enabled database containing comprehensive molecular information about drugs, their mechanisms, their interactions and their targets. First described in 2006, DrugBank has continued to evolve over the past 12 years in response to marked improvements to web standards and changing needs for drug research and development. This year’s update, DrugBank 5.0, represents the most significant upgrade to the database in more than 10 years. In many cases, existing data content has grown by 100% or more over the last update. For instance, the total number of investigational drugs in the database has grown by almost 300%, the number of drug-drug interactions has grown by nearly 600% and the number of SNP-associated drug effects has grown more than 3000%. Significant improvements have been made to the quantity, quality and consistency of drug indications, drug binding data as well as drug-drug and drug-food interactions. A great deal of brand new data have also been added to DrugBank 5.0. This includes information on the influence of hundreds of drugs on metabolite levels (pharmacometabolomics), gene expression levels (pharmacotranscriptomics) and protein expression levels (pharmacoprotoemics). New data have also been added on the status of hundreds of new drug clinical trials and existing drug repurposing trials. Many other important improvements in the content, interface and performance of the DrugBank website have been made and these should greatly enhance its ease of use, utility and potential applications in many areas of pharmacological research, pharmaceutical science and drug education.", "year": 2017, "venue": "Nucleic Acids Res.", "authors": [ "D. Wishart", "Y. D. Feunang", "Anchi Guo", "Elvis J. Lo", "A. Marcu", "J. 
Grant", "Tanvir Sajed", "Daniel Johnson", "Carin Li", "Zinat Sayeeda", "Nazanin Assempour", "Ithayavani Iynkkaran", "Yifeng Liu", "Adam Maciejewski", "Nicola Gale", "Alex Wilson", "Lucy Chin", "Ryan Cummings", "Diana Le", "Allison Pon", "Craig Knox", "Michael Wilson" ], "externalIds": { "PubMedCentral": "5753335", "MAG": "2767891136", "DBLP": "journals/nar/WishartFGLMGSJL18", "DOI": "10.1093/nar/gkx1037", "CorpusId": 29807737, "PubMed": "29126136" }, "url": "https://www.semanticscholar.org/paper/98128fd412ebfa90201a276f2c59020ccc696a75", "referenceCount": 22, "citationCount": 5803, "influentialCitationCount": 432, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science", "Biology" ] }, { "title": "Attention is All you Need", "abstract": "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Ashish Vaswani", "Noam M. Shazeer", "Niki Parmar", "Jakob Uszkoreit", "Llion Jones", "Aidan N. Gomez", "Lukasz Kaiser", "Illia Polosukhin" ], "externalIds": { "MAG": "2963403868", "DBLP": "conf/nips/VaswaniSPUJGKP17", "ArXiv": "1706.03762", "CorpusId": 13756489 }, "url": "https://www.semanticscholar.org/paper/204e3073870fae3d05bcbc2f6a8e263d9b72e776", "referenceCount": 41, "citationCount": 104988, "influentialCitationCount": 15363, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Neural Message Passing for Quantum Chemistry", "abstract": "Supervised learning on molecules has incredible potential to be useful in chemistry, drug discovery, and materials science. Luckily, several promising and closely related neural network models invariant to molecular symmetries have already been described in the literature. These models learn a message passing algorithm and aggregation procedure to compute a function of their entire input graph. At this point, the next step is to find a particularly effective variant of this general approach and apply it to chemical prediction benchmarks until we either solve them or reach the limits of the approach. In this paper, we reformulate existing models into a single common framework we call Message Passing Neural Networks (MPNNs) and explore additional novel variations within this framework. 
Using MPNNs we demonstrate state of the art results on an important molecular property prediction benchmark; these results are strong enough that we believe future work should focus on datasets with larger molecules or more accurate ground truth labels.", "year": 2017, "venue": "International Conference on Machine Learning", "authors": [ "J. Gilmer", "S. Schoenholz", "Patrick F. Riley", "O. Vinyals", "George E. Dahl" ], "externalIds": { "DBLP": "journals/corr/GilmerSRVD17", "MAG": "2952254971", "ArXiv": "1704.01212", "CorpusId": 9665943 }, "url": "https://www.semanticscholar.org/paper/e24cdf73b3e7e590c2fe5ecac9ae8aa983801367", "referenceCount": 37, "citationCount": 6566, "influentialCitationCount": 751, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MoleculeNet: a benchmark for molecular machine learning", "abstract": "A large scale benchmark for molecular machine learning consisting of multiple public datasets, metrics, featurizations and learning algorithms.", "year": 2017, "venue": "Chemical Science", "authors": [ "Zhenqin Wu", "Bharath Ramsundar", "Evan N. Feinberg", "Joseph Gomes", "C. Geniesse", "Aneesh S. Pappu", "K. Leswing", "V. Pande" ], "externalIds": { "MAG": "2949858440", "PubMedCentral": "5868307", "ArXiv": "1703.00564", "DBLP": "journals/corr/WuRFGGPLP17", "DOI": "10.1039/c7sc02664a", "CorpusId": 217680306, "PubMed": "29629118" }, "url": "https://www.semanticscholar.org/paper/d0ab11de3077490c80a08abd0fb8827bac84c454", "referenceCount": 124, "citationCount": 1488, "influentialCitationCount": 264, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine", "Physics", "Mathematics" ] }, { "title": "A Data-Driven Approach to Predicting Successes and Failures of Clinical Trials.", "abstract": null, "year": 2016, "venue": "Cell Chemical Biology", "authors": [ "Kaitlyn M. Gayvert", "Neel S. Madhukar", "O. Elemento" ], "externalIds": { "MAG": "2461620095", "DOI": "10.1016/j.chembiol.2016.07.023", "CorpusId": 5175136, "PubMed": "27642066" }, "url": "https://www.semanticscholar.org/paper/6f4a1b068dbcfd1a5c65ca2c6d670818ee3db8ff", "referenceCount": 37, "citationCount": 198, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Biology", "Medicine" ] }, { "title": "The SIDER database of drugs and side effects", "abstract": "Unwanted side effects of drugs are a burden on patients and a severe impediment in the development of new drugs. At the same time, adverse drug reactions (ADRs) recorded during clinical trials are an important source of human phenotypic data. It is therefore essential to combine data on drugs, targets and side effects into a more complete picture of the therapeutic mechanism of actions of drugs and the ways in which they cause adverse reactions. To this end, we have created the SIDER (‘Side Effect Resource’, http://sideeffects.embl.de) database of drugs and ADRs. The current release, SIDER 4, contains data on 1430 drugs, 5880 ADRs and 140 064 drug–ADR pairs, which is an increase of 40% compared to the previous version. For more fine-grained analyses, we extracted the frequency with which side effects occur from the package inserts. This information is available for 39% of drug–ADR pairs, 19% of which can be compared to the frequency under placebo treatment. SIDER furthermore contains a data set of drug indications, extracted from the package inserts using Natural Language Processing. 
These drug indications are used to reduce the rate of false positives by identifying medical terms that do not correspond to ADRs.", "year": 2015, "venue": "Nucleic Acids Res.", "authors": [ "Michael Kuhn", "Ivica Letunic", "L. Jensen", "P. Bork" ], "externalIds": { "PubMedCentral": "4702794", "MAG": "2145578524", "DBLP": "journals/nar/KuhnLJB16", "DOI": "10.1093/nar/gkv1075", "CorpusId": 10442573, "PubMed": "26481350" }, "url": "https://www.semanticscholar.org/paper/c1e62b537f3d30018e7979a89b0e0f15e2b6eecc", "referenceCount": 18, "citationCount": 1029, "influentialCitationCount": 82, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Biology", "Computer Science" ] }, { "title": "Convolutional Networks on Graphs for Learning Molecular Fingerprints", "abstract": "We introduce a convolutional neural network that operates directly on graphs. These networks allow end-to-end learning of prediction pipelines whose inputs are graphs of arbitrary size and shape. The architecture we present generalizes standard molecular feature extraction methods based on circular fingerprints. We show that these data-driven features are more interpretable, and have better predictive performance on a variety of tasks.", "year": 2015, "venue": "Neural Information Processing Systems", "authors": [ "D. Duvenaud", "D. Maclaurin", "J. Aguilera-Iparraguirre", "Rafael Gómez-Bombarelli", "Timothy D. Hirzel", "Alán Aspuru-Guzik", "Ryan P. Adams" ], "externalIds": { "DBLP": "journals/corr/DuvenaudMAGHAA15", "MAG": "2173027866", "ArXiv": "1509.09292", "CorpusId": 1690180 }, "url": "https://www.semanticscholar.org/paper/5d1bfeed240709725c78bc72ea40e55410b373dc", "referenceCount": 31, "citationCount": 3185, "influentialCitationCount": 179, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "A Bayesian Approach to in Silico Blood-Brain Barrier Penetration Modeling", "abstract": "The human blood-brain barrier (BBB) is a membrane that protects the central nervous system (CNS) by restricting the passage of solutes. The development of any new drug must take into account its existence whether for designing new molecules that target components of the CNS or, on the other hand, to find new substances that should not penetrate the barrier. Several studies in the literature have attempted to predict BBB penetration, so far with limited success and few, if any, application to real world drug discovery and development programs. Part of the reason is due to the fact that only about 2% of small molecules can cross the BBB, and the available data sets are not representative of that reality, being generally biased with an over-representation of molecules that show an ability to permeate the BBB (BBB positives). To circumvent this limitation, the current study aims to devise and use a new approach based on Bayesian statistics, coupled with state-of-the-art machine learning methods to produce a robust model capable of being applied in real-world drug research scenarios. The data set used, gathered from the literature, totals 1970 curated molecules, one of the largest for similar studies. Random Forests and Support Vector Machines were tested in various configurations against several chemical descriptor set combinations. Models were tested in a 5-fold cross-validation process, and the best one tested over an independent validation set. 
The best fitted model produced an overall accuracy of 95%, with a mean square contingency coefficient (ϕ) of 0.74, and showing an overall capacity for predicting BBB positives of 83% and 96% for determining BBB negatives. This model was adapted into a Web based tool made available for the whole community at http://b3pp.lasige.di.fc.ul.pt.", "year": 2012, "venue": "Journal of Chemical Information and Modeling", "authors": [ "I. Martins", "Ana L. Teixeira", "Luis Pinheiro", "A. O. Falcão" ], "externalIds": { "MAG": "2406943157", "DBLP": "journals/jcisd/MartinsTPF12", "DOI": "10.1021/ci300124c", "CorpusId": 28029842, "PubMed": "22612593" }, "url": "https://www.semanticscholar.org/paper/e6749a56e8f9927f6968ac99ec3163a016168d5c", "referenceCount": 44, "citationCount": 247, "influentialCitationCount": 18, "isOpenAccess": false, "fieldsOfStudy": [ "Biology", "Medicine", "Computer Science" ] }, { "title": "ZINC: A Free Tool to Discover Chemistry for Biology", "abstract": "ZINC is a free public resource for ligand discovery. The database contains over twenty million commercially available molecules in biologically relevant representations that may be downloaded in popular ready-to-dock formats and subsets. The Web site also enables searches by structure, biological activity, physical property, vendor, catalog number, name, and CAS number. Small custom subsets may be created, edited, shared, docked, downloaded, and conveyed to a vendor for purchase. The database is maintained and curated for a high purchasing success rate and is freely available at zinc.docking.org.", "year": 2012, "venue": "Journal of Chemical Information and Modeling", "authors": [ "J. Irwin", "T. Sterling", "Michael M. Mysinger", "Erin S. Bolstad", "R. Coleman" ], "externalIds": { "MAG": "2027482274", "PubMedCentral": "3402020", "DBLP": "journals/jcisd/IrwinSMBC12", "DOI": "10.1021/ci3001277", "CorpusId": 9759396, "PubMed": "22587354" }, "url": "https://www.semanticscholar.org/paper/2c846c5ac7e8bd020b89f6c4767a01731114ee4d", "referenceCount": 42, "citationCount": 2302, "influentialCitationCount": 187, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Quantifying the chemical beauty of drugs.", "abstract": null, "year": 2012, "venue": "Nature Chemistry", "authors": [ "G. R. Bickerton", "Gaia V. Paolini", "J. Besnard", "S. Muresan", "Andrew L. Hopkins" ], "externalIds": { "MAG": "2034549041", "DOI": "10.1038/nchem.1243", "CorpusId": 205289650, "PubMed": "22270643" }, "url": "https://www.semanticscholar.org/paper/11bce438e2fdc7a7ae5f65c339c757f386f4f48a", "referenceCount": 53, "citationCount": 1496, "influentialCitationCount": 141, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Chemistry" ] }, { "title": "Maximum Unbiased Validation (MUV) Data Sets for Virtual Screening Based on PubChem Bioactivity Data", "abstract": "Refined nearest neighbor analysis was recently introduced for the analysis of virtual screening benchmark data sets. It constitutes a technique from the field of spatial statistics and provides a mathematical framework for the nonparametric analysis of mapped point patterns. Here, refined nearest neighbor analysis is used to design benchmark data sets for virtual screening based on PubChem bioactivity data. A workflow is devised that purges data sets of compounds active against pharmaceutically relevant targets from unselective hits. 
Topological optimization using experimental design strategies monitored by refined nearest neighbor analysis functions is applied to generate corresponding data sets of actives and decoys that are unbiased with regard to analogue bias and artificial enrichment. These data sets provide a tool for Maximum Unbiased Validation (MUV) of virtual screening methods. The data sets and a software package implementing the MUV design workflow are freely available at http://www.pharmchem.tu-bs.de/lehre/baumann/MUV.html.", "year": 2009, "venue": "Journal of Chemical Information and Modeling", "authors": [ "Sebastian G. Rohrer", "K. Baumann" ], "externalIds": { "MAG": "1964513093", "DBLP": "journals/jcisd/RohrerB09", "DOI": "10.1021/ci8002649", "CorpusId": 206885721, "PubMed": "19434821" }, "url": "https://www.semanticscholar.org/paper/2345e46e9b284a7b52d034c21023c2682c3d4818", "referenceCount": 0, "citationCount": 315, "influentialCitationCount": 25, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "On the Art of Compiling and Using 'Drug‐Like' Chemical Fragment Spaces", "abstract": "Ever since the first rational approaches to the discovery of promising lead candidate structures were applied, it has been a challenge for both medicinal and computational chemists to assess, generate, and combine promising structural motifs to form new and potent chemical entities for biological screening against potential drug targets. Many scientists have committed themselves to the analysis and identification of valuable chemical building blocks and have also developed strategies on how to best recombine them. In this context, the retrosynthetic fragmentation and recombination of chemical motifs derived from known inhibitors is a common and well-known procedure. Meanwhile, fragment-based approaches have become established and valuable processes in pharmaceutical lead discovery and validation. Several application studies have yielded promising lead candidates. Chemical space is huge. Corporate as well as public databases are in the millions and are still increasing in size in order to cover a larger part of the chemical universe. For several good reasons, there is the common trend to standardize experimental and computational protocols in pharmaceutical research. This trend demands systematic and consistent approaches, although they can hardly match the creativity and intuition of medicinal chemists. Consequently, they can and should not substitute, but rather assist, the expert in this task. The most prominent automated example for fragment generation is the retrosynthetic combinatorial analysis procedure (RECAP). It was the first of its kind to apply 11 distinct rules that were supposed to model chemical motifs that could easily be formed by combinatorial chemistry. In this context, the “fragment space” concept was introduced. In contrast to a fragment library, such a space consists not only of a set of fragments, but also of a set of rules that specifies how to recombine fragments by fusing the respective chemical motifs. RECAP is widely used and often referred to, yet even though authors frequently state to have used modified improved versions of the original, actual publications that communicate the extensions that were carried out are rare. An extension of the fragment space concept was recently published, but with a focus on obtaining scaffolds and not on retaining supposedly ’drug-like’ substituents or functional groups. 
Apart from that, the question remains what a ’drug-like’ fragment space actually is, and whether or not ’drug-likeness’ depends on the origin of the fragments: that is, if they necessarily have to be derived from drugs. In this context, it is highly interesting and important to measure the extent and accuracy with which current models and methods are able to represent the available chemical space. In an attempt to improve existing approaches for the automatic decomposition of molecules into fragments, we compiled a new and more elaborate set of rules for the breaking of retrosynthetically interesting chemical substructures (BRICS) and used this for obtaining fragments from biologically active compounds and vendor catalogue sources. Based on this, we compiled corresponding fragment spaces by specifying a complementary set of rules for the recombination of the corresponding chemical motifs. Furthermore, we put considerable effort into compiling a set of high-quality, high-performance, and, in contrast to all other approaches, publicly available fragments that are meant to serve as a possible basis for various molecular design objectives and techniques. We incorporated more elaborate medicinal chemistry concepts and, for example, modeled explicit isosteric replacements for cyclic and acyclic cases and further distinguished activated from inactivated heterocyclic ring systems and their corresponding substituents. Overall, this work led us to more comprehensive sets of fragments, and the corresponding fragment spaces show a significant increase in performance over existing methods. Moreover, by incorporating fragments from vendor catalogue sources, the performance can be increased even further. The shredding procedure we used for BRICS applies all possible retrosynthetic cuts simultaneously, which avoids the generation of overlapping (redundant) fragments. This is in accordance with RECAP and simplifies the calculation later on. Scheme 1 shows a simple fragmentation example and highlights the key steps. In addition to splitting retrosynthetically relevant bonds, we directly included substructure filters into the shredding procedure to avoid the generation of unwanted chemical motifs as well as small terminal fragments such as single hydrogen and halogen atoms, hydroxy, nitro, carboxylate, methoxy, methyl, ethyl, and isopropyl groups. These motifs are therefore discarded or left uncleaved, respectively. The BRICS model consists of 16 chemical environments indicated by link atoms of different types. The corresponding fragment prototypes are depicted in Scheme 2 and show only the direct chemical environment of the cleavage sites for reasons of simplicity. Therefore, the diversity of the fragments is within the R groups that can also contain further links. Note that the carbonyl and alkyl fragments are shown twice (L1/L6, L4/L8). This is because we wanted to keep track of their origin for medicinal chemistry and modeling reasons, that is, whether they appeared as cyclic or acyclic substituents or linkers.", "year": 2008, "venue": "ChemMedChem", "authors": [ "J. Degen", "C. Wegscheid‐Gerlach", "A. Zaliani", "M. Rarey" ], "externalIds": { "MAG": "2022476850", "DOI": "10.1002/cmdc.200800178", "CorpusId": 36771178, "PubMed": "18792903" }, "url": "https://www.semanticscholar.org/paper/593315087f07c6143a5ff1adf5be8c25c8fdc979", "referenceCount": 19, "citationCount": 343, "influentialCitationCount": 24, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Fast calculation of molecular polar surface area as a sum of fragment-based contributions and its application to the prediction of drug transport properties.", "abstract": "Molecular polar surface area (PSA), i.e., surface belonging to polar atoms, is a descriptor that was shown to correlate well with passive molecular transport through membranes and, therefore, allows prediction of transport properties of drugs. The calculation of PSA, however, is rather time-consuming because of the necessity to generate a reasonable 3D molecular geometry and the calculation of the surface itself. A new approach for the calculation of the PSA is presented here, based on the summation of tabulated surface contributions of polar fragments. The method, termed topological PSA (TPSA), provides results which are practically identical with the 3D PSA (the correlation coefficient between 3D PSA and fragment-based TPSA for 34 810 molecules from the World Drug Index is 0.99), while the computation speed is 2-3 orders of magnitude faster. The new methodology may, therefore, be used for fast bioavailability screening of virtual libraries having millions of molecules. This article describes the new methodology and shows the results of validation studies based on sets of published absorption data, including intestinal absorption, Caco-2 monolayer penetration, and blood-brain barrier penetration.", "year": 2000, "venue": "Journal of Medicinal Chemistry", "authors": [ "P. Ertl", "B. Rohde", "P. Selzer" ], "externalIds": { "MAG": "2033495141", "DOI": "10.1021/JM000942E", "CorpusId": 20924493, "PubMed": "11020286" }, "url": "https://www.semanticscholar.org/paper/8b17e51e5e8751ec7c9f580d1164af54e4134d9c", "referenceCount": 17, "citationCount": 2486, "influentialCitationCount": 100, "isOpenAccess": false, "fieldsOfStudy": [ "Chemistry", "Medicine" ] }, { "title": "Partition coefficients and their uses", "abstract": null, "year": 1971, "venue": "", "authors": [ "A. Leo", "C. Hansch", "David Elkins" ], "externalIds": { "MAG": "2067468321", "DOI": "10.1021/CR60274A001", "CorpusId": 98708739 }, "url": "https://www.semanticscholar.org/paper/95dd11684cbb28558dc9884ffbf79d159caaae64", "referenceCount": 0, "citationCount": 4127, "influentialCitationCount": 72, "isOpenAccess": false, "fieldsOfStudy": [ "Chemistry" ] }, { "title": "Injecting Multimodal Information into Rigid Protein Docking via Bi-level Optimization", "abstract": "The structure of protein-protein complexes is critical for understanding binding dynamics, biological mechanisms, and intervention strategies. Rigid protein docking, a fundamental problem in this field, aims to predict the 3D structure of complexes from their unbound states without conformational changes. 
In this scenario, we have access to two types of valuable information: sequence-modal information, such as coevolutionary data obtained from multiple sequence alignments, and structure-modal information, including the 3D conformations of rigid structures. However, existing docking methods typically utilize single-modal information, resulting in suboptimal predictions. In this paper, we propose xTrimoBiDock α (or BiDock for short), a novel rigid docking model that effectively integrates sequence- and structure-modal information through bi-level optimization. Specifically, a cross-modal transformer combines multimodal information to predict an inter-protein distance map. To achieve rigid docking, the roto-translation transformation is optimized to align the docked pose with the predicted distance map. In order to tackle this bi-level optimization problem, we unroll the gradient descent of the inner loop and further derive a better initialization for roto-translation transformation based on spectral estimation. Compared to baselines, BiDock achieves a promising result of a maximum 234% relative improvement in challenging antibody-antigen docking problem.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Ruijia Wang", "YiWu Sun", "Yujie Luo", "Shaochuan Li", "Cheng Yang", "Xingyi Cheng", "Hui Li", "Chuan Shi", "Le Song" ], "externalIds": { "DBLP": "conf/nips/WangSLLYCLSS23", "CorpusId": 268042260 }, "url": "https://www.semanticscholar.org/paper/a03098f19ac60a2888d251eaaf914b43bf160d2b", "referenceCount": 61, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MoCL: Contrastive Learning on Molecular Graphs with Multi-level Domain Knowledge", "abstract": "Recent years have seen a rapid growth of utilizing graph neural networks (GNNs) in the biomedical domain for tackling drug-related problems. However, like any other deep architectures, GNNs are data hungry. While requiring labels in real world is often expensive, pretraining GNNs in an unsupervised manner has been actively explored. Among them, graph contrastive learning, by maximizing the mutual information between paired graph augmentations, has been shown to be effective on various downstream tasks. However, the current graph contrastive learning framework has two limitations. First, the augmentations are designed for general graphs and thus may not be suitable or powerful enough for certain domains. Second, the contrastive scheme only learns representations that are invariant to local perturbations and thus does not consider the global structure of the dataset, which may also be useful for down-stream tasks. Therefore, in this paper, we study graph contrastive learning in the context of biomedical domain, where molecular graphs are present. We propose a novel framework called MoCL, which utilizes domain knowledge at both local- and global-level to assist representation learning. The local-level domain knowledge guides the augmentation process such that variation is introduced without changing graph semantics. The global-level knowledge encodes the similarity information between graphs in the entire dataset and helps to learn representations with richer semantics. The entire model is learned through a double contrast objective. 
We evaluate MoCL on various molecular datasets under both linear and semi-supervised settings and results show that MoCL achieves state-of-the-art performance.", "year": 2021, "venue": "arXiv.org", "authors": [ "Mengying Sun", "Jing Xing", "Huijun Wang", "Bin Chen", "Jiayu Zhou" ], "externalIds": { "DBLP": "journals/corr/abs-2106-04509", "CorpusId": 235367726 }, "url": "https://www.semanticscholar.org/paper/ce91e8cfa68fc4e70b82685e7bd30399e21543f4", "referenceCount": 50, "citationCount": 43, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MolCLR: Molecular Contrastive Learning of Representations via Graph Neural Networks", "abstract": "Molecular machine learning bears promise for efficient molecule property prediction and drug discovery. However, due to the limited labeled data and the giant chemical space, machine learning models trained via supervised learning perform poorly in generalization. This greatly limits the applications of machine learning methods for molecular design and discovery. In this work, we present MolCLR : Molecular Contrastive Learning of Representations via Graph Neural Networks (GNNs), a self-supervised learning framework for large unlabeled molecule datasets. Specifically, we first build a molecular graph, where each node represents an atom and each edge represents a chemical bond. A GNN is then used to encode the molecule graph. We propose three novel molecule graph augmentations: atom masking, bond deletion, and subgraph removal. A contrastive estimator is utilized to maximize the agreement of different graph augmentations from the same molecule. Experiments show that molecule representations learned by MolCLR can be transferred to multiple downstream molecular property prediction tasks. Our method thus achieves state-of-the-art performance on many challenging datasets. We also prove the efficiency of our proposed molecule graph augmentations on supervised molecular classification tasks.", "year": 2021, "venue": "arXiv.org", "authors": [ "Yuyang Wang", "Jianren Wang", "Zhonglin Cao", "A. 
Farimani" ], "externalIds": { "DBLP": "journals/corr/abs-2102-10056", "CorpusId": 231979472 }, "url": "https://www.semanticscholar.org/paper/0e81221c0df40f2b49d7e421aa738173e0e01469", "referenceCount": 97, "citationCount": 60, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "why should I trust you?", "abstract": null, "year": 2016, "venue": "Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, San Francisco, CA, USA, August 13-17, 2016", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Aids antiviral screen data", "abstract": null, "year": 2015, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Tox21 Data Challenge", "abstract": null, "year": 2014, "venue": "Tox21 data challenge", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Visualizing Data using t-SNE", "abstract": "We present a new technique called “t-SNE” that visualizes high-dimensional data by giving each datapoint a location in a two or three-dimensional map. The technique is a variation of Stochastic Neighbor Embedding (Hinton and Roweis, 2002) that is much easier to optimize, and produces significantly better visualizations by reducing the tendency to crowd points together in the center of the map. t-SNE is better than existing techniques at creating a single map that reveals structure at many different scales. This is particularly important for high-dimensional data that lie on several different, but related, low-dimensional manifolds, such as images of objects from multiple classes seen from multiple viewpoints. For visualizing the structure of very large datasets, we show how t-SNE can use random walks on neighborhood graphs to allow the implicit structure of all of the data to influence the way in which a subset of the data is displayed. We illustrate the performance of t-SNE on a wide variety of datasets and compare it with many other non-parametric visualization techniques, including Sammon mapping, Isomap, and Locally Linear Embedding. The visualizations produced by t-SNE are significantly better than those produced by the other techniques on almost all of the datasets.", "year": 2008, "venue": "", "authors": [ "L. Maaten", "Geoffrey E. 
Hinton" ], "externalIds": { "MAG": "2187089797", "CorpusId": 5855042 }, "url": "https://www.semanticscholar.org/paper/1c46943103bd7b7a2c7be86859995a4144d1938b", "referenceCount": 41, "citationCount": 37233, "influentialCitationCount": 1191, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] } ] }, "Wormhole: Concept-Aware Deep Representation Learning for Co-Evolving Sequences": { "paper_title": "Wormhole: Concept-Aware Deep Representation Learning for Co-Evolving Sequences", "arxiv_id": "2409.13857v1", "keyword": "representation learning", "authors": [ "Kunpeng Xu", "Lifei Chen", "Shengrui Wang" ], "references": [ { "title": "Deep learning-based snore sound analysis for the detection of night-time breathing disorders", "abstract": "Snoring, a prevalent symptom of obstructive sleep apnea, is believed to impact 57% of men and 40% of women in the United States. Night-time breathing disorders present significant challenges to both diagnosis and treatment, impacting millions of individuals worldwide. Traditional methods like CPAP machines and lifestyle changes face barriers such as discomfort, low adherence, and high costs, prompting the need for innovative solutions. This paper presents a novel approach using artificial intelligence, specifically deep learning, to create a snore sound analysis-based alerting system. This system aims to detect sleep disorders by analyzing snore patterns, providing a non-intrusive, cost-effective, and user-friendly alternative to traditional methods. By training models on snore sound characteristics, we've achieved promising results in identifying sleep apnea, showcasing the potential of this system in transforming the detection and management of night-time breathing disorders.", "year": 2024, "venue": "Applied and Computational Engineering", "authors": [ "Bo Dang", "Danqing Ma", "Shaojie Li", "Zongqing Qi", "Elly Yijun Zhu" ], "externalIds": { "DOI": "10.54254/2755-2721/76/20240574", "CorpusId": 271227070 }, "url": "https://www.semanticscholar.org/paper/2c61f14299992b61fcfbee440f9f943957e9e07f", "referenceCount": 0, "citationCount": 10, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Learning Flexible Time-windowed Granger Causality Integrating Heterogeneous Interventional Time Series Data", "abstract": "Granger causality, commonly used for inferring causal structures from time series data, has been adopted in widespread applications across various fields due to its intuitive explainability and high compatibility with emerging deep neural network prediction models. To alleviate challenges in better deciphering causal structures unambiguously from time series, the use of interventional data has become a practical approach. However, existing methods have yet to be explored in the context of imperfect interventions with unknown targets, which are more common and often more beneficial in a wide range of real-world applications. Additionally, the identifiability issues of Granger causality with unknown interventional targets in complex network models remain unsolved. Our work presents a theoretically-grounded method that infers Granger causal structure and identifies unknown targets by leveraging heterogeneous interventional time series data. We further illustrate that learning Granger causal structure and recovering interventional targets can mutually promote each other. 
Comparative experiments demonstrate that our method outperforms several robust baseline methods in learning Granger causal structure from interventional time series data.", "year": 2024, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Ziyi Zhang", "Shaogang Ren", "Xiaoning Qian", "Nicholas Duffield" ], "externalIds": { "DBLP": "journals/corr/abs-2406-10419", "ArXiv": "2406.10419", "DOI": "10.1145/3637528.3672023", "CorpusId": 270559950 }, "url": "https://www.semanticscholar.org/paper/5eb0a1d42716d76992289f4d79598afa692f3a08", "referenceCount": 51, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Analyzing the factors that are involved in length of inpatient stay at the hospital for diabetes patients", "abstract": "The paper investigates the escalating concerns surrounding the surge in diabetes cases, exacerbated by the COVID-19 pandemic, and the subsequent strain on medical resources. The research aims to construct a predictive model quantifying factors influencing inpatient hospital stay durations for diabetes patients, offering insights to hospital administrators for improved patient management strategies. The literature review highlights the increasing prevalence of diabetes, emphasizing the need for continued attention and analysis of urban-rural disparities in healthcare access. International studies underscore the financial implications and healthcare burden associated with diabetes-related hospitalizations and complications, emphasizing the significance of effective management strategies. The methodology involves a quantitative approach, utilizing a dataset comprising 10,000 observations of diabetic inpatient encounters in U.S. hospitals from 1999 to 2008. Predictive modeling techniques, particularly Generalized Linear Models (GLM), are employed to develop a model predicting hospital stay durations based on patient demographics, admission types, medical history, and treatment regimen. The results highlight the influence of age, medical history, and treatment regimen on hospital stay durations for diabetes patients. Despite model limitations, such as heteroscedasticity and deviations from normality in residual analysis, the findings offer valuable insights for hospital administrators in patient management. The paper concludes with recommendations for future research to address model limitations and explore the implications of predictive models on healthcare management strategies, ensuring equitable patient care and resource allocation.", "year": 2024, "venue": "arXiv.org", "authors": [ "Jorden Lam", "Kunpeng Xu" ], "externalIds": { "ArXiv": "2406.05189", "DBLP": "journals/corr/abs-2406-05189", "DOI": "10.48550/arXiv.2406.05189", "CorpusId": 270371291 }, "url": "https://www.semanticscholar.org/paper/edd4ccdf12bbf5ae7d35e4581afd811e4e7e77b2", "referenceCount": 25, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Kolmogorov-Arnold Networks for Time Series: Bridging Predictive Power and Interpretability", "abstract": "Kolmogorov-Arnold Networks (KAN) is a groundbreaking model recently proposed by the MIT team, representing a revolutionary approach with the potential to be a game-changer in the field. This innovative concept has rapidly garnered worldwide interest within the AI community. 
Inspired by the Kolmogorov-Arnold representation theorem, KAN utilizes spline-parametrized univariate functions in place of traditional linear weights, enabling them to dynamically learn activation patterns and significantly enhancing interpretability. In this paper, we explore the application of KAN to time series forecasting and propose two variants: T-KAN and MT-KAN. T-KAN is designed to detect concept drift within time series and can explain the nonlinear relationships between predictions and previous time steps through symbolic regression, making it highly interpretable in dynamically changing environments. MT-KAN, on the other hand, improves predictive performance by effectively uncovering and leveraging the complex relationships among variables in multivariate time series. Experiments validate the effectiveness of these approaches, demonstrating that T-KAN and MT-KAN significantly outperform traditional methods in time series forecasting tasks, not only enhancing predictive accuracy but also improving model interpretability. This research opens new avenues for adaptive forecasting models, highlighting the potential of KAN as a powerful and interpretable tool in predictive analytics.", "year": 2024, "venue": "arXiv.org", "authors": [ "Kunpeng Xu", "Lifei Chen", "Shengrui Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2406-02496", "ArXiv": "2406.02496", "DOI": "10.48550/arXiv.2406.02496", "CorpusId": 270226403 }, "url": "https://www.semanticscholar.org/paper/10145b2238569436754c4d9be3f9c7db501cc65c", "referenceCount": 24, "citationCount": 19, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Real-Time Pill Identification for the Visually Impaired Using Deep Learning", "abstract": "The prevalence of mobile technology offers unique opportunities for addressing healthcare challenges, especially for individuals with visual impairments. This paper explores the development and implementation of a deep learning-based mobile application designed to assist blind and visually impaired individuals in real-time pill identification. Utilizing the YOLO framework, the application aims to accurately recognize and differentiate between various pill types through real-time image processing on mobile devices. The system incorporates Text-to-Speech (TTS) to provide immediate auditory feedback, enhancing usability and independence for visually impaired users. 
Our study evaluates the application's effectiveness in terms of detection accuracy and user experience, highlighting its potential to improve medication management and safety among the visually impaired community.", "year": 2024, "venue": "2024 6th International Conference on Communications, Information System and Computer Engineering (CISCE)", "authors": [ "Bo Dang", "Wenchao Zhao", "Yufeng Li", "Danqing Ma", "Qixuan Yu", "Elly Yijun Zhu" ], "externalIds": { "ArXiv": "2405.05983", "DBLP": "journals/corr/abs-2405-05983", "DOI": "10.1109/CISCE62493.2024.10653353", "CorpusId": 269740928 }, "url": "https://www.semanticscholar.org/paper/c08e81b4a74a62729139975e5809d91e6b38e655", "referenceCount": 28, "citationCount": 17, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Towards Invariant Time Series Forecasting in Smart Cities", "abstract": "In the transformative landscape of smart cities, the integration of the cutting-edge web technologies into time series forecasting presents a pivotal opportunity to enhance urban planning, sustainability, and economic growth. The advancement of deep neural networks has significantly improved forecasting performance. However, a notable challenge lies in the ability of these models to generalize well to out-of-distribution (OOD) time series data. The inherent spatial heterogeneity and domain shifts across urban environments create hurdles that prevent models from adapting and performing effectively in new urban environments. To tackle this problem, we propose a solution to derive invariant representations for more robust predictions under different urban environments instead of relying on spurious correlation across urban environments for better generalizability. Through extensive experiments on both synthetic and real-world data, we demonstrate that our proposed method outperforms traditional time series forecasting models when tackling domain shifts in changing urban environments. The effectiveness and robustness of our method can be extended to diverse fields including climate modeling, urban planning, and smart city resource management.", "year": 2024, "venue": "The Web Conference", "authors": [ "Ziyi Zhang", "Shaogang Ren", "Xiaoning Qian", "Nicholas Duffield" ], "externalIds": { "ArXiv": "2405.05430", "DBLP": "journals/corr/abs-2405-05430", "DOI": "10.1145/3589335.3651897", "CorpusId": 269635409 }, "url": "https://www.semanticscholar.org/paper/51e5deb1a87fed68818d4d2d77c92f405590679c", "referenceCount": 15, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fostc3net: A Lightweight YOLOv5 Based On the Network Structure Optimization", "abstract": "\n Transmission line detection technology is crucial for automatic monitoring and ensuring the safety of electrical facilities. The YOLOv5 series is currently one of the most advanced and widely used methods for object detection. However, it faces inherent challenges, such as high computational load on devices and insufficient detection accuracy. To address these concerns, this paper presents an enhanced lightweight YOLOv5 technique customized for mobile devices, specifically intended for identifying objects associated with transmission lines. The C3Ghost module is integrated into the convolutional network of YOLOv5 to reduce floating point operations per second (FLOPs) in the feature channel fusion process and improve feature expression performance. 
In addition, a FasterNet module is introduced to replace the c3 module in the YOLOv5 Backbone. The FasterNet module uses Partial Convolutions to process only a portion of the input channels, improving feature extraction efficiency and reducing computational overhead. To address the imbalance between simple and challenging samples in the dataset and the diversity of aspect ratios of bounding boxes, the wIoU v3 LOSS is adopted as the loss function. To validate the performance of the proposed approach, Experiments are conducted on a custom dataset of transmission line poles. The results show that the proposed model achieves a 1% increase in detection accuracy, a 13% reduction in FLOPs, and a 26% decrease in model parameters compared to the existing YOLOv5. In the ablation experiment, it was also discovered that while the Fastnet module and the CSghost module improved the precision of the original YOLOv5 baseline model, they caused a decrease in the mAP@.5-.95 metric. However, the improvement of the wIoUv3 loss function significantly mitigated the decline of the mAP@.5-.95 metric.", "year": 2024, "venue": "Journal of Physics: Conference Series", "authors": [ "Danqing Ma", "Shaojie Li", "Bo Dang", "Hengyi Zang", "Xinqi Dong" ], "externalIds": { "ArXiv": "2403.13703", "DBLP": "journals/corr/abs-2403-13703", "DOI": "10.1088/1742-6596/2824/1/012004", "CorpusId": 268537353 }, "url": "https://www.semanticscholar.org/paper/697a6daec01317c996558acf8dbfb445059f3e38", "referenceCount": 11, "citationCount": 15, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multiple distresses detection for Asphalt Pavement using improved you Only Look Once Algorithm based on convolutional neural network", "abstract": null, "year": 2024, "venue": "International Journal of Pavement Engineering", "authors": [ "Han-Cheng Dan", "Peng Yan", "Jiawei Tan", "Yinchao Zhou", "Bingjie Lu" ], "externalIds": { "DOI": "10.1080/10298436.2024.2308169", "CorpusId": 268218823 }, "url": "https://www.semanticscholar.org/paper/279037411b68e258e4750e21199ee630723168ec", "referenceCount": 16, "citationCount": 10, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Evaluation of asphalt pavement texture using multiview stereo reconstruction based on deep learning", "abstract": null, "year": 2024, "venue": "Construction and Building Materials", "authors": [ "Han-Cheng Dan", "Bingjie Lu", "Mengyu Li" ], "externalIds": { "DOI": "10.1016/j.conbuildmat.2023.134837", "CorpusId": 266797259 }, "url": "https://www.semanticscholar.org/paper/012c52fa73d2abc04c994d572c2f282dbcba1d85", "referenceCount": 59, "citationCount": 12, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Online Boosting Adaptive Learning under Concept Drift for Multistream Classification", "abstract": "Multistream classification poses significant challenges due to the necessity for rapid adaptation in dynamic streaming processes with concept drift. Despite the growing research outcomes in this area, there has been a notable oversight regarding the temporal dynamic relationships between these streams, leading to the issue of negative transfer arising from irrelevant data. In this paper, we propose a novel Online Boosting Adaptive Learning (OBAL) method that effectively addresses this limitation by adaptively learning the dynamic correlation among different streams. 
Specifically, OBAL operates in a dual-phase mechanism, in the first of which we design an Adaptive COvariate Shift Adaptation (AdaCOSA) algorithm to construct an initialized ensemble model using archived data from various source streams, thus mitigating the covariate shift while learning the dynamic correlations via an adaptive re-weighting strategy. During the online process, we employ a Gaussian Mixture Model-based weighting mechanism, which is seamlessly integrated with the acquired correlations via AdaCOSA to effectively handle asynchronous drift. This approach significantly improves the predictive performance and stability of the target stream. We conduct comprehensive experiments on several synthetic and real-world data streams, encompassing various drifting scenarios and types. The results clearly demonstrate that OBAL achieves remarkable advancements in addressing multistream classification problems by effectively leveraging positive knowledge derived from multiple sources.", "year": 2023, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Enshui Yu", "Jie Lu", "Bin Zhang", "Guangquan Zhang" ], "externalIds": { "DBLP": "conf/aaai/YuLZ024", "ArXiv": "2312.10841", "DOI": "10.48550/arXiv.2312.10841", "CorpusId": 266359209 }, "url": "https://www.semanticscholar.org/paper/ef4f93a84c88359e706598862254f4e57c3ca9fd", "referenceCount": 37, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Location-Aware Social Network Recommendation via Temporal Graph Networks", "abstract": "In the data-driven era, recommendations have become indispensable across various systems. Graphs, as versatile data structures, shine at abstracting complex systems. Many real-world scenarios effortlessly translate into graphs, representing individuals and their relationships as nodes and edges. Link prediction, a cornerstone of recommendations, excels in forecasting future network connections based on current structures. Its applications span diverse domains, including social networks, biological networks, and network security. Previous studies have leveraged classification algorithms like logistic regression and random forest, often complemented by node embedding techniques, yielding impressive results in addressing the challenge of link prediction. Today's dynamic networks continually reshape connections, introducing new links and nodes while removing others. Furthermore, the inclusion of location information associated with nodes provides a new opportunity. Adapting models to this dynamism necessitates capturing spatial and temporal dependencies for sustained effectiveness. In this paper, we undertake a comprehensive evaluation of various algorithms for link prediction. Subsequently, we further enriched the continuous-time dynamic graph networks by incorporating essential location information. This strategic enhancement results in a remarkable performance improvement, highlighting the crucial role of location-based temporal data in improving recommendations. 
It emphasizes the untapped potential of location and temporal information in refining user recommendations within interconnected networks.", "year": 2023, "venue": "LocalRec@SIGSPATIAL", "authors": [ "Ziyi Zhang", "Diya Li", "Zhenlei Song", "Nicholas Duffield", "Zhe Zhang" ], "externalIds": { "DBLP": "conf/localrec/ZhangLSD023", "DOI": "10.1145/3615896.3628342", "CorpusId": 265507222 }, "url": "https://www.semanticscholar.org/paper/398b363dc3530b6346f806a9d3f5e59e5cbfd665", "referenceCount": 11, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DRNet: A Decision-Making Method for Autonomous Lane Changingwith Deep Reinforcement Learning", "abstract": "Machine learning techniques have outperformed numerous rule-based methods for decision-making in autonomous vehicles. Despite recent efforts, lane changing remains a major challenge, due to the complex driving scenarios and changeable social behaviors of surrounding vehicles. To help improve the state of the art, we propose to leveraging the emerging \\underline{D}eep \\underline{R}einforcement learning (DRL) approach for la\\underline{NE} changing at the \\underline{T}actical level. To this end, we present\"DRNet\", a novel and highly efficient DRL-based framework that enables a DRL agent to learn to drive by executing reasonable lane changing on simulated highways with an arbitrary number of lanes, and considering driving style of surrounding vehicles to make better decisions. Furthermore, to achieve a safe policy for decision-making, DRNet incorporates ideas from safety verification, the most important component of autonomous driving, to ensure that only safe actions are chosen at any time. The setting of our state representation and reward function enables the trained agent to take appropriate actions in a real-world-like simulator. Our DRL agent has the ability to learn the desired task without causing collisions and outperforms DDQN and other baseline models.", "year": 2023, "venue": "arXiv.org", "authors": [ "Kunpeng Xu", "Lifei Chen", "Shengrui Wang" ], "externalIds": { "ArXiv": "2311.01602", "DBLP": "journals/corr/abs-2311-01602", "DOI": "10.48550/arXiv.2311.01602", "CorpusId": 265019441 }, "url": "https://www.semanticscholar.org/paper/a811257533d8d95c60e778aff0e0cbaab98aa715", "referenceCount": 26, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SQBA: sequential query-based blackbox attack", "abstract": "Many existing approaches to blackbox adversarial attacks follow attack strategies with predefined priori which are fixed throughout the process. As a result, they often require an excessive number of queries against the victim models to succeed. In this paper, we proposed a new attacking paradigm that better resembles real-world attacks in practical settings, where an agent (i.e., attacker) approaches the attack by taking actions (i.e., perturbations to the source image) through sequential interactions with the environment (i.e., the victim model) to achieve maximum rewards (i.e., the success of attack with the minimum number of queries). Naturally, as any action the agent chooses to take would alter the query image and change the state of the attack, the agent needs to adapt its policy accordingly along the trajectory instead of applying a predefined strategy unanimously. 
As an instantiation, we propose a “sequential query-based boundary blackbox attack” (SQBA), which learns a policy to adaptively select from a set of candidates attacking methods and then follow the selected method to apply one attack at each step. For demonstration, we restrict the candidate to subspace-based boundary attack methods. We show that the policy can be learned effectively with a variety of approaches, including imitation learning, policy optimization, and an ensemble of both. Extensive experiments on four benchmark datasets (MNIST, CIFAR-10, CelebA, and ImageNet) show that SQBA can significantly reduce the query complexity under different settings compared with baselines while keeping a 100% attack success rate. In addition, we find that the Reinforcement Learning agent as an ensemble of TRPO and BiLSTM performs the best among different agents.", "year": 2023, "venue": "International Conference on Artificial Intelligence and Computer Science", "authors": [ "Yiyi Tao" ], "externalIds": { "DOI": "10.1117/12.3009240", "CorpusId": 264170820 }, "url": "https://www.semanticscholar.org/paper/0d3332fdbcb870ff59c9816b166386b95cf5076d", "referenceCount": 26, "citationCount": 10, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Engineering" ] }, { "title": "OneNet: Enhancing Time Series Forecasting Models under Concept Drift by Online Ensembling", "abstract": "Online updating of time series forecasting models aims to address the concept drifting problem by efficiently updating forecasting models based on streaming data. Many algorithms are designed for online time series forecasting, with some exploiting cross-variable dependency while others assume independence among variables. Given every data assumption has its own pros and cons in online time series modeling, we propose \\textbf{On}line \\textbf{e}nsembling \\textbf{Net}work (OneNet). It dynamically updates and combines two models, with one focusing on modeling the dependency across the time dimension and the other on cross-variate dependency. Our method incorporates a reinforcement learning-based approach into the traditional online convex programming framework, allowing for the linear combination of the two models with dynamically adjusted weights. OneNet addresses the main shortcoming of classical online learning methods that tend to be slow in adapting to the concept drift. Empirical results show that OneNet reduces online forecasting error by more than $\\mathbf{50\\%}$ compared to the State-Of-The-Art (SOTA) method. The code is available at \\url{https://github.com/yfzhang114/OneNet}.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Yifan Zhang", "Qingsong Wen", "Xue Wang", "Weiqiu Chen", "Liang Sun", "Z. Zhang", "Liang Wang", "Rong Jin", "Tien-Ping Tan" ], "externalIds": { "DBLP": "journals/corr/abs-2309-12659", "ArXiv": "2309.12659", "DOI": "10.48550/arXiv.2309.12659", "CorpusId": 262217049 }, "url": "https://www.semanticscholar.org/paper/ff84ee4199dd8413601a0b5cc170547266abc522", "referenceCount": 71, "citationCount": 13, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Meta Learning Enabled Adversarial Defense", "abstract": "Machine learning (ML) models have been found to be vulnerable against adversarial attacks recently. Such examples are carefully crafted instances with a small magnitude of perturbation added to mislead the target ML models. 
This has raised great concerns and different approaches have been put forward to defend against these malicious adversarial attacks, among which adversarial training so far has been shown to be very effective empirically. However, one major limitation of adversarial training is that it requires exploring diverse factors such as different attacks and datasets to achieve good performance, which significantly limits its practical usage. We propose a novel and effective meta-learning based adversarial training framework named Meta-Adv in this work. We leverage the knowledge obtained from past tasks that are trained on a subset of attacks and datasets and show that such knowledge is sufficient to improve the learning robustness against different unforeseen adversarial attacks. We conducted extensive experiments on different datasets including CIFAR-10 and ImageNet and show that the proposed method outperforms the state-of-the-art adversarial training methods significantly, including the winners in the CAAD adversarial defense competition. Our method provides a simple and effective baseline to train robust ML models against adversarial attacks.", "year": 2023, "venue": "2023 IEEE International Conference on Sensors, Electronics and Computer Engineering (ICSECE)", "authors": [ "Yiyi Tao" ], "externalIds": { "DOI": "10.1109/ICSECE58870.2023.10263390", "CorpusId": 263230755 }, "url": "https://www.semanticscholar.org/paper/59dca4c44900f8bff756d2212246521eb9e86c8c", "referenceCount": 11, "citationCount": 16, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "ZeroPrompt: Streaming Acoustic Encoders are Zero-Shot Masked LMs", "abstract": "In this paper, we present ZeroPrompt (Figure 1-(a)) and the corresponding Prompt-and-Refine strategy (Figure 3), two simple but effective \\textbf{training-free} methods to decrease the Token Display Time (TDT) of streaming ASR models \\textbf{without any accuracy loss}. The core idea of ZeroPrompt is to append zeroed content to each chunk during inference, which acts like a prompt to encourage the model to predict future tokens even before they were spoken. We argue that streaming acoustic encoders naturally have the modeling ability of Masked Language Models and our experiments demonstrate that ZeroPrompt is engineering cheap and can be applied to streaming acoustic encoders on any dataset without any accuracy loss. Specifically, compared with our baseline models, we achieve 350 $\\sim$ 700ms reduction on First Token Display Time (TDT-F) and 100 $\\sim$ 400ms reduction on Last Token Display Time (TDT-L), with theoretically and experimentally equal WER on both Aishell-1 and Librispeech datasets.", "year": 2023, "venue": "Interspeech", "authors": [ "Xingcheng Song", "Di Wu", "Binbin Zhang", "Zhendong Peng", "Bo Dang", "Fuping Pan", "Zhiyong Wu" ], "externalIds": { "DBLP": "conf/interspeech/Song0ZP0P023", "ArXiv": "2305.10649", "DOI": "10.21437/Interspeech.2023-1497", "CorpusId": 258762440 }, "url": "https://www.semanticscholar.org/paper/635e5a007dd2e31503c9a5b0668f44fd6b10c767", "referenceCount": 24, "citationCount": 15, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Spectral Clustering via Orthogonalization-Free Methods", "abstract": "Graph Signal Filter used as dimensionality reduction in spectral clustering usually requires expensive eigenvalue estimation. 
We analyze the filter in an optimization setting and propose to use four orthogonalization-free methods by optimizing objective functions as dimensionality reduction in spectral clustering. The proposed methods do not utilize any orthogonalization, which is known as not well scalable in a parallel computing environment. Our methods theoretically construct adequate feature space, which is, at most, a weighted alteration to the eigenspace of a normalized Laplacian matrix. We numerically hypothesize that the proposed methods are equivalent in clustering quality to the ideal Graph Signal Filter, which exploits the exact eigenvalue needed without expensive eigenvalue estimation. Numerical results show that the proposed methods outperform Power Iteration-based methods and Graph Signal Filter in clustering quality and computation cost. Unlike Power Iteration-based methods and Graph Signal Filter which require random signal input, our methods are able to utilize available initialization in the streaming graph scenarios. Additionally, numerical results show that our methods outperform ARPACK and are faster than LOBPCG in the streaming graph scenarios. We also present numerical results showing the scalability of our methods in multithreading and multiprocessing implementations to facilitate parallel spectral clustering.", "year": 2023, "venue": "arXiv.org", "authors": [ "Qiyuan Pang", "Haizhao Yang" ], "externalIds": { "DBLP": "journals/corr/abs-2305-10356", "ArXiv": "2305.10356", "DOI": "10.48550/arXiv.2305.10356", "CorpusId": 258740846 }, "url": "https://www.semanticscholar.org/paper/91901340ff4375dfd46cc1f7f288f2f28a2e1e13", "referenceCount": 50, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Dish-TS: A General Paradigm for Alleviating Distribution Shift in Time Series Forecasting", "abstract": "The distribution shift in Time Series Forecasting (TSF), indicating series distribution changes over time, largely hinders the performance of TSF models. Existing works towards distribution shift in time series are mostly limited in the quantification of distribution and, more importantly, overlook the potential shift between lookback and horizon windows. To address above challenges, we systematically summarize the distribution shift in TSF into two categories. Regarding lookback windows as input-space and horizon windows as output-space, there exist (i) intra-space shift, that the distribution within the input-space keeps shifted over time, and (ii) inter-space shift, that the distribution is shifted between input-space and output-space. Then we introduce, Dish-TS, a general neural paradigm for alleviating distribution shift in TSF. Specifically, for better distribution estimation, we propose the coefficient net (Conet), which can be any neural architectures, to map input sequences into learnable distribution coefficients. To relieve intra-space and inter-space shift, we organize Dish-TS as a Dual-Conet framework to separately learn the distribution of input- and output-space, which naturally captures the distribution difference of two spaces. In addition, we introduce a more effective training strategy for intractable Conet learning. Finally, we conduct extensive experiments on several datasets coupled with different state-of-the-art forecasting models. Experimental results show Dish-TS consistently boosts them with a more than 20% average improvement. 
Code is available at https://github.com/weifantt/Dish-TS.", "year": 2023, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Wei Fan", "Pengyang Wang", "Dongkun Wang", "Dongjie Wang", "Yuanchun Zhou", "Yanjie Fu" ], "externalIds": { "DBLP": "journals/corr/abs-2302-14829", "ArXiv": "2302.14829", "DOI": "10.48550/arXiv.2302.14829", "CorpusId": 257232506 }, "url": "https://www.semanticscholar.org/paper/4366579b70e6c408e7fd621344bc869c1dda9d0f", "referenceCount": 33, "citationCount": 33, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Distributed Block Chebyshev-Davidson Algorithm for Parallel Spectral Clustering", "abstract": "We develop a distributed Block Chebyshev-Davidson algorithm to solve large-scale leading eigenvalue problems for spectral analysis in spectral clustering. First, the efficiency of the Chebyshev-Davidson algorithm relies on the prior knowledge of the eigenvalue spectrum, which could be expensive to estimate. This issue can be lessened by the analytic spectrum estimation of the Laplacian or normalized Laplacian matrices in spectral clustering, making the proposed algorithm very efficient for spectral clustering. Second, to make the proposed algorithm capable of analyzing big data, a distributed and parallel version has been developed with attractive scalability. The speedup by parallel computing is approximately equivalent to $\\sqrt{p}$, where $p$ denotes the number of processes. {Numerical results will be provided to demonstrate its efficiency in spectral clustering and scalability advantage over existing eigensolvers used for spectral clustering in parallel computing environments.}", "year": 2022, "venue": "Journal of Scientific Computing", "authors": [ "Qiyuan Pang", "Haizhao Yang" ], "externalIds": { "DBLP": "journals/jscic/PangY24", "ArXiv": "2212.04443", "DOI": "10.48550/arXiv.2212.04443", "CorpusId": 254408975 }, "url": "https://www.semanticscholar.org/paper/1cf24a50e159241469f108d11eca5bac0365852b", "referenceCount": 62, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Data-driven Kernel Subspace Clustering with Local Manifold Preservation", "abstract": "Kernel-based subspace clustering methods that can reveal the nonlinear structure of data are an emerging research topic. While advances have been made, existing methods suffer from one or both of the following shortcomings: (1) the predefined kernel determines their performance; (2) they may be vulnerable in arbitrary manifold subspace. In this paper, we propose a novel data-driven kernel subspace clustering model with local manifold preservation, named DKLM. Specifically, DKLM provides an explicit data-driven kernel learning strategy for learning kernel directly from the self-representation of data while satisfying the adaptive-weighting. Based on the kernel, DKLM allows preserving the local manifold structure of data through a kernel local manifold term in nonlinear space and encourages acquiring an affinity matrix with the optimal block diagonal. 
Various experiments on both synthetic data and real-world data demonstrate the effectiveness of our method.", "year": 2022, "venue": "2022 IEEE International Conference on Data Mining Workshops (ICDMW)", "authors": [ "Kunpeng Xu", "Lifei Chen", "Shengrui Wang" ], "externalIds": { "DBLP": "conf/icdm/XuCW22", "DOI": "10.1109/ICDMW58026.2022.00116", "CorpusId": 256668976 }, "url": "https://www.semanticscholar.org/paper/af8fe2a30a5607c4db4b2cb5026e819234ef576a", "referenceCount": 31, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Temporal Action Segmentation: An Analysis of Modern Techniques", "abstract": "Temporal action segmentation (TAS) in videos aims at densely identifying video frames in minutes-long videos with multiple action classes. As a long-range video understanding task, researchers have developed an extended collection of methods and examined their performance using various benchmarks. Despite the rapid growth of TAS techniques in recent years, no systematic survey has been conducted in these sectors. This survey analyzes and summarizes the most significant contributions and trends. In particular, we first examine the task definition, common benchmarks, types of supervision, and prevalent evaluation measures. In addition, we systematically investigate two essential techniques of this topic, i.e., frame representation and temporal modeling, which have been studied extensively in the literature. We then conduct a thorough review of existing TAS works categorized by their levels of supervision and conclude our survey by identifying and emphasizing several research gaps.", "year": 2022, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Guodong Ding", "Fadime Sener", "Angela Yao" ], "externalIds": { "DBLP": "journals/corr/abs-2210-10352", "ArXiv": "2210.10352", "DOI": "10.1109/TPAMI.2023.3327284", "CorpusId": 252992530, "PubMed": "37874699" }, "url": "https://www.semanticscholar.org/paper/8b8dd714d1db923cc3eabc33e21bca88aeeb77ac", "referenceCount": 206, "citationCount": 41, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "TimesNet: Temporal 2D-Variation Modeling for General Time Series Analysis", "abstract": "Time series analysis is of immense importance in extensive applications, such as weather forecasting, anomaly detection, and action recognition. This paper focuses on temporal variation modeling, which is the common key problem of extensive analysis tasks. Previous methods attempt to accomplish this directly from the 1D time series, which is extremely challenging due to the intricate temporal patterns. Based on the observation of multi-periodicity in time series, we ravel out the complex temporal variations into the multiple intraperiod- and interperiod-variations. To tackle the limitations of 1D time series in representation capability, we extend the analysis of temporal variations into the 2D space by transforming the 1D time series into a set of 2D tensors based on multiple periods. This transformation can embed the intraperiod- and interperiod-variations into the columns and rows of the 2D tensors respectively, making the 2D-variations to be easily modeled by 2D kernels. Technically, we propose the TimesNet with TimesBlock as a task-general backbone for time series analysis. 
TimesBlock can discover the multi-periodicity adaptively and extract the complex temporal variations from transformed 2D tensors by a parameter-efficient inception block. Our proposed TimesNet achieves consistent state-of-the-art in five mainstream time series analysis tasks, including short- and long-term forecasting, imputation, classification, and anomaly detection. Code is available at this repository: https://github.com/thuml/TimesNet.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Haixu Wu", "Teng Hu", "Yong Liu", "Hang Zhou", "Jianmin Wang", "Mingsheng Long" ], "externalIds": { "DBLP": "conf/iclr/WuHLZ0L23", "ArXiv": "2210.02186", "DOI": "10.48550/arXiv.2210.02186", "CorpusId": 252715491 }, "url": "https://www.semanticscholar.org/paper/47696145b3f88c4cc3f3c22035286b5d7ebce09d", "referenceCount": 52, "citationCount": 346, "influentialCitationCount": 72, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Dynamic Cross-sectional Regime Identification for Financial Market Prediction", "abstract": "We investigate issues related to dynamic cross-sectional regime identification for financial market prediction. A financial market can be viewed as an ecosystem regulated by regimes that may switch at different time points. In most existing regime-based prediction models, regimes can only switch, according to a static transition probability matrix, among a fixed set of regimes identified on training data due to the fact that they lack in mechanism of identifying new regimes on test data. This prevents them from being effective as the financial markets are time-evolving and may fall into a new regime at any future time. Moreover, most of them only handle single time series, and are not capable of dealing with multiple time series. These shortcomings prompted us to devise a dynamic cross-sectional regime identification model for time series prediction. The new model is defined on a multi-time-series system, with time-varying transition probabilities, and can identify new cross-sectional regimes dynamically from the time-evolving financial market. Experimental results on real-world financial datasets illustrate the promising performance and suitability of our model.", "year": 2022, "venue": "Annual International Computer Software and Applications Conference", "authors": [ "Rongbo Chen", "Kunpeng Xun", "Jean-Marc Patenaude", "Shengrui Wang" ], "externalIds": { "DBLP": "conf/compsac/ChenXPW22", "DOI": "10.1109/COMPSAC54236.2022.00049", "CorpusId": 251473231 }, "url": "https://www.semanticscholar.org/paper/1b31e72197709e764948dc47503f11319dce5883", "referenceCount": 0, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Non-stationary Transformers: Exploring the Stationarity in Time Series Forecasting", "abstract": "Transformers have shown great power in time series forecasting due to their global-range modeling ability. However, their performance can degenerate terribly on non-stationary real-world data in which the joint distribution changes over time. Previous studies primarily adopt stationarization to attenuate the non-stationarity of original series for better predictability. But the stationarized series deprived of inherent non-stationarity can be less instructive for real-world bursty events forecasting. 
This problem, termed over-stationarization in this paper, leads Transformers to generate indistinguishable temporal attentions for different series and impedes the predictive capability of deep models. To tackle the dilemma between series predictability and model capability, we propose Non-stationary Transformers as a generic framework with two interdependent modules: Series Stationarization and De-stationary Attention. Concretely, Series Stationarization unifies the statistics of each input and converts the output with restored statistics for better predictability. To address the over-stationarization problem, De-stationary Attention is devised to recover the intrinsic non-stationary information into temporal dependencies by approximating distinguishable attentions learned from raw series. Our Non-stationary Transformers framework consistently boosts mainstream Transformers by a large margin, which reduces MSE by 49.43% on Transformer, 47.34% on Informer, and 46.89% on Reformer, making them the state-of-the-art in time series forecasting. Code is available at this repository: https://github.com/thuml/Nonstationary_Transformers.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Yong Liu", "Haixu Wu", "Jianmin Wang", "Mingsheng Long" ], "externalIds": { "DBLP": "conf/nips/LiuWWL22", "ArXiv": "2205.14415", "CorpusId": 252968420 }, "url": "https://www.semanticscholar.org/paper/8064d3873c646dc9ff949d72c54c634a906fc092", "referenceCount": 47, "citationCount": 195, "influentialCitationCount": 21, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Temporal Domain Generalization with Drift-Aware Dynamic Neural Network", "abstract": "Temporal domain generalization is a promising yet extremely challenging area where the goal is to learn models under temporally changing data distributions and generalize to unseen data distributions following the trends of the change. The advancement of this area is challenged by: 1) characterizing data distribution drift and its impacts on models, 2) expressiveness in tracking the model dynamics, and 3) theoretical guarantee on the performance. To address them, we propose a Temporal Domain Generalization with Drift-Aware Dynamic Neural Network (DRAIN) framework. Specifically, we formulate the problem into a Bayesian framework that jointly models the relation between data and model dynamics. We then build a recurrent graph generation scenario to characterize the dynamic graph-structured neural networks learned across different time points. It captures the temporal drift of model parameters and data distributions and can predict models in the future without the presence of future data. In addition, we explore theoretical guarantees of the model performance under the challenging temporal DG setting and provide theoretical analysis, including uncertainty and generalization error. 
Finally, extensive experiments on several real-world benchmarks with temporal drift demonstrate the effectiveness and efficiency of the proposed method.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Guangji Bai", "Ling Chen", "Liang Zhao" ], "externalIds": { "ArXiv": "2205.10664", "DBLP": "conf/iclr/BaiL023", "DOI": "10.48550/arXiv.2205.10664", "CorpusId": 248987351 }, "url": "https://www.semanticscholar.org/paper/85a7c8523f5e1688896e9da7f36f081d5db21876", "referenceCount": 47, "citationCount": 16, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Triformer: Triangular, Variable-Specific Attentions for Long Sequence Multivariate Time Series Forecasting-Full Version", "abstract": "A variety of real-world applications rely on far future information to make decisions, thus calling for efficient and accurate long sequence multivariate time series forecasting. While recent attention-based forecasting models show strong abilities in\n\ncapturing long-term dependencies, they still suffer from two key limitations. First, canonical self attention has a quadratic complexity w.r.t. the input time series length, thus falling short in efficiency. Second, different variables’ time series often have\n\ndistinct temporal dynamics, which existing studies fail to capture, as they use the same model parameter space, e.g., projection matrices, for all variables’ time series, thus falling short in accuracy. To ensure high efficiency and accuracy, we propose Triformer, a triangular, variable-specific attention. (i) Linear complexity: we introduce a novel patch attention with linear complexity. When stacking multiple layers of the patch attentions, a triangular structure is proposed such that the\n\nlayer sizes shrink exponentially, thus maintaining linear complexity. (ii) Variable-specific parameters: we propose a light-weight method to enable distinct sets of model parameters for different variables’ time series to enhance accuracy\n\nwithout compromising efficiency and memory usage. Strong empirical evidence on four datasets from multiple domains justifies our design choices, and it demonstrates that Triformer outperforms state-of-the-art methods w.r.t. both accuracy and\n\nefficiency. Source code is publicly available at https://github.com/razvanc92/triformer.", "year": 2022, "venue": "International Joint Conference on Artificial Intelligence", "authors": [ "Razvan-Gabriel Cirstea", "Chenjuan Guo", "B. Yang", "Tung Kieu", "Xuanyi Dong", "Shirui Pan" ], "externalIds": { "DBLP": "conf/ijcai/CirsteaG0KDP22", "ArXiv": "2204.13767", "DOI": "10.48550/arXiv.2204.13767", "CorpusId": 248476444 }, "url": "https://www.semanticscholar.org/paper/00f4f0fe0d8f5e03f52b1ba7dda8c7e94f69de17", "referenceCount": 49, "citationCount": 75, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Fast and Slow for Online Time Series Forecasting", "abstract": "The fast adaptation capability of deep neural networks in non-stationary environments is critical for online time series forecasting. Successful solutions require handling changes to new and recurring patterns. However, training deep neural forecaster on the fly is notoriously challenging because of their limited ability to adapt to non-stationary environments and the catastrophic forgetting of old knowledge. 
In this work, inspired by the Complementary Learning Systems (CLS) theory, we propose Fast and Slow learning Networks (FSNet), a holistic framework for online time-series forecasting to simultaneously deal with abrupt changing and repeating patterns. Particularly, FSNet improves the slowly-learned backbone by dynamically balancing fast adaptation to recent changes and retrieving similar old knowledge. FSNet achieves this mechanism via an interaction between two complementary components of an adapter to monitor each layer's contribution to the lost, and an associative memory to support remembering, updating, and recalling repeating events. Extensive experiments on real and synthetic datasets validate FSNet's efficacy and robustness to both new and recurring patterns. Our code is available at \\url{https://github.com/salesforce/fsnet}.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Quang Pham", "Chenghao Liu", "Doyen Sahoo", "S. Hoi" ], "externalIds": { "DBLP": "journals/corr/abs-2202-11672", "ArXiv": "2202.11672", "CorpusId": 247058853 }, "url": "https://www.semanticscholar.org/paper/4d755a5a66a8c46b722b44613788085191524e11", "referenceCount": 75, "citationCount": 23, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Multi-view Kernel Clustering framework for Categorical sequences", "abstract": null, "year": 2022, "venue": "Expert systems with applications", "authors": [ "Kunpeng Xu", "Lifei Chen", "Shengrui Wang" ], "externalIds": { "DBLP": "journals/eswa/XuCW22", "DOI": "10.1016/j.eswa.2022.116637", "CorpusId": 247078802 }, "url": "https://www.semanticscholar.org/paper/a1aeff793e7ac7eb60a4c7379409c12e2f9d41cd", "referenceCount": 55, "citationCount": 16, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A time-series clustering algorithm for analyzing the changes of mobility pattern caused by COVID-19", "abstract": "The coronavirus (COVID-19) has spread to more than 135 countries and continues to spread. The virus sickened more than 90,201,652 people until January 2021 and caused 1,937,091 deaths in the world. So far, social distancing plays a vital role in controlling the coronavirus. Governments issued restrictions on traveling, institutions cancel gatherings, and citizens socially distance themselves to limit the spread of the virus. This paper aims to develop a novel time-series clustering algorithm to analyze the changes in mobility patterns caused by the COVID-19. This work will produce broader impacts in many areas, such as helping local governments locate the medical facilities and improving the social distancing recommendations for infectious disease control.", "year": 2021, "venue": "HANIMOB@SIGSPATIAL", "authors": [ "Zi-Wu Zhang", "Diya Li", "Zhe Zhang", "Nicholas Duffield" ], "externalIds": { "DBLP": "conf/gis/ZhangLZD21", "DOI": "10.1145/3486637.3489489", "CorpusId": 240075224 }, "url": "https://www.semanticscholar.org/paper/e6a191f87e5ea74bce3f2cf1b5580a433ee2c1a4", "referenceCount": 14, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Self-Expressive Kernel Subspace Clustering Algorithm for Categorical Data with Embedded Feature Selection", "abstract": "Kernel clustering of categorical data is a useful tool to process the separable datasets and has been employed in many disciplines. 
Despite recent efforts, existing methods for kernel clustering remain a significant challenge due to the assumption of feature independence and equal weights. In this study, we propose a self-expressive kernel subspace clustering algorithm for categorical data (SKSCC) using the self-expressive kernel density estimation (SKDE) scheme, as well as a new feature-weighted non-linear similarity measurement. In the SKSCC algorithm, we propose an effective non-linear optimization method to solve the clustering algorithm’s objective function, which not only considers the relationship between attributes in a non-linear space but also assigns a weight to each attribute in the algorithm to measure the degree of correlation. A series of experiments on some widely used synthetic and real-world datasets demonstrated the better effectiveness and efficiency of the proposed algorithm compared with other state-of-the-art methods, in terms of non-linear relationship exploration among attributes.", "year": 2021, "venue": "Mathematics", "authors": [ "Hui Chen", "Kunpeng Xu", "Lifei Chen", "Q. Jiang" ], "externalIds": { "MAG": "3184306616", "DOI": "10.3390/MATH9141680", "CorpusId": 237699607 }, "url": "https://www.semanticscholar.org/paper/ede92b30c4b3cd883d936aee0e697ad36f159093", "referenceCount": 50, "citationCount": 4, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Automatic Sequential Pattern Mining in Data Streams", "abstract": "Given a large volume of multi-dimensional data streams, such as that produced by IoT applications, finance and online web-click logs, how can we discover typical patterns and compress them into compact models? In addition, how can we incrementally distinguish multiple patterns while considering the information obtained from a pattern found in a streaming setting? In this paper, we propose a streaming algorithm, namely StreamScope, that is designed to find intuitive patterns efficiently from event streams evolving over time. Our proposed method has the following properties: (a) it is effective: it operates on semi-infinite collections of co-evolving streams and summarizes all the streams into a set of multiple discrete segments grouped by their similarities. (b) it is automatic: it automatically and incrementally recognizes such patterns and generates models for each of them if necessary; (c) it is scalable: the complexity of our method does not depend on the length of the data streams. 
Our extensive experiments on real data streams demonstrate that StreamScope can find meaningful patterns and achieve great improvements in terms of computational time and memory space over its full batch method competitors.", "year": 2019, "venue": "International Conference on Information and Knowledge Management", "authors": [ "Kouki Kawabata", "Yasuko Matsubara", "Yasushi Sakurai" ], "externalIds": { "DBLP": "conf/cikm/KawabataMS19", "MAG": "2987028103", "DOI": "10.1145/3357384.3358002", "CorpusId": 207757662 }, "url": "https://www.semanticscholar.org/paper/a9e37e77fe69f5375adbd67ee11ff6b5821f6180", "referenceCount": 34, "citationCount": 8, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Dynamic Modeling and Forecasting of Time-evolving Data Streams", "abstract": "Given a large, semi-infinite collection of co-evolving data sequences (e.g., IoT/sensor streams), which contains multiple distinct dynamic time-series patterns, our aim is to incrementally monitor current dynamic patterns and forecast future behavior. We present an intuitive model, namely OrbitMap, which provides a good summary of time-series evolution in streams. We also propose a scalable and effective algorithm for fitting and forecasting time-series data streams. Our method is designed as a dynamic, interactive and flexible system, and is based on latent non-linear differential equations. Our proposed method has the following advantages: (a) It is effective: it captures important time-evolving patterns in data streams and enables real-time, long-range forecasting; (b) It is general: our model is general and practical and can be applied to various types of time-evolving data streams; (c) It is scalable: our algorithm does not depend on data size, and thus is applicable to very large sequences. Extensive experiments on real datasets demonstrate that OrbitMap makes long-range forecasts, and consistently outperforms the best existing state-of-the-art methods as regards accuracy and execution speed.", "year": 2019, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Yasuko Matsubara", "Yasushi Sakurai" ], "externalIds": { "MAG": "2951011582", "DBLP": "conf/kdd/MatsubaraS19", "DOI": "10.1145/3292500.3330947", "CorpusId": 196182210 }, "url": "https://www.semanticscholar.org/paper/f4569c606d93628f7937dcce4f4c41487d73a258", "referenceCount": 43, "citationCount": 17, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Cogra: Concept-Drift-Aware Stochastic Gradient Descent for Time-Series Forecasting", "abstract": "We approach the time-series forecasting problem in the presence of concept drift by automatic learning rate tuning of stochastic gradient descent (SGD). The SGD-based approach is preferable to other concept drift algorithms in that it can be applied to any model and it can keep learning efficiently whilst predicting online. Among a number of SGD algorithms, the variance-based SGD (vSGD) can successfully handle concept drift by automatic learning rate tuning, which is reduced to an adaptive mean estimation problem. However, its performance is still limited because of its heuristic mean estimator. In this paper, we present a concept-drift-aware stochastic gradient descent (Cogra), equipped with more theoretically-sound mean estimator called sequential mean tracker (SMT). 
Our key contribution is that we define a goodness criterion for the mean estimators; SMT is designed to be optimal according to this criterion. As a result of comprehensive experiments, we find that (i) our SMT can estimate the mean better than vSGD’s estimator in the presence of concept drift, and (ii) in terms of predictive performance, Cogra reduces the predictive loss by 16–67% for real-world datasets, indicating that SMT improves the prediction accuracy significantly.", "year": 2019, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Kohei Miyaguchi", "Hiroshi Kajino" ], "externalIds": { "DBLP": "conf/aaai/MiyaguchiK19", "MAG": "2905470007", "DOI": "10.1609/AAAI.V33I01.33014594", "CorpusId": 59209915 }, "url": "https://www.semanticscholar.org/paper/f4c6fc94932b8010bec99b279e762c148257a354", "referenceCount": 23, "citationCount": 16, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The FacT: Taming Latent Factor Models for Explainability with Factorization Trees", "abstract": "Latent factor models have achieved great success in personalized recommendations, but they are also notoriously difficult to explain. In this work, we integrate regression trees to guide the learning of latent factor models for recommendation, and use the learnt tree structure to explain the resulting latent factors. Specifically, we build regression trees on users and items respectively with user-generated reviews, and associate a latent profile to each node on the trees to represent users and items. With the growth of regression tree, the latent factors are gradually refined under the regularization imposed by the tree structure. As a result, we are able to track the creation of latent profiles by looking into the path of each factor on regression trees, which thus serves as an explanation for the resulting recommendations. Extensive experiments on two large collections of Amazon and Yelp reviews demonstrate the advantage of our model over several competitive baseline algorithms. 
Besides, our extensive user study also confirms the practical value of explainable recommendations generated by our model.", "year": 2019, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "Yiyi Tao", "Yiling Jia", "Nan Wang", "Hongning Wang" ], "externalIds": { "ArXiv": "1906.02037", "MAG": "2963683654", "DBLP": "journals/corr/abs-1906-02037", "DOI": "10.1145/3331184.3331244", "CorpusId": 174799119 }, "url": "https://www.semanticscholar.org/paper/948133add88a3ced3f0810d03dbcf2baaa07f69e", "referenceCount": 44, "citationCount": 48, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Self-representation Model for Robust Clustering of Categorical Sequences", "abstract": null, "year": 2018, "venue": "APWeb/WAIM Workshops", "authors": [ "Kunpeng Xu", "Lifei Chen", "Shengrui Wang", "Beizhan Wang" ], "externalIds": { "DBLP": "conf/apweb/XuCWW18", "MAG": "2896348621", "DOI": "10.1007/978-3-030-01298-4_2", "CorpusId": 53028496 }, "url": "https://www.semanticscholar.org/paper/f4ba0c538810096779ea716756809a0f107602a1", "referenceCount": 15, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "State Space Models for Forecasting Water Quality Variables: An Application in Aquaculture Prawn Farming", "abstract": "A novel approach to deterministic modelling of diurnal water quality parameters in aquaculture prawn ponds is presented. The purpose is to provide assistance to prawn pond farmers in monitoring pond water quality with limited data. Obtaining sufficient water quality data is generally a challenge in commercial prawn farming applications. Farmers can sustain large losses in their crop if water quality is not well managed. The model presented provides a means for modelling and forecasting various water quality parameters. It is inspired by data dynamics and does not rely on physical ecosystem modelling. The model is constructed within the Bayesian filtering framework. The Kalman filter and the unscented Kalman filer are applied for inference. The results demonstrate generalisability to both variables and environments. The ability for short term forecasting with mean absolute percentage errors between 0.5% and 11% is demonstrated.", "year": 2018, "venue": "Knowledge Discovery and Data Mining", "authors": [ "J. Dabrowski", "Ashfaqur Rahman", "A. George", "Stuart Arnold", "John McCulloch" ], "externalIds": { "DBLP": "conf/kdd/DabrowskiRGAM18", "MAG": "2809078865", "DOI": "10.1145/3219819.3219841", "CorpusId": 50776443 }, "url": "https://www.semanticscholar.org/paper/1e7101d0b99233753a457ff7ac07b99a27290c60", "referenceCount": 29, "citationCount": 28, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Environmental Science" ] }, { "title": "Toeplitz Inverse Covariance-Based Clustering of Multivariate Time Series Data", "abstract": "Subsequence clustering of multivariate time series is a useful tool for discovering repeated patterns in temporal data. Once these patterns have been discovered, seemingly complicated datasets can be interpreted as a temporal sequence of only a small number of states, or clusters. For example, raw sensor data from a fitness-tracking application can be expressed as a timeline of a select few actions (i.e., walking, sitting, running). 
However, discovering these patterns is challenging because it requires simultaneous segmentation and clustering of the time series. Furthermore, interpreting the resulting clusters is difficult, especially when the data is high-dimensional. Here we propose a new method of model-based clustering, which we call Toeplitz Inverse Covariance-based Clustering (TICC). Each cluster in the TICC method is defined by a correlation network, or Markov random field (MRF), characterizing the interdependencies between different observations in a typical subsequence of that cluster. Based on this graphical representation, TICC simultaneously segments and clusters the time series data. We solve the TICC problem through alternating minimization, using a variation of the expectation maximization (EM) algorithm. We derive closed-form solutions to efficiently solve the two resulting subproblems in a scalable way, through dynamic programming and the alternating direction method of multipliers (ADMM), respectively. We validate our approach by comparing TICC to several state-of-the-art baselines in a series of synthetic experiments, and we then demonstrate on an automobile sensor dataset how TICC can be used to learn interpretable clusters in real-world scenarios.", "year": 2017, "venue": "Knowledge Discovery and Data Mining", "authors": [ "David Hallac", "Sagar Vare", "Stephen P. Boyd", "J. Leskovec" ], "externalIds": { "MAG": "2871311606", "DBLP": "conf/kdd/HallacVBL17", "ArXiv": "1706.03161", "DOI": "10.1145/3097983.3098060", "CorpusId": 3345975, "PubMed": "29770257" }, "url": "https://www.semanticscholar.org/paper/00e589a575526a0dca7b458c16bbb1c27524666f", "referenceCount": 55, "citationCount": 239, "influentialCitationCount": 40, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics", "Medicine" ] }, { "title": "AutoPlait: automatic mining of co-evolving time sequences", "abstract": "Given a large collection of co-evolving multiple time-series, which contains an unknown number of patterns of different durations, how can we efficiently and effectively find typical patterns and the points of variation? How can we statistically summarize all the sequences, and achieve a meaningful segmentation? In this paper we present AutoPlait, a fully automatic mining algorithm for co-evolving time sequences. Our method has the following properties: (a) effectiveness: it operates on large collections of time-series, and finds similar segment groups that agree with human intuition; (b) scalability: it is linear with the input size, and thus scales up very well; and (c) AutoPlait is parameter-free, and requires no user intervention, no prior training, and no parameter tuning. Extensive experiments on 67GB of real datasets demonstrate that AutoPlait does indeed detect meaningful patterns correctly, and it outperforms state-of-the-art competitors as regards accuracy and speed: AutoPlait achieves near-perfect, over 95% precision and recall, and it is up to 472 times faster than its competitors.", "year": 2014, "venue": "SIGMOD Conference", "authors": [ "Yasuko Matsubara", "Yasushi Sakurai", "C. 
Faloutsos" ], "externalIds": { "MAG": "2077760583", "DBLP": "conf/sigmod/MatsubaraSF14", "DOI": "10.1145/2588555.2588556", "CorpusId": 6118391 }, "url": "https://www.semanticscholar.org/paper/27cddc26a77d47d5f0e5bf4cb9f339a55d2fda50", "referenceCount": 37, "citationCount": 135, "influentialCitationCount": 17, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Framework for Clustering Evolving Data Streams", "abstract": null, "year": 2003, "venue": "Very Large Data Bases Conference", "authors": [ "C. Aggarwal", "Jiawei Han", "Jianyong Wang", "Philip S. Yu" ], "externalIds": { "MAG": "2170936641", "DBLP": "conf/vldb/AggarwalHWY03", "DOI": "10.1016/B978-012722442-8/50016-1", "CorpusId": 2354576 }, "url": "https://www.semanticscholar.org/paper/cbc2b5ada23f785b7897a498a2697c4109be616f", "referenceCount": 14, "citationCount": 1958, "influentialCitationCount": 249, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Generative Learning for Financial Time Series with Irregular and Scale-Invariant Patterns", "abstract": null, "year": 2024, "venue": "International Conference on Learning Representations", "authors": [ "Hongbin Huang", "Minghua Chen", "Xiao Qiao" ], "externalIds": { "DBLP": "conf/iclr/HuangCQ24", "CorpusId": 268930903 }, "url": "https://www.semanticscholar.org/paper/001d5eac903cee1be4b04b5fd6011034f18be3d1", "referenceCount": 41, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RHINE: A Regime-Switching Model with Nonlinear Representation for Discovering and Forecasting Regimes in Financial Markets", "abstract": null, "year": 2024, "venue": "SDM", "authors": [ "Kunpeng Xu", "Lifei Chen", "Jean-Marc Patenaude", "Shengrui Wang" ], "externalIds": { "DBLP": "conf/sdm/0002CPW24", "DOI": "10.1137/1.9781611978032.61", "CorpusId": 270925422 }, "url": "https://www.semanticscholar.org/paper/3323617403becb16226f3a10b8e54cfe68544e3b", "referenceCount": 0, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Kernel Representation Learning with Dynamic Regime Discovery for Time Series Forecasting", "abstract": null, "year": 2024, "venue": "Pacific-Asia Conference on Knowledge Discovery and Data Mining", "authors": [ "Kunpeng Xu", "Lifei Chen", "Jean-Marc Patenaude", "Shengrui Wang" ], "externalIds": { "DBLP": "conf/pakdd/XuCPW24", "DOI": "10.1007/978-981-97-2266-2_20", "CorpusId": 269565880 }, "url": "https://www.semanticscholar.org/paper/2cd5df5736e8233eb9c6b27b6754f7c9b02212da", "referenceCount": 0, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Evolving Standardization for Continual Domain Generalization over Temporal Drift", "abstract": "The capability of generalizing to out-of-distribution data is crucial for the deployment of machine learning models in the real world. Existing domain generalization (DG) mainly embarks on offline and discrete scenarios, where multiple source domains are simultaneously accessible and the distribution shift among domains is abrupt and violent. Nevertheless, such setting may not be universally applicable to all real-world applications, as there are cases where the data distribution gradually changes over time due to various factors, e.g., the process of aging. Additionally, as the domain constantly evolves, new domains will continually emerge. 
Re-training and updating models with both new and previous domains using existing DG methods can be resource-intensive and inefficient. Therefore, in this paper, we present a problem formulation for Continual Domain Generalization over Temporal Drift (CDGTD). CDGTD addresses the challenge of gradually shifting data distributions over time, where domains arrive sequentially and models can only access the data of the current domain. The goal is to generalize to unseen domains that are not too far into the future. To this end, we propose an Evolving Standardization (EvoS) method, which characterizes the evolving pattern of feature distribution and mitigates the distribution shift by standardizing features with generated statistics of corresponding domain. Specifically, inspired by the powerful ability of transformers to model sequence relations, we design a multi-scale attention module (MSAM) to learn the evolving pattern under sliding time windows of different lengths. MSAM can generate statistics of current domain based on the statistics of previous domains and the learned evolving pattern. Experiments on multiple real-world datasets including images and texts validate the efficacy of our EvoS.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Mixue Xie", "Shuang Li", "Longhui Yuan", "Chi Harold Liu", "Zehui Dai" ], "externalIds": { "DBLP": "conf/nips/Xie0YLD23", "CorpusId": 268042693 }, "url": "https://www.semanticscholar.org/paper/1ad31a5d64733f13e2f1d747bc7da053d09aa901", "referenceCount": 61, "citationCount": 4, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Clustering-Based Cross-Sectional Regime Identification for Financial Market Forecasting", "abstract": null, "year": 2022, "venue": "International Conference on Database and Expert Systems Applications", "authors": [ "Rongbo Chen", "Mingxuan Sun", "Kunpeng Xu", "Jean-Marc Patenaude", "Shengrui Wang" ], "externalIds": { "DBLP": "conf/dexa/ChenSXPW22", "DOI": "10.1007/978-3-031-12426-6_1", "CorpusId": 251228451 }, "url": "https://www.semanticscholar.org/paper/d38bd6e4c20fdcf1facb3bbab9ac4cb9574729c6", "referenceCount": 0, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Forecasting time series with complex seasonal patterns using exponential smoothing", "abstract": "A new innovations state space modeling framework, incorporating Box-Cox transformations, Fourier series with time varying coefficients and ARMA error correction, is introduced for forecasting complex seasonal time series that cannot be handled using existing forecasting models. Such complex time series include time series with multiple seasonal periods, high frequency seasonality, non-integer seasonality and dual-calendar effects. Our new modelling framework provides an alternative to existing exponential smoothing models, and is shown to have many advantages. The methods for initialization and estimation, including likelihood evaluation, are presented, and analytical expressions for point forecasts and interval predictions under the assumption of Gaussian errors are derived, leading to a simple, comprehensible approach to forecasting complex seasonal time series. Our trigonometric formulation is also presented as a means of decomposing complex seasonal time series, which cannot be decomposed using any of the existing decomposition methods. 
The approach is useful in a broad range of applications, and we illustrate its versatility in three empirical studies where it demonstrates excellent forecasting performance over a range of prediction horizons. In addition, we show that our trigonometric decomposition leads to the identification and extraction of seasonal components, which are otherwise not apparent in the time series plot itself.", "year": 2009, "venue": "", "authors": [ "Alysha M. De Livera", "Rob J Hyndman", "R. Snyder" ], "externalIds": { "CorpusId": 3245836 }, "url": "https://www.semanticscholar.org/paper/e8c365dbd79126cc819d81839bdd2e5dcb5ff2ae", "referenceCount": 40, "citationCount": 820, "influentialCitationCount": 89, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Density-Based Clustering over an Evolving Data Stream with Noise", "abstract": "Clustering is an important task in mining evolving data streams. Beside the limited memory and one-pass constraints, the nature of evolving data streams implies the following requirements for stream clustering: no assumption on the number of clusters, discovery of clusters with arbitrary shape and ability to handle outliers. While a lot of clustering algorithms for data streams have been proposed, they offer no solution to the combination of these requirements. In this paper, we present DenStream, a new approach for discovering clusters in an evolving data stream. The “dense” micro-cluster (named core-micro-cluster) is introduced to summarize the clusters with arbitrary shape, while the potential core-micro-cluster and outlier micro-cluster structures are proposed to maintain and distinguish the potential clusters and outliers. A novel pruning strategy is designed based on these concepts, which guarantees the precision of the weights of the micro-clusters with limited memory. Our performance study over a number of real and synthetic data sets demonstrates the effectiveness and efficiency of our method.", "year": 2006, "venue": "SDM", "authors": [ "Feng Cao", "M. Ester", "Weining Qian", "Aoying Zhou" ], "externalIds": { "MAG": "182707955", "DBLP": "conf/sdm/CaoEQZ06", "DOI": "10.1137/1.9781611972764.29", "CorpusId": 5939988 }, "url": "https://www.semanticscholar.org/paper/371db75f3f668317af075767b0630c6c93288212", "referenceCount": 22, "citationCount": 1033, "influentialCitationCount": 157, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] } ] }, "Exploring Token Pruning in Vision State Space Models": { "paper_title": "Exploring Token Pruning in Vision State Space Models", "arxiv_id": "2409.18962v1", "keyword": "transformer", "authors": [ "Zheng Zhan", "Zhenglun Kong", "Yifan Gong", "Yushu Wu", "Zichong Meng", "Hangyu Zheng", "Xuan Shen", "Stratis Ioannidis", "Wei Niu", "Pu Zhao", "Yanzhi Wang" ], "references": [ { "title": "PlainMamba: Improving Non-Hierarchical Mamba in Visual Recognition", "abstract": "We present PlainMamba: a simple non-hierarchical state space model (SSM) designed for general visual recognition. The recent Mamba model has shown how SSMs can be highly competitive with other architectures on sequential data and initial attempts have been made to apply it to images. 
In this paper, we further adapt the selective scanning process of Mamba to the visual domain, enhancing its ability to learn features from two-dimensional images by (i) a continuous 2D scanning process that improves spatial continuity by ensuring adjacency of tokens in the scanning sequence, and (ii) direction-aware updating which enables the model to discern the spatial relations of tokens by encoding directional information. Our architecture is designed to be easy to use and easy to scale, formed by stacking identical PlainMamba blocks, resulting in a model with constant width throughout all layers. The architecture is further simplified by removing the need for special tokens. We evaluate PlainMamba on a variety of visual recognition tasks, achieving performance gains over previous non-hierarchical models and is competitive with hierarchical alternatives. For tasks requiring high-resolution inputs, in particular, PlainMamba requires much less computing while maintaining high performance. Code and models are available at: https://github.com/ChenhongyiYang/PlainMamba .", "year": 2024, "venue": "arXiv.org", "authors": [ "Chenhongyi Yang", "Zehui Chen", "Miguel Espinosa", "Linus Ericsson", "Zhenyu Wang", "Jiaming Liu", "Elliot J. Crowley" ], "externalIds": { "ArXiv": "2403.17695", "DBLP": "journals/corr/abs-2403-17695", "DOI": "10.48550/arXiv.2403.17695", "CorpusId": 268692121 }, "url": "https://www.semanticscholar.org/paper/62ac3ef81e54e1d1930fb5980b236345ee2e4f32", "referenceCount": 107, "citationCount": 34, "influentialCitationCount": 9, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Hidden Attention of Mamba Models", "abstract": "The Mamba layer offers an efficient selective state space model (SSM) that is highly effective in modeling multiple domains, including NLP, long-range sequence processing, and computer vision. Selective SSMs are viewed as dual models, in which one trains in parallel on the entire sequence via an IO-aware parallel scan, and deploys in an autoregressive manner. We add a third view and show that such models can be viewed as attention-driven models. This new perspective enables us to empirically and theoretically compare the underlying mechanisms to that of the self-attention layers in transformers and allows us to peer inside the inner workings of the Mamba model with explainability methods. Our code is publicly available.", "year": 2024, "venue": "arXiv.org", "authors": [ "Ameen Ali", "Itamar Zimerman", "Lior Wolf" ], "externalIds": { "ArXiv": "2403.01590", "DBLP": "journals/corr/abs-2403-01590", "DOI": "10.48550/arXiv.2403.01590", "CorpusId": 268248520 }, "url": "https://www.semanticscholar.org/paper/26e6cd121c5fdb147df83cb848e4813c926737c8", "referenceCount": 84, "citationCount": 27, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "VMamba: Visual State Space Model", "abstract": "Designing computationally efficient network architectures persists as an ongoing necessity in computer vision. In this paper, we transplant Mamba, a state-space language model, into VMamba, a vision backbone that works in linear time complexity. At the core of VMamba lies a stack of Visual State-Space (VSS) blocks with the 2D Selective Scan (SS2D) module. 
By traversing along four scanning routes, SS2D helps bridge the gap between the ordered nature of 1D selective scan and the non-sequential structure of 2D vision data, which facilitates the gathering of contextual information from various sources and perspectives. Based on the VSS blocks, we develop a family of VMamba architectures and accelerate them through a succession of architectural and implementation enhancements. Extensive experiments showcase VMamba's promising performance across diverse visual perception tasks, highlighting its advantages in input scaling efficiency compared to existing benchmark models. Source code is available at https://github.com/MzeroMiko/VMamba.", "year": 2024, "venue": "arXiv.org", "authors": [ "Yue Liu", "Yunjie Tian", "Yuzhong Zhao", "Hongtian Yu", "Lingxi Xie", "Yaowei Wang", "Qixiang Ye", "Yunfan Liu" ], "externalIds": { "ArXiv": "2401.10166", "DBLP": "journals/corr/abs-2401-10166", "DOI": "10.48550/arXiv.2401.10166", "CorpusId": 267035250 }, "url": "https://www.semanticscholar.org/paper/b24e899ec0f77eef2fc87a9b8e50516367aa1f97", "referenceCount": 87, "citationCount": 253, "influentialCitationCount": 81, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Vision Mamba: Efficient Visual Representation Learning with Bidirectional State Space Model", "abstract": "Recently the state space models (SSMs) with efficient hardware-aware designs, i.e., the Mamba deep learning model, have shown great potential for long sequence modeling. Meanwhile building efficient and generic vision backbones purely upon SSMs is an appealing direction. However, representing visual data is challenging for SSMs due to the position-sensitivity of visual data and the requirement of global context for visual understanding. In this paper, we show that the reliance on self-attention for visual representation learning is not necessary and propose a new generic vision backbone with bidirectional Mamba blocks (Vim), which marks the image sequences with position embeddings and compresses the visual representation with bidirectional state space models. On ImageNet classification, COCO object detection, and ADE20k semantic segmentation tasks, Vim achieves higher performance compared to well-established vision transformers like DeiT, while also demonstrating significantly improved computation&memory efficiency. For example, Vim is 2.8$\\times$ faster than DeiT and saves 86.8% GPU memory when performing batch inference to extract features on images with a resolution of 1248$\\times$1248. The results demonstrate that Vim is capable of overcoming the computation&memory constraints on performing Transformer-style understanding for high-resolution images and it has great potential to be the next-generation backbone for vision foundation models. 
Code is available at https://github.com/hustvl/Vim.", "year": 2024, "venue": "International Conference on Machine Learning", "authors": [ "Lianghui Zhu", "Bencheng Liao", "Qian Zhang", "Xinlong Wang", "Wenyu Liu", "Xinggang Wang" ], "externalIds": { "DBLP": "conf/icml/ZhuL0W0W24", "ArXiv": "2401.09417", "DOI": "10.48550/arXiv.2401.09417", "CorpusId": 267028142 }, "url": "https://www.semanticscholar.org/paper/38c48a1cd296d16dc9c56717495d6e44cc354444", "referenceCount": 79, "citationCount": 307, "influentialCitationCount": 51, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Mamba: Linear-Time Sequence Modeling with Selective State Spaces", "abstract": "Foundation models, now powering most of the exciting applications in deep learning, are almost universally based on the Transformer architecture and its core attention module. Many subquadratic-time architectures such as linear attention, gated convolution and recurrent models, and structured state space models (SSMs) have been developed to address Transformers' computational inefficiency on long sequences, but they have not performed as well as attention on important modalities such as language. We identify that a key weakness of such models is their inability to perform content-based reasoning, and make several improvements. First, simply letting the SSM parameters be functions of the input addresses their weakness with discrete modalities, allowing the model to selectively propagate or forget information along the sequence length dimension depending on the current token. Second, even though this change prevents the use of efficient convolutions, we design a hardware-aware parallel algorithm in recurrent mode. We integrate these selective SSMs into a simplified end-to-end neural network architecture without attention or even MLP blocks (Mamba). Mamba enjoys fast inference (5$\\times$ higher throughput than Transformers) and linear scaling in sequence length, and its performance improves on real data up to million-length sequences. As a general sequence model backbone, Mamba achieves state-of-the-art performance across several modalities such as language, audio, and genomics. On language modeling, our Mamba-3B model outperforms Transformers of the same size and matches Transformers twice its size, both in pretraining and downstream evaluation.", "year": 2023, "venue": "arXiv.org", "authors": [ "Albert Gu", "Tri Dao" ], "externalIds": { "ArXiv": "2312.00752", "DBLP": "journals/corr/abs-2312-00752", "DOI": "10.48550/arXiv.2312.00752", "CorpusId": 265551773 }, "url": "https://www.semanticscholar.org/paper/7bbc7595196a0606a07506c4fb1473e5e87f6082", "referenceCount": 0, "citationCount": 968, "influentialCitationCount": 281, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Condense: A Framework for Device and Frequency Adaptive Neural Network Models on the Edge", "abstract": "With the popularity of battery-powered edge computing, an important yet under-explored problem is the supporting of DNNs for diverse edge devices. On the one hand, different edge platforms have various runtime requirements and computation/memory capabilities. Deploying the same DNN model is unsatisfiable, while designing a specialized DNN for each platform is prohibitively expensive. On the other hand, for a single edge device, DVFS is leveraged to prolong the battery, incurring significant inference speed variation for the same DNN and consequently poor user experience. 
To tackle this, we propose Condense, a framework providing a single adaptive model that can be reconfigured (switch to various sub-networks with different computations/parameters) instantly for diverse devices and execution frequencies without any retraining. Experiments demonstrate that Condense can simultaneously provide vast high-accuracy sub-networks with different computations and parameters corresponding to various sparsity ratios to support diverse edge devices with different runtime requirements, and reduce the speed variation under varying frequencies on each device, with a memory cost of only one set of weights.", "year": 2023, "venue": "Design Automation Conference", "authors": [ "Yifan Gong", "Pu Zhao", "Zheng Zhan", "Yushu Wu", "Chao Wu", "Zhenglun Kong", "Minghai Qin", "Caiwen Ding", "Yanzhi Wang" ], "externalIds": { "DBLP": "conf/dac/GongZZWWKQDW23", "DOI": "10.1109/DAC56929.2023.10247713", "CorpusId": 261900522 }, "url": "https://www.semanticscholar.org/paper/7f2cbe89607b99fcab1e6d1694a129ab5e38778e", "referenceCount": 17, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Selective Structured State-Spaces for Long-Form Video Understanding", "abstract": "Effective modeling of complex spatiotemporal dependencies in long-form videos remains an open problem. The recently proposed Structured State-Space Sequence ($S4$) model with its linear complexity offers a promising direction in this space. However, we demonstrate that treating all imagetokens equally as done by $S4$ model can adversely affect its efficiency and accuracy. To address this limitation, we present a novel Selective $S4$ (i.e., $S5)$ model that employs a lightweight mask generator to adaptively select informative image tokens resulting in more efficient and accurate modeling of long-term spatiotemporal dependencies in videos. Unlike previous mask-based token reduction methods used in transformers, our $S5$ model avoids the dense self-attention calculation by making use of the guidance of the momentum-updated $S4$ model. This enables our model to efficiently discard less informative tokens and adapt to various long-form video understanding tasks more effectively. However, as is the case for most token reduction methods, the informative image tokens could be dropped incorrectly. To improve the robustness and the temporal horizon of our model, we propose a novel long-short masked contrastive learning (LSMCL) approach that enables our model to predict longer temporal context using shorter input videos. 
We present extensive comparative results using three challenging long-form video understanding datasets (LVU, COIN and Breakfast), demonstrating that our approach consistently outperforms the previous state-of-the-art S4 model by up to 9.6% accuracy while reducing its memory footprint by 23%.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Jue Wang", "Wenjie Zhu", "Pichao Wang", "Xiang Yu", "Linda Liu", "Mohamed Omar", "Raffay Hamid" ], "externalIds": { "DBLP": "conf/cvpr/WangZWYLOH23", "ArXiv": "2303.14526", "DOI": "10.1109/CVPR52729.2023.00618", "CorpusId": 257766807 }, "url": "https://www.semanticscholar.org/paper/983d8b87693e909eb8b2f2fe74a6244dd65b61ee", "referenceCount": 80, "citationCount": 57, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "All-in-One: A Highly Representative DNN Pruning Framework for Edge Devices with Dynamic Power Management", "abstract": "During the deployment of deep neural networks (DNNs) on edge devices, many research efforts are devoted to the limited hardware resource. However, little attention is paid to the influence of dynamic power management. As edge devices typically only have a budget of energy with batteries (rather than almost unlimited energy support on servers or workstations), their dynamic power management often changes the execution frequency as in the widely-used dynamic voltage and frequency scaling (DVFS) technique. This leads to highly unstable inference speed performance, especially for computation-intensive DNN models, which can harm user experience and waste hardware resources. We firstly identify this problem and then propose All-in-One, a highly representative pruning framework to work with dynamic power management using DVFS. The framework can use only one set of model weights and soft masks (together with other auxiliary parameters of negligible storage) to represent multiple models of various pruning ratios. By re-configuring the model to the corresponding pruning ratio for a specific execution frequency (and voltage), we are able to achieve stable inference speed, i.e., keeping the difference in speed performance under various execution frequencies as small as possible. Our experiments demonstrate that our method not only achieves high accuracy for multiple models of different pruning ratios, but also reduces their variance of inference latency for various frequencies, with minimal memory consumption of only one model and one soft mask.", "year": 2022, "venue": "2022 IEEE/ACM International Conference On Computer Aided Design (ICCAD)", "authors": [ "Yifan Gong", "Zheng Zhan", "Pu Zhao", "Yushu Wu", "Chaoan Wu", "Caiwen Ding", "Weiwen Jiang", "Minghai Qin", "Yanzhi Wang" ], "externalIds": { "DBLP": "conf/iccad/00040ZWWDJQW22", "ArXiv": "2212.05122", "DOI": "10.1145/3508352.3549379", "CorpusId": 254564412 }, "url": "https://www.semanticscholar.org/paper/1955793bec2653136818c0fccbd5f43d57d336e4", "referenceCount": 41, "citationCount": 6, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Token Merging: Your ViT But Faster", "abstract": "We introduce Token Merging (ToMe), a simple method to increase the throughput of existing ViT models without needing to train. ToMe gradually combines similar tokens in a transformer using a general and light-weight matching algorithm that is as fast as pruning while being more accurate. 
Off-the-shelf, ToMe can 2x the throughput of state-of-the-art ViT-L @ 512 and ViT-H @ 518 models on images and 2.2x the throughput of ViT-L on video with only a 0.2-0.3% accuracy drop in each case. ToMe can also easily be applied during training, improving in practice training speed up to 2x for MAE fine-tuning on video. Training with ToMe further minimizes accuracy drop, leading to 2x the throughput of ViT-B on audio for only a 0.4% mAP drop. Qualitatively, we find that ToMe merges object parts into one token, even over multiple frames of video. Overall, ToMe's accuracy and speed are competitive with state-of-the-art on images, video, and audio.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Daniel Bolya", "Cheng-Yang Fu", "Xiaoliang Dai", "Peizhao Zhang", "Christoph Feichtenhofer", "Judy Hoffman" ], "externalIds": { "ArXiv": "2210.09461", "DBLP": "conf/iclr/BolyaFDZFH23", "DOI": "10.48550/arXiv.2210.09461", "CorpusId": 252968113 }, "url": "https://www.semanticscholar.org/paper/1dff6b1b35e2d45d4db57c8b4e4395486c3e365f", "referenceCount": 59, "citationCount": 245, "influentialCitationCount": 61, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Long Range Language Modeling via Gated State Spaces", "abstract": "State space models have shown to be effective at modeling long range dependencies, specially on sequence classification tasks. In this work we focus on autoregressive sequence modeling over English books, Github source code and ArXiv mathematics articles. Based on recent developments around the effectiveness of gated activation functions, we propose a new layer named Gated State Space (GSS) and show that it trains significantly faster than the diagonal version of S4 (i.e. DSS) on TPUs, is fairly competitive with several well-tuned Transformer-based baselines and exhibits zero-shot generalization to longer inputs while being straightforward to implement. Finally, we show that leveraging self-attention to model local dependencies improves the performance of GSS even further.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Harsh Mehta", "Ankit Gupta", "Ashok Cutkosky", "Behnam Neyshabur" ], "externalIds": { "DBLP": "journals/corr/abs-2206-13947", "ArXiv": "2206.13947", "DOI": "10.48550/arXiv.2206.13947", "CorpusId": 250089125 }, "url": "https://www.semanticscholar.org/paper/eaef083b9d661f42cc0d89d9d8156218f33a91d9", "referenceCount": 53, "citationCount": 157, "influentialCitationCount": 18, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Vision Transformer Adapter for Dense Predictions", "abstract": "This work investigates a simple yet powerful dense prediction task adapter for Vision Transformer (ViT). Unlike recently advanced variants that incorporate vision-specific inductive biases into their architectures, the plain ViT suffers inferior performance on dense predictions due to weak prior assumptions. To address this issue, we propose the ViT-Adapter, which allows plain ViT to achieve comparable performance to vision-specific transformers. Specifically, the backbone in our framework is a plain ViT that can learn powerful representations from large-scale multi-modal data. When transferring to downstream tasks, a pre-training-free adapter is used to introduce the image-related inductive biases into the model, making it suitable for these tasks. 
We verify ViT-Adapter on multiple dense prediction tasks, including object detection, instance segmentation, and semantic segmentation. Notably, without using extra detection data, our ViT-Adapter-L yields state-of-the-art 60.9 box AP and 53.0 mask AP on COCO test-dev. We hope that the ViT-Adapter could serve as an alternative for vision-specific transformers and facilitate future research. The code and models will be released at https://github.com/czczup/ViT-Adapter.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Zhe Chen", "Yuchen Duan", "Wenhai Wang", "Junjun He", "Tong Lu", "Jifeng Dai", "Y. Qiao" ], "externalIds": { "ArXiv": "2205.08534", "DBLP": "journals/corr/abs-2205-08534", "DOI": "10.48550/arXiv.2205.08534", "CorpusId": 248834106 }, "url": "https://www.semanticscholar.org/paper/c431408780586268e8bcf2483b01a80728d10960", "referenceCount": 109, "citationCount": 403, "influentialCitationCount": 45, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "BLCR: Towards Real-time DNN Execution with Block-based Reweighted Pruning", "abstract": "Accelerating DNN execution on resource-limited computing platforms has been a long-standing problem. Prior works utilize ℓ1-based group lasso or dynamic regularization such as ADMM to perform structured pruning on DNN models to leverage the parallel computing architectures. However, both of the pruning schemes and pruning methods lack universality, which leads to degraded performance and limited applicability. Considering mobile devices are becoming an important carrier for deep learning tasks, current approaches are not ideal for fully exploiting mobile parallelism while achieving high inference accuracy. To solve the problem, we propose BLCR, a novel block-based pruning framework that comprises a general and flexible structured pruning scheme that enjoys higher flexibility while exploiting full on-device parallelism, as well as a powerful and efficient reweighted regularization method to achieve the proposed sparsity scheme. Our framework is universal, which can be applied to both CNNs and RNNs, implying complete support for the two major kinds of computation-intensive layers (i.e., CONV and FC layers). To complete all aspects of the pruning-for-acceleration task, we also integrate compiler-based code optimization into our framework that can perform DNN inference on mobile devices in real-time. To the best of our knowledge, it is the first time that the weight pruning framework achieves universal coverage for both CNNs and RNNs with real-time mobile acceleration and no accuracy compromise.", "year": 2022, "venue": "IEEE International Symposium on Quality Electronic Design", "authors": [ "Xiaolong Ma", "Geng Yuan", "Z. Li", "Yifan Gong", "Tianyun Zhang", "Wei Niu", "Zheng Zhan", "Pu Zhao", "Ning Liu", "Jian Tang", "Xue Lin", "Bin Ren", "Yanzhi Wang" ], "externalIds": { "DBLP": "conf/isqed/MaYLGZNZZLTLRW22", "DOI": "10.1109/ISQED54688.2022.9806237", "CorpusId": 250119428 }, "url": "https://www.semanticscholar.org/paper/2255c2ffb11ef4aa5e5a2da76ec58ec07fd5da70", "referenceCount": 28, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning to Merge Tokens in Vision Transformers", "abstract": "Transformers are widely applied to solve natural language understanding and computer vision tasks. 
While scaling up these architectures leads to improved performance, it often comes at the expense of much higher computational costs. In order for large-scale models to remain practical in real-world systems, there is a need for reducing their computational overhead. In this work, we present the PatchMerger, a simple module that reduces the number of patches or tokens the network has to process by merging them between two consecutive intermediate layers. We show that the PatchMerger achieves a significant speedup across various model sizes while matching the original performance both upstream and downstream after fine-tuning.", "year": 2022, "venue": "arXiv.org", "authors": [ "Cédric Renggli", "André Susano Pinto", "N. Houlsby", "Basil Mustafa", "J. Puigcerver", "C. Riquelme" ], "externalIds": { "DBLP": "journals/corr/abs-2202-12015", "ArXiv": "2202.12015", "CorpusId": 247084222 }, "url": "https://www.semanticscholar.org/paper/202967f77c4384bce80eaf2fa5737259008267d3", "referenceCount": 17, "citationCount": 44, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A ConvNet for the 2020s", "abstract": "The “Roaring 20s” of visual recognition began with the introduction of Vision Transformers (ViTs), which quickly superseded ConvNets as the state-of-the-art image classification model. A vanilla ViT, on the other hand, faces difficulties when applied to general computer vision tasks such as object detection and semantic segmentation. It is the hierarchical Transformers (e.g., Swin Transformers) that reintroduced several ConvNet priors, making Transformers practically viable as a generic vision backbone and demonstrating remarkable performance on a wide variety of vision tasks. However, the effectiveness of such hybrid approaches is still largely credited to the intrinsic superiority of Transformers, rather than the inherent inductive biases of convolutions. In this work, we reexamine the design spaces and test the limits of what a pure ConvNet can achieve. We gradually “modernize” a standard ResNet toward the design of a vision Transformer, and discover several key components that contribute to the performance difference along the way. The outcome of this exploration is a family of pure ConvNet models dubbed ConvNeXt. Constructed entirely from standard ConvNet modules, ConvNeXts compete favorably with Transformers in terms of accuracy and scalability, achieving 87.8% ImageNet top-1 accuracy and outperforming Swin Transformers on COCO detection and ADE20K segmentation, while maintaining the simplicity and efficiency of standard ConvNets.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Zhuang Liu", "Hanzi Mao", "Chaozheng Wu", "Christoph Feichtenhofer", "Trevor Darrell", "Saining Xie" ], "externalIds": { "ArXiv": "2201.03545", "DBLP": "journals/corr/abs-2201-03545", "DOI": "10.1109/CVPR52688.2022.01167", "CorpusId": 245837420 }, "url": "https://www.semanticscholar.org/paper/177e957f5cd93229c9794ea652c646d2557b4a69", "referenceCount": 91, "citationCount": 3612, "influentialCitationCount": 583, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Automatic Mapping of the Best-Suited DNN Pruning Schemes for Real-Time Mobile Acceleration", "abstract": "Weight pruning is an effective model compression technique to tackle the challenges of achieving real-time deep neural network (DNN) inference on mobile devices. 
However, prior pruning schemes have limited application scenarios due to accuracy degradation, difficulty in leveraging hardware acceleration, and/or restriction on certain types of DNN layers. In this article, we propose a general, fine-grained structured pruning scheme and corresponding compiler optimizations that are applicable to any type of DNN layer while achieving high accuracy and hardware inference performance. With the flexibility of applying different pruning schemes to different layers enabled by our compiler optimizations, we further probe into the new problem of determining the best-suited pruning scheme considering the different acceleration and accuracy performance of various pruning schemes. Two pruning scheme mapping methods—one is search based and the other is rule based—are proposed to automatically derive the best-suited pruning regularity and block size for each layer of any given DNN. Experimental results demonstrate that our pruning scheme mapping methods, together with the general fine-grained structured pruning scheme, outperform the state-of-the-art DNN optimization framework with up to 2.48 \\( \\times \\) and 1.73 \\( \\times \\) DNN inference acceleration on CIFAR-10 and ImageNet datasets without accuracy loss.", "year": 2021, "venue": "ACM Trans. Design Autom. Electr. Syst.", "authors": [ "Yifan Gong", "Geng Yuan", "Zheng Zhan", "Wei Niu", "Zhengang Li", "Pu Zhao", "Yuxuan Cai", "Sijia Liu", "Bin Ren", "Xue Lin", "Xulong Tang", "Yanzhi Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2111-11581", "ArXiv": "2111.11581", "DOI": "10.1145/3495532", "CorpusId": 244488655 }, "url": "https://www.semanticscholar.org/paper/ac8ccc07d822c50efc4e952c5fb2e2f1a08dc333", "referenceCount": 85, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Swin Transformer V2: Scaling Up Capacity and Resolution", "abstract": "We present techniques for scaling Swin Transformer [35] up to 3 billion parameters and making it capable of training with images of up to 1,536x1,536 resolution. By scaling up capacity and resolution, Swin Transformer sets new records on four representative vision benchmarks: 84.0% top-1 accuracy on ImageNet-V2 image classification, 63.1 / 54.4 box / mask mAP on COCO object detection, 59.9 mIoU on ADE20K semantic segmentation, and 86.8% top-1 accuracy on Kinetics-400 video action classification. We tackle issues of training instability, and study how to effectively transfer models pre-trained at low resolutions to higher resolution ones. To this aim, several novel technologies are proposed: 1) a residual post normalization technique and a scaled cosine attention approach to improve the stability of large vision models; 2) a log-spaced continuous position bias technique to effectively transfer models pre-trained at low-resolution images and windows to their higher-resolution counterparts. In addition, we share our crucial implementation details that lead to significant savings of GPU memory consumption and thus make it feasible to train large vision models with regular GPUs. Using these techniques and self-supervised pre-training, we successfully train a strong 3 billion Swin Transformer model and effectively transfer it to various vision tasks involving high-resolution images or windows, achieving the state-of-the-art accuracy on a variety of benchmarks. 
Code is available at https://github.com/microsoft/Swin-Transformer.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Ze Liu", "Han Hu", "Yutong Lin", "Zhuliang Yao", "Zhenda Xie", "Yixuan Wei", "Jia Ning", "Yue Cao", "Zheng Zhang", "Li Dong", "Furu Wei", "B. Guo" ], "externalIds": { "ArXiv": "2111.09883", "DBLP": "journals/corr/abs-2111-09883", "DOI": "10.1109/CVPR52688.2022.01170", "CorpusId": 244346076 }, "url": "https://www.semanticscholar.org/paper/be0fbb810583930c071d0b9b2c5187fe260783f5", "referenceCount": 86, "citationCount": 1290, "influentialCitationCount": 128, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Efficiently Modeling Long Sequences with Structured State Spaces", "abstract": "A central goal of sequence modeling is designing a single principled model that can address sequence data across a range of modalities and tasks, particularly on long-range dependencies. Although conventional models including RNNs, CNNs, and Transformers have specialized variants for capturing long dependencies, they still struggle to scale to very long sequences of $10000$ or more steps. A promising recent approach proposed modeling sequences by simulating the fundamental state space model (SSM) \\( x'(t) = Ax(t) + Bu(t), y(t) = Cx(t) + Du(t) \\), and showed that for appropriate choices of the state matrix \\( A \\), this system could handle long-range dependencies mathematically and empirically. However, this method has prohibitive computation and memory requirements, rendering it infeasible as a general sequence modeling solution. We propose the Structured State Space sequence model (S4) based on a new parameterization for the SSM, and show that it can be computed much more efficiently than prior approaches while preserving their theoretical strengths. Our technique involves conditioning \\( A \\) with a low-rank correction, allowing it to be diagonalized stably and reducing the SSM to the well-studied computation of a Cauchy kernel. S4 achieves strong empirical results across a diverse range of established benchmarks, including (i) 91\\% accuracy on sequential CIFAR-10 with no data augmentation or auxiliary losses, on par with a larger 2-D ResNet, (ii) substantially closing the gap to Transformers on image and language modeling tasks, while performing generation $60\\times$ faster (iii) SoTA on every task from the Long Range Arena benchmark, including solving the challenging Path-X task of length 16k that all prior work fails on, while being as efficient as all competitors.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Albert Gu", "Karan Goel", "Christopher Ré" ], "externalIds": { "DBLP": "conf/iclr/GuGR22", "ArXiv": "2111.00396", "CorpusId": 240354066 }, "url": "https://www.semanticscholar.org/paper/ac2618b2ce5cdcf86f9371bcca98bc5e37e46f51", "referenceCount": 61, "citationCount": 930, "influentialCitationCount": 142, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Achieving on-Mobile Real-Time Super-Resolution with Neural Architecture and Pruning Search", "abstract": "Though recent years have witnessed remarkable progress in single image super-resolution (SISR) tasks with the prosperous development of deep neural networks (DNNs), the deep learning methods are confronted with the computation and memory consumption issues in practice, especially for resource-limited platforms such as mobile devices. 
To overcome the challenge and facilitate the real-time deployment of SISR tasks on mobile, we combine neural architecture search with pruning search and propose an automatic search framework that derives sparse super-resolution (SR) models with high image quality while satisfying the real-time inference requirement. To decrease the search cost, we leverage the weight sharing strategy by introducing a supernet and decouple the search problem into three stages, including supernet construction, compiler-aware architecture and pruning search, and compiler-aware pruning ratio search. With the proposed framework, we are the first to achieve real-time SR inference (with only tens of milliseconds per frame) for implementing 720p resolution with competitive image quality (in terms of PSNR and SSIM) on mobile platforms (Samsung Galaxy S20).", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Zheng Zhan", "Yifan Gong", "Pu Zhao", "Geng Yuan", "Wei Niu", "Yushu Wu", "Tianyun Zhang", "Malith Jayaweera", "D. Kaeli", "Bin Ren", "Xue Lin", "Yanzhi Wang" ], "externalIds": { "ArXiv": "2108.08910", "DBLP": "conf/iccv/00010ZYNWZJKRLW21", "MAG": "3195441577", "DOI": "10.1109/ICCV48922.2021.00478", "CorpusId": 237259885 }, "url": "https://www.semanticscholar.org/paper/851ad21f48e738eabf9ff0f809d48139ddcb6cad", "referenceCount": 66, "citationCount": 28, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "IA-RED2: Interpretability-Aware Redundancy Reduction for Vision Transformers", "abstract": "The self-attention-based model, transformer, is recently becoming the leading backbone in the field of computer vision. In spite of the impressive success made by transformers in a variety of vision tasks, it still suffers from heavy computation and intensive memory costs. To address this limitation, this paper presents an Interpretability-Aware REDundancy REDuction framework (IA-RED$^2$). We start by observing a large amount of redundant computation, mainly spent on uncorrelated input patches, and then introduce an interpretable module to dynamically and gracefully drop these redundant patches. This novel framework is then extended to a hierarchical structure, where uncorrelated tokens at different stages are gradually removed, resulting in a considerable shrinkage of computational cost. We include extensive experiments on both image and video tasks, where our method could deliver up to 1.4x speed-up for state-of-the-art models like DeiT and TimeSformer, by only sacrificing less than 0.7% accuracy. More importantly, contrary to other acceleration approaches, our method is inherently interpretable with substantial visual evidence, making vision transformer closer to a more human-understandable architecture while being lighter. We demonstrate that the interpretability that naturally emerged in our framework can outperform the raw attention learned by the original visual transformer, as well as those generated by off-the-shelf interpretation methods, with both qualitative and quantitative results. Project Page: http://people.csail.mit.edu/bpan/ia-red/.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Bowen Pan", "Yifan Jiang", "Rameswar Panda", "Zhangyang Wang", "R. Feris", "A. 
Oliva" ], "externalIds": { "DBLP": "conf/nips/PanPJWFO21", "ArXiv": "2106.12620", "CorpusId": 235623729 }, "url": "https://www.semanticscholar.org/paper/e2f2662f0734e2edc2b4b36a734de111c7f8d54d", "referenceCount": 69, "citationCount": 119, "influentialCitationCount": 21, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DynamicViT: Efficient Vision Transformers with Dynamic Token Sparsification", "abstract": "Attention is sparse in vision transformers. We observe the final prediction in vision transformers is only based on a subset of most informative tokens, which is sufficient for accurate image recognition. Based on this observation, we propose a dynamic token sparsification framework to prune redundant tokens progressively and dynamically based on the input. Specifically, we devise a lightweight prediction module to estimate the importance score of each token given the current features. The module is added to different layers to prune redundant tokens hierarchically. To optimize the prediction module in an end-to-end manner, we propose an attention masking strategy to differentiably prune a token by blocking its interactions with other tokens. Benefiting from the nature of self-attention, the unstructured sparse tokens are still hardware friendly, which makes our framework easy to achieve actual speed-up. By hierarchically pruning 66% of the input tokens, our method greatly reduces 31%~37% FLOPs and improves the throughput by over 40% while the drop of accuracy is within 0.5% for various vision transformers. Equipped with the dynamic token sparsification framework, DynamicViT models can achieve very competitive complexity/accuracy trade-offs compared to state-of-the-art CNNs and vision transformers on ImageNet. Code is available at https://github.com/raoyongming/DynamicViT", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Yongming Rao", "Wenliang Zhao", "Benlin Liu", "Jiwen Lu", "Jie Zhou", "Cho-Jui Hsieh" ], "externalIds": { "DBLP": "conf/nips/RaoZLLZH21", "ArXiv": "2106.02034", "CorpusId": 235313562 }, "url": "https://www.semanticscholar.org/paper/dbdcabd0444ad50b68ee09e30f39b66e9068f5d2", "referenceCount": 39, "citationCount": 508, "influentialCitationCount": 101, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Tokens-to-Token ViT: Training Vision Transformers from Scratch on ImageNet", "abstract": "Transformers, which are popular for language modeling, have been explored for solving vision tasks recently, e.g., the Vision Transformer (ViT) for image classification. The ViT model splits each image into a sequence of tokens with fixed length and then applies multiple Transformer layers to model their global relation for classification. However, ViT achieves inferior performance to CNNs when trained from scratch on a midsize dataset like ImageNet. We find it is because: 1) the simple tokenization of input images fails to model the important local structure such as edges and lines among neighboring pixels, leading to low training sample efficiency; 2) the redundant attention backbone design of ViT leads to limited feature richness for fixed computation budgets and limited training samples. 
To overcome such limitations, we propose a new Tokens-To-Token Vision Transformer (T2T-ViT), which incorporates 1) a layer-wise Tokens-to-Token (T2T) transformation to progressively structurize the image to tokens by recursively aggregating neighboring Tokens into one Token (Tokens-to-Token), such that local structure represented by surrounding tokens can be modeled and tokens length can be reduced; 2) an efficient backbone with a deep-narrow structure for vision transformer motivated by CNN architecture design after empirical study. Notably, T2T-ViT reduces the parameter count and MACs of vanilla ViT by half, while achieving more than 3.0% improvement when trained from scratch on ImageNet. It also outperforms ResNets and achieves comparable performance with MobileNets by directly training on ImageNet. For example, T2T-ViT with comparable size to ResNet50 (21.5M parameters) can achieve 83.3% top1 accuracy in image resolution 384x384 on ImageNet.1", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Li Yuan", "Yunpeng Chen", "Tao Wang", "Weihao Yu", "Yujun Shi", "Francis E. H. Tay", "Jiashi Feng", "Shuicheng Yan" ], "externalIds": { "DBLP": "journals/corr/abs-2101-11986", "ArXiv": "2101.11986", "DOI": "10.1109/ICCV48922.2021.00060", "CorpusId": 231719476 }, "url": "https://www.semanticscholar.org/paper/dbe077f8521ecbe0a1477d6148c726d4f053d9c9", "referenceCount": 63, "citationCount": 1651, "influentialCitationCount": 192, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Training data-efficient image transformers & distillation through attention", "abstract": "Recently, neural networks purely based on attention were shown to address image understanding tasks such as image classification. However, these visual transformers are pre-trained with hundreds of millions of images using an expensive infrastructure, thereby limiting their adoption. In this work, we produce a competitive convolution-free transformer by training on Imagenet only. We train them on a single computer in less than 3 days. Our reference vision transformer (86M parameters) achieves top-1 accuracy of 83.1% (single-crop evaluation) on ImageNet with no external data. More importantly, we introduce a teacher-student strategy specific to transformers. It relies on a distillation token ensuring that the student learns from the teacher through attention. We show the interest of this token-based distillation, especially when using a convnet as a teacher. This leads us to report results competitive with convnets for both Imagenet (where we obtain up to 85.2% accuracy) and when transferring to other tasks. We share our code and models.", "year": 2020, "venue": "International Conference on Machine Learning", "authors": [ "Hugo Touvron", "M. Cord", "Matthijs Douze", "Francisco Massa", "Alexandre Sablayrolles", "Hervé Jégou" ], "externalIds": { "ArXiv": "2012.12877", "DBLP": "journals/corr/abs-2012-12877", "CorpusId": 229363322 }, "url": "https://www.semanticscholar.org/paper/ad7ddcc14984caae308c397f1a589aae75d4ab71", "referenceCount": 66, "citationCount": 5424, "influentialCitationCount": 892, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale", "abstract": "While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. 
In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Alexey Dosovitskiy", "Lucas Beyer", "Alexander Kolesnikov", "Dirk Weissenborn", "Xiaohua Zhai", "Thomas Unterthiner", "Mostafa Dehghani", "Matthias Minderer", "G. Heigold", "S. Gelly", "Jakob Uszkoreit", "N. Houlsby" ], "externalIds": { "MAG": "3094502228", "ArXiv": "2010.11929", "DBLP": "conf/iclr/DosovitskiyB0WZ21", "CorpusId": 225039882 }, "url": "https://www.semanticscholar.org/paper/268d347e8a55b5eb82fb5e7d2f800e33c75ab18a", "referenceCount": 65, "citationCount": 28233, "influentialCitationCount": 4121, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Privacy-Preserving-Oriented DNN Pruning and Mobile Acceleration Framework", "abstract": "Weight pruning of deep neural networks (DNNs) has been proposed to satisfy the limited storage and computing capability of mobile edge devices. However, previous pruning methods mainly focus on reducing the model size and/or improving performance without considering the privacy of user data. To mitigate this concern, we propose a privacy-preserving-oriented pruning and mobile acceleration framework that does not require the private training dataset. At the algorithm level of the proposed framework, a systematic weight pruning technique based on the alternating direction method of multipliers (ADMM) is designed to iteratively solve the pattern-based pruning problem for each layer with randomly generated synthetic data. In addition, corresponding optimizations at the compiler level are leveraged for inference accelerations on devices. With the proposed framework, users could avoid the time-consuming pruning process for non-experts and directly benefit from compressed models. Experimental results show that the proposed framework outperforms three state-of-art end-to-end DNN frameworks, i.e., TensorFlow-Lite, TVM, and MNN, with speedup up to 4.2×, 2.5×, and 2.0×, respectively, with almost no accuracy loss, while preserving data privacy.", "year": 2020, "venue": "ACM Great Lakes Symposium on VLSI", "authors": [ "Yifan Gong", "Zheng Zhan", "Z. Li", "Wei Niu", "Xiaolong Ma", "Wenhao Wang", "Bin Ren", "Caiwen Ding", "X. 
Lin", "Xiaolin Xu", "Yanzhi Wang" ], "externalIds": { "MAG": "3085631454", "ArXiv": "2003.06513", "DBLP": "conf/glvlsi/Gong0LNMWRDLXW20", "DOI": "10.1145/3386263.3407650", "CorpusId": 221509563 }, "url": "https://www.semanticscholar.org/paper/4709b4f85dd552e2196a60afabbb5e14f10fd7ad", "referenceCount": 27, "citationCount": 19, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Compressing Convolutional Neural Networks via Factorized Convolutional Filters", "abstract": "This work studies the model compression for deep convolutional neural networks (CNNs) via filter pruning. The workflow of a traditional pruning consists of three sequential stages: pre-training the original model, selecting the pre-trained filters via ranking according to a manually designed criterion (e.g., the norm of filters), and learning the remained filters via fine-tuning. Most existing works follow this pipeline and focus on designing different ranking criteria for filter selection. However, it is difficult to control the performance due to the separation of filter selection and filter learning. In this work, we propose to conduct filter selection and filter learning simultaneously, in a unified model. To this end, we define a factorized convolutional filter (FCF), consisting of a standard real-valued convolutional filter and a binary scalar, as well as a dot-product operator between them. We train a CNN model with factorized convolutional filters (CNN-FCF) by updating the standard filter using back-propagation, while updating the binary scalar using the alternating direction method of multipliers (ADMM) based optimization method. With this trained CNN-FCF model, we only keep the standard filters corresponding to the 1-valued scalars, while all other filters and all binary scalars are discarded, to obtain a compact CNN model. Extensive experiments on CIFAR-10 and ImageNet demonstrate the superiority of the proposed method over state-of-the-art filter pruning methods.", "year": 2019, "venue": "Computer Vision and Pattern Recognition", "authors": [ "T. Li", "Baoyuan Wu", "Yujiu Yang", "Yanbo Fan", "Yong Zhang", "Wei Liu" ], "externalIds": { "DBLP": "conf/cvpr/LiWYFZL19", "MAG": "2945335799", "DOI": "10.1109/CVPR.2019.00410", "CorpusId": 181726313 }, "url": "https://www.semanticscholar.org/paper/301c36778a8581a2526d674e7bd5472279f35e07", "referenceCount": 46, "citationCount": 92, "influentialCitationCount": 16, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks", "abstract": "Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are available. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on scaling up MobileNets and ResNet. \nTo go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. 
In particular, our EfficientNet-B7 achieves state-of-the-art 84.4% top-1 / 97.1% top-5 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet. Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flowers (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters. Source code is at this https URL.", "year": 2019, "venue": "International Conference on Machine Learning", "authors": [ "Mingxing Tan", "Quoc V. Le" ], "externalIds": { "DBLP": "conf/icml/TanL19", "MAG": "2946948417", "ArXiv": "1905.11946", "CorpusId": 167217261 }, "url": "https://www.semanticscholar.org/paper/4f2eda8077dc7a69bb2b4e0a1a086cf054adb3f9", "referenceCount": 54, "citationCount": 14942, "influentialCitationCount": 1922, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Filter Pruning via Geometric Median for Deep Convolutional Neural Networks Acceleration", "abstract": "Previous works utilized “smaller-norm-less-important” criterion to prune filters with smaller norm values in a convolutional neural network. In this paper, we analyze this norm-based criterion and point out that its effectiveness depends on two requirements that are not always met: (1) the norm deviation of the filters should be large; (2) the minimum norm of the filters should be small. To solve this problem, we propose a novel filter pruning method, namely Filter Pruning via Geometric Median (FPGM), to compress the model regardless of those two requirements. Unlike previous methods, FPGM compresses CNN models by pruning filters with redundancy, rather than those with“relatively less” importance. When applied to two image classification benchmarks, our method validates its usefulness and strengths. Notably, on CIFAR-10, FPGM reduces more than 52% FLOPs on ResNet-110 with even 2.69% relative accuracy improvement. Moreover, on ILSVRC-2012, FPGM reduces more than 42% FLOPs on ResNet-101 without top-5 accuracy drop, which has advanced the state-of-the-art. 
Code is publicly available on GitHub: https://github.com/he-y/filter-pruning-geometric-median", "year": 2018, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Yang He", "Ping Liu", "Ziwei Wang", "Zhilan Hu", "Yi Yang" ], "externalIds": { "MAG": "2951153470", "ArXiv": "1811.00250", "DBLP": "conf/cvpr/HeLWHY19", "DOI": "10.1109/CVPR.2019.00447", "CorpusId": 102350938 }, "url": "https://www.semanticscholar.org/paper/bb5bc0acea8d452a7999c512127b4f7b3acf8a6d", "referenceCount": 47, "citationCount": 939, "influentialCitationCount": 212, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Unified Perceptual Parsing for Scene Understanding", "abstract": null, "year": 2018, "venue": "European Conference on Computer Vision", "authors": [ "Tete Xiao", "Yingcheng Liu", "Bolei Zhou", "Yuning Jiang", "Jian Sun" ], "externalIds": { "ArXiv": "1807.10221", "DBLP": "journals/corr/abs-1807-10221", "MAG": "2953120679", "DOI": "10.1007/978-3-030-01228-1_26", "CorpusId": 50781105 }, "url": "https://www.semanticscholar.org/paper/aaab0bd4d79d4f19109bab0fbcdb05070fb0edd1", "referenceCount": 46, "citationCount": 1517, "influentialCitationCount": 188, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Soft Filter Pruning for Accelerating Deep Convolutional Neural Networks", "abstract": "This paper proposed a Soft Filter Pruning (SFP) method to accelerate the inference procedure of deep Convolutional Neural Networks (CNNs). Specifically, the proposed SFP enables the pruned filters to be updated when training the model after pruning. SFP has two advantages over previous works: (1) Larger model capacity. Updating previously pruned filters provides our approach with larger optimization space than fixing the filters to zero. Therefore, the network trained by our method has a larger model capacity to learn from the training data. (2) Less dependence on the pretrained model. Large capacity enables SFP to train from scratch and prune the model simultaneously. In contrast, previous filter pruning methods should be conducted on the basis of the pre-trained model to guarantee their performance. Empirically, SFP from scratch outperforms the previous filter pruning methods. Moreover, our approach has been demonstrated effective for many advanced CNN architectures. Notably, on ILSVRC-2012, SFP reduces more than 42% FLOPs on ResNet-101 with even 0.2% top-5 accuracy improvement, which has advanced the state-of-the-art. 
Code is publicly available on GitHub: https://github.com/he-y/softfilter-pruning", "year": 2018, "venue": "International Joint Conference on Artificial Intelligence", "authors": [ "Yang He", "Guoliang Kang", "Xuanyi Dong", "Yanwei Fu", "Yi Yang" ], "externalIds": { "DBLP": "journals/corr/abs-1808-06866", "ArXiv": "1808.06866", "MAG": "2951977814", "DOI": "10.24963/ijcai.2018/309", "CorpusId": 51608028 }, "url": "https://www.semanticscholar.org/paper/52ff452c2c38d082c07eb434996e07a8c242a692", "referenceCount": 31, "citationCount": 871, "influentialCitationCount": 171, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "NISP: Pruning Networks Using Neuron Importance Score Propagation", "abstract": "To reduce the significant redundancy in deep Convolutional Neural Networks (CNNs), most existing methods prune neurons by only considering the statistics of an individual layer or two consecutive layers (e.g., prune one layer to minimize the reconstruction error of the next layer), ignoring the effect of error propagation in deep networks. In contrast, we argue that for a pruned network to retain its predictive power, it is essential to prune neurons in the entire neuron network jointly based on a unified goal: minimizing the reconstruction error of important responses in the \"final response layer\" (FRL), which is the second-to-last layer before classification. Specifically, we apply feature ranking techniques to measure the importance of each neuron in the FRL, formulate network pruning as a binary integer optimization problem, and derive a closed-form solution to it for pruning neurons in earlier layers. Based on our theoretical analysis, we propose the Neuron Importance Score Propagation (NISP) algorithm to propagate the importance scores of final responses to every neuron in the network. The CNN is pruned by removing neurons with least importance, and it is then fine-tuned to recover its predictive power. NISP is evaluated on several datasets with multiple CNN models and demonstrated to achieve significant acceleration and compression with negligible accuracy loss.", "year": 2017, "venue": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition", "authors": [ "Ruichi Yu", "Ang Li", "Chun-Fu Chen", "Jui-Hsin Lai", "Vlad I. Morariu", "Xintong Han", "M. Gao", "Ching-Yung Lin", "L. Davis" ], "externalIds": { "DBLP": "conf/cvpr/Yu00LMHGLD18", "ArXiv": "1711.05908", "MAG": "2952444016", "DOI": "10.1109/CVPR.2018.00958", "CorpusId": 4142619 }, "url": "https://www.semanticscholar.org/paper/af03709f0893a7ff1c2656b73249d60030bab996", "referenceCount": 48, "citationCount": 758, "influentialCitationCount": 81, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Focal Loss for Dense Object Detection", "abstract": "The highest accuracy object detectors to date are based on a two-stage approach popularized by R-CNN, where a classifier is applied to a sparse set of candidate object locations. In contrast, one-stage detectors that are applied over a regular, dense sampling of possible object locations have the potential to be faster and simpler, but have trailed the accuracy of two-stage detectors thus far. In this paper, we investigate why this is the case. We discover that the extreme foreground-background class imbalance encountered during training of dense detectors is the central cause. 
We propose to address this class imbalance by reshaping the standard cross entropy loss such that it down-weights the loss assigned to well-classified examples. Our novel Focal Loss focuses training on a sparse set of hard examples and prevents the vast number of easy negatives from overwhelming the detector during training. To evaluate the effectiveness of our loss, we design and train a simple dense detector we call RetinaNet. Our results show that when trained with the focal loss, RetinaNet is able to match the speed of previous one-stage detectors while surpassing the accuracy of all existing state-of-the-art two-stage detectors.", "year": 2017, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Tsung-Yi Lin", "Priya Goyal", "Ross B. Girshick", "Kaiming He", "Piotr Dollár" ], "externalIds": { "MAG": "2950100464", "DBLP": "journals/corr/abs-1708-02002", "DOI": "10.1109/ICCV.2017.324", "CorpusId": 47252984 }, "url": "https://www.semanticscholar.org/paper/79cfb51a51fc093f66aac8e858afe2e14d4a1f20", "referenceCount": 42, "citationCount": 21171, "influentialCitationCount": 2817, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Scene Parsing through ADE20K Dataset", "abstract": "Scene parsing, or recognizing and segmenting objects and stuff in an image, is one of the key problems in computer vision. Despite the communitys efforts in data collection, there are still few image datasets covering a wide range of scenes and object categories with dense and detailed annotations for scene parsing. In this paper, we introduce and analyze the ADE20K dataset, spanning diverse annotations of scenes, objects, parts of objects, and in some cases even parts of parts. A scene parsing benchmark is built upon the ADE20K with 150 object and stuff classes included. Several segmentation baseline models are evaluated on the benchmark. A novel network design called Cascade Segmentation Module is proposed to parse a scene into stuff, objects, and object parts in a cascade and improve over the baselines. We further show that the trained scene parsing networks can lead to applications such as image content removal and scene synthesis1.", "year": 2017, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Bolei Zhou", "Hang Zhao", "Xavier Puig", "S. Fidler", "Adela Barriuso", "A. Torralba" ], "externalIds": { "MAG": "2737258237", "DBLP": "conf/cvpr/ZhouZPFB017", "DOI": "10.1109/CVPR.2017.544", "CorpusId": 5636055 }, "url": "https://www.semanticscholar.org/paper/2a5667702b0f1ff77dde8fb3e2e10d4e05e8de9d", "referenceCount": 35, "citationCount": 2692, "influentialCitationCount": 551, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Attention is All you Need", "abstract": "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. 
On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Ashish Vaswani", "Noam M. Shazeer", "Niki Parmar", "Jakob Uszkoreit", "Llion Jones", "Aidan N. Gomez", "Lukasz Kaiser", "Illia Polosukhin" ], "externalIds": { "MAG": "2963403868", "DBLP": "conf/nips/VaswaniSPUJGKP17", "ArXiv": "1706.03762", "CorpusId": 13756489 }, "url": "https://www.semanticscholar.org/paper/204e3073870fae3d05bcbc2f6a8e263d9b72e776", "referenceCount": 41, "citationCount": 104988, "influentialCitationCount": 15363, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Mask R-CNN", "abstract": "We present a conceptually simple, flexible, and general framework for object instance segmentation. Our approach efficiently detects objects in an image while simultaneously generating a high-quality segmentation mask for each instance. The method, called Mask R-CNN, extends Faster R-CNN by adding a branch for predicting an object mask in parallel with the existing branch for bounding box recognition. Mask R-CNN is simple to train and adds only a small overhead to Faster R-CNN, running at 5 fps. Moreover, Mask R-CNN is easy to generalize to other tasks, e.g., allowing us to estimate human poses in the same framework. We show top results in all three tracks of the COCO suite of challenges, including instance segmentation, bounding-box object detection, and person keypoint detection. Without tricks, Mask R-CNN outperforms all existing, single-model entries on every task, including the COCO 2016 challenge winners. We hope our simple and effective approach will serve as a solid baseline and help ease future research in instance-level recognition. Code will be made available.", "year": 2017, "venue": "", "authors": [ "Kaiming He", "Georgia Gkioxari", "Piotr Dollár", "Ross B. Girshick" ], "externalIds": { "ArXiv": "1703.06870", "CorpusId": 54465873 }, "url": "https://www.semanticscholar.org/paper/1a0912bb76777469295bb2c059faee907e7f3258", "referenceCount": 40, "citationCount": 24114, "influentialCitationCount": 3735, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Structured Sparsity in Deep Neural Networks", "abstract": "High demand for computation resources severely hinders deployment of large-scale Deep Neural Networks (DNN) in resource constrained devices. In this work, we propose a Structured Sparsity Learning (SSL) method to regularize the structures (i.e., filters, channels, filter shapes, and layer depth) of DNNs. SSL can: (1) learn a compact structure from a bigger DNN to reduce computation cost; (2) obtain a hardware-friendly structured sparsity of DNN to efficiently accelerate the DNNs evaluation. Experimental results show that SSL achieves on average 5.1x and 3.1x speedups of convolutional layer computation of AlexNet against CPU and GPU, respectively, with off-the-shelf libraries. These speedups are about twice speedups of non-structured sparsity; (3) regularize the DNN structure to improve classification accuracy. 
The results show that for CIFAR-10, regularization on layer depth can reduce 20 layers of a Deep Residual Network (ResNet) to 18 layers while improve the accuracy from 91.25% to 92.60%, which is still slightly higher than that of original ResNet with 32 layers. For AlexNet, structure regularization by SSL also reduces the error by around ~1%. Open source code is in this https URL", "year": 2016, "venue": "Neural Information Processing Systems", "authors": [ "W. Wen", "Chunpeng Wu", "Yandan Wang", "Yiran Chen", "Hai Helen Li" ], "externalIds": { "ArXiv": "1608.03665", "DBLP": "journals/corr/WenWWCL16", "MAG": "2513419314", "CorpusId": 2056019 }, "url": "https://www.semanticscholar.org/paper/7601b995303f953955004db7b9b8b206c0e02ff8", "referenceCount": 22, "citationCount": 2201, "influentialCitationCount": 231, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Deep Residual Learning for Image Recognition", "abstract": "Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers - 8× deeper than VGG nets [40] but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions1, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation.", "year": 2015, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Kaiming He", "X. Zhang", "Shaoqing Ren", "Jian Sun" ], "externalIds": { "DBLP": "conf/cvpr/HeZRS16", "MAG": "2949650786", "ArXiv": "1512.03385", "DOI": "10.1109/cvpr.2016.90", "CorpusId": 206594692 }, "url": "https://www.semanticscholar.org/paper/2c03df8b48bf3fa39054345bafabfeff15bfd11d", "referenceCount": 53, "citationCount": 172713, "influentialCitationCount": 28229, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Very Deep Convolutional Networks for Large-Scale Image Recognition", "abstract": "In this work we investigate the effect of the convolutional network depth on its accuracy in the large-scale image recognition setting. Our main contribution is a thorough evaluation of networks of increasing depth using an architecture with very small (3x3) convolution filters, which shows that a significant improvement on the prior-art configurations can be achieved by pushing the depth to 16-19 weight layers. These findings were the basis of our ImageNet Challenge 2014 submission, where our team secured the first and the second places in the localisation and classification tracks respectively. 
We also show that our representations generalise well to other datasets, where they achieve state-of-the-art results. We have made our two best-performing ConvNet models publicly available to facilitate further research on the use of deep visual representations in computer vision.", "year": 2014, "venue": "International Conference on Learning Representations", "authors": [ "K. Simonyan", "Andrew Zisserman" ], "externalIds": { "MAG": "2949429431", "ArXiv": "1409.1556", "DBLP": "journals/corr/SimonyanZ14a", "CorpusId": 14124313 }, "url": "https://www.semanticscholar.org/paper/eb42cf88027de515750f230b23b1a057dc782108", "referenceCount": 43, "citationCount": 93036, "influentialCitationCount": 13588, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Microsoft COCO: Common Objects in Context", "abstract": null, "year": 2014, "venue": "European Conference on Computer Vision", "authors": [ "Tsung-Yi Lin", "M. Maire", "Serge J. Belongie", "James Hays", "P. Perona", "Deva Ramanan", "Piotr Dollár", "C. L. Zitnick" ], "externalIds": { "ArXiv": "1405.0312", "DBLP": "conf/eccv/LinMBHPRDZ14", "MAG": "2952122856", "DOI": "10.1007/978-3-319-10602-1_48", "CorpusId": 14113767 }, "url": "https://www.semanticscholar.org/paper/71b7178df5d2b112d07e45038cb5637208659ff7", "referenceCount": 52, "citationCount": 38114, "influentialCitationCount": 6016, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ImageNet: A large-scale hierarchical image database", "abstract": "The explosion of image data on the Internet has the potential to foster more sophisticated and robust models and algorithms to index, retrieve, organize and interact with images and multimedia data. But exactly how such data can be harnessed and organized remains a critical problem. We introduce here a new database called “ImageNet”, a large-scale ontology of images built upon the backbone of the WordNet structure. ImageNet aims to populate the majority of the 80,000 synsets of WordNet with an average of 500-1000 clean and full resolution images. This will result in tens of millions of annotated images organized by the semantic hierarchy of WordNet. This paper offers a detailed analysis of ImageNet in its current state: 12 subtrees with 5247 synsets and 3.2 million images in total. We show that ImageNet is much larger in scale and diversity and much more accurate than the current image datasets. Constructing such a large-scale database is a challenging task. We describe the data collection scheme with Amazon Mechanical Turk. Lastly, we illustrate the usefulness of ImageNet through three simple applications in object recognition, image classification and automatic object clustering. We hope that the scale, accuracy, diversity and hierarchical structure of ImageNet can offer unparalleled opportunities to researchers in the computer vision community and beyond.", "year": 2009, "venue": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "authors": [ "Jia Deng", "Wei Dong", "R. Socher", "Li-Jia Li", "K. 
Li", "Li Fei-Fei" ], "externalIds": { "DBLP": "conf/cvpr/DengDSLL009", "MAG": "2108598243", "DOI": "10.1109/CVPR.2009.5206848", "CorpusId": 57246310 }, "url": "https://www.semanticscholar.org/paper/d2c733e34d48784a37d717fe43d9e93277a8c53e", "referenceCount": 27, "citationCount": 56678, "influentialCitationCount": 8947, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "HiViT: A Simpler and More Efficient Design of Hierarchical Vision Transformer", "abstract": "There has been a debate on the choice of plain vs. hierarchical vision transformers, where researchers often believe that the former (e.g., ViT) has a simpler design but the latter (e.g., Swin) enjoys higher recognition accuracy. Recently, the emerge of masked image modeling (MIM), a self-supervised pre-training method, raised a new challenge to vision transformers in terms of flexibility, i.e., part of image patches or tokens are to be discarded, which seems to claim the advantages of plain vision transformers. In this paper, we delve deep into the comparison between ViT and Swin, revealing that (i) the performance gain of Swin is mainly brought by a deepened backbone and relative positional encoding, (ii) the hierarchical design of Swin can be simplified into hierarchical patch embedding (proposed in this work), and (iii) other designs such as shifted-window attentions can be removed. By removing the unnecessary operations, we come up with a new architecture named HiViT (short for hierarchical ViT), which is simpler and more efficient than Swin yet further improves its performance on fully-supervised and self-supervised visual representation learning. In particular, after pre-trained using masked autoencoder (MAE) on ImageNet-1K, HiViT-B reports a 84.6% accuracy on ImageNet-1K classification, a 53.3% box AP on COCO detection, and a 52.8% mIoU on ADE20K segmentation, significantly surpassing the baseline. Code is available at https://github.com/zhangxiaosong18/hivit.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Xiaosong Zhang", "Yunjie Tian", "Lingxi Xie", "Wei Huang", "Qi Dai", "Qixiang Ye", "Qi Tian" ], "externalIds": { "DBLP": "conf/iclr/0004TXHDY023", "CorpusId": 259298529 }, "url": "https://www.semanticscholar.org/paper/689bc24f71f8f22784534c764d59baa93a62c2e0", "referenceCount": 64, "citationCount": 36, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "EViT: Expediting Vision Transformers via Token Reorganizations", "abstract": null, "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Youwei Liang", "Chongjian Ge", "Zhan Tong", "Yibing Song", "Jue Wang", "P. Xie" ], "externalIds": { "DBLP": "conf/iclr/LiangGTS0X22", "CorpusId": 251647803 }, "url": "https://www.semanticscholar.org/paper/293535c2b0ef674e1ed9a7ba227e37cca35e5e4b", "referenceCount": 0, "citationCount": 74, "influentialCitationCount": 23, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "S4ND: Modeling Images and Videos as Multidimensional Signals with State Spaces", "abstract": null, "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Eric Nguyen", "Karan Goel", "Albert Gu", "G. Downs", "Preey Shah", "Tri Dao", "S. Baccus", "C. 
Ré" ], "externalIds": { "DBLP": "conf/nips/NguyenGGDSDBR22", "CorpusId": 260443992 }, "url": "https://www.semanticscholar.org/paper/cf0f8f585c8822e3c6bcd9527d546eefc8486aea", "referenceCount": 0, "citationCount": 107, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SPViT: Enabling Faster Vision Transformers via Soft Token Pruning", "abstract": "Recently, Vision Transformer (ViT) has continuously established new milestones in the computer vision field, while the high computation and memory cost makes its propagation in industrial production difficult. Pruning, a traditional model compression paradigm for hardware efficiency, has been widely applied in various DNN structures. Nevertheless, it stays ambiguous on how to perform exclusive pruning on the ViT structure. Considering three key points: the structural characteristics, the internal data pattern of ViTs, and the related edge device deployment, we leverage the input token sparsity and propose a computation-aware soft pruning framework, which can be set up on vanilla Transformers of both flatten and CNN-type structures, such as Pooling-based ViT (PiT). More concretely, we design a dynamic attention-based multi-head token selector, which is a lightweight module for adaptive instance-wise token selection. We further introduce a soft pruning technique, which integrates the less informative tokens generated by the selector module into a package token that will participate in subsequent calculations rather than being completely discarded. Our framework is bound to the trade-off between accuracy and computation constraints of specific edge devices through our proposed computation-aware training strategy. Experimental results show that our", "year": 2021, "venue": "arXiv.org", "authors": [ "Zhenglun Kong", "Peiyan Dong", "Xiaolong Ma", "Xin Meng", "Wei Niu", "Mengshu Sun", "Bin Ren", "Minghai Qin", "Hao Tang", "Yanzhi Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2112-13890", "CorpusId": 260445709 }, "url": "https://www.semanticscholar.org/paper/0d8be19e00af83388523baf86f8cdf682302a0d1", "referenceCount": 88, "citationCount": 61, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] } ] }, "Multi-Source Hard and Soft Information Fusion Approach for Accurate Cryptocurrency Price Movement Prediction": { "paper_title": "Multi-Source Hard and Soft Information Fusion Approach for Accurate Cryptocurrency Price Movement Prediction", "arxiv_id": "2409.18895v1", "keyword": "transformer", "authors": [ "Saeed Mohammadi Dashtaki", "Mehdi Hosseini Chagahi", "Behzad Moshiri", "Md. Jalil Piran" ], "references": [ { "title": "Reducing Privacy of CoinJoin Transactions: Quantitative Bitcoin Network Analysis", "abstract": "Privacy within the Bitcoin ecosystem has been critical for the operation and propagation of the system since its very first release. While various entities have sought to deanonymize and reveal user identities, the default semi-anonymous approach to privacy was judged as insufficient and the community developed a number of advanced privacy-preservation mechanisms. In this study, we propose an improved variant of the multiple-input clustering approach that incorporates advanced privacy-enhancing techniques. We examine the CoinJoin-adjusted user graph of Bitcoin through quantitative network analysis and draw conclusions on the effectiveness of our proposed clustering method compared to naive multiple-input clustering. 
Our findings indicate that CoinJoin transactions can significantly distort commonly applied address clustering approaches. Moreover, we demonstrate that Bitcoin's user graph has become less dense in recent years, concurrent with the collapse of several independent user clusters. Our results contribute to a more comprehensive understanding of privacy aspects in the Bitcoin transaction network and lay the groundwork for developing enhanced measures to prevent money laundering and terrorism financing.", "year": 2024, "venue": "IEEE Transactions on Dependable and Secure Computing", "authors": [ "Anton Wahrstätter", "Alfred Taudes", "Davor Svetinovic" ], "externalIds": { "DBLP": "journals/tdsc/WahrstatterTS24", "DOI": "10.1109/TDSC.2024.3353803", "CorpusId": 267026016 }, "url": "https://www.semanticscholar.org/paper/d907a81ec0f51dcaf8a4377c46af6b9399b1471b", "referenceCount": 37, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ContractCheck: Checking Ethereum Smart Contracts in Fine-Grained Level", "abstract": "The blockchain has been the main computing scenario for smart contracts, and the decentralized infrastructure of the blockchain is effectively implemented in a de-trusted and executable environment. However, vulnerabilities in smart contracts are particularly vulnerable to exploitation by malicious attackers and have always been a key issue in blockchain security. Existing traditional tools are inefficient in detecting vulnerabilities and have a high rate of false positives when detecting contracts. Some neural network methods have improved the detection efficiency, but they are not competent for fine-grained (code line level) vulnerability detection. We propose the ContractCheck model for detecting contract vulnerabilities based on neural network methods. ContractCheck extracts fine-grained segments from the abstract syntax tree (AST) and function call graph of smart contract source code. Furthermore, the segments are parsed into token flow retaining semantic information as uint, which are used to generate numerical vector sequences that can be trained using neural network methods. We conduct multiple rounds of experiments using a dataset constructed from 36,885 smart contracts and identified the optimal ContractCheck model structure by employing the Fasttext embedding vector algorithm and constructing a composite model using CNN and BiGRU for training the network. Evaluation on other datasets demonstrates that ContractCheck exhibits significant improvement in contract-level detection performance compared to other methods, with an increase of 23.60% in F1 score over the best existing method. Particularly, it achieves fine-grained detection based on neural network methods. 
The cases provided indicate that ContractCheck can effectively assist developers in accurately locating the presence of vulnerabilities, thereby enhancing the security of Ethereum smart contracts.", "year": 2024, "venue": "IEEE Transactions on Software Engineering", "authors": [ "Xite Wang", "Senping Tian", "Wei Cui" ], "externalIds": { "DBLP": "journals/tse/WangTC24", "DOI": "10.1109/TSE.2024.3400294", "CorpusId": 269799120 }, "url": "https://www.semanticscholar.org/paper/e833340be428b5fccf474da9c8017abbdb2b0197", "referenceCount": 62, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An ensemble learning method for Bitcoin price prediction based on volatility indicators and trend", "abstract": null, "year": 2024, "venue": "Engineering applications of artificial intelligence", "authors": [ "A. Bâra", "S. Oprea" ], "externalIds": { "DBLP": "journals/eaai/BaraO24", "DOI": "10.1016/j.engappai.2024.107991", "CorpusId": 267624926 }, "url": "https://www.semanticscholar.org/paper/faa7d0ef52cebcc591c213dbf6b956a6a867af93", "referenceCount": 61, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Unveiling the potential: Exploring the predictability of complex exchange rate trends", "abstract": null, "year": 2024, "venue": "Engineering applications of artificial intelligence", "authors": [ "Yuntao Mao", "Ziwei Chen", "Siyuan Liu", "Yanfeng Li" ], "externalIds": { "DBLP": "journals/eaai/MaoCLL24", "DOI": "10.1016/j.engappai.2024.108112", "CorpusId": 268591595 }, "url": "https://www.semanticscholar.org/paper/5243242879cf840cd6b1d060fd2b8a3e63b3878f", "referenceCount": 34, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep-learning-based stock market prediction incorporating ESG sentiment and technical indicators", "abstract": null, "year": 2024, "venue": "Scientific Reports", "authors": [ "Haein Lee", "Jang Hyun Kim", "H. Jung" ], "externalIds": { "PubMedCentral": "11069555", "DOI": "10.1038/s41598-024-61106-2", "CorpusId": 269587331, "PubMed": "38704434" }, "url": "https://www.semanticscholar.org/paper/c435faa5ca66f83188ae66606127458cac7d04ad", "referenceCount": 39, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Cardiovascular disease detection using a novel stack-based ensemble classifier with aggregation layer, DOWA operator, and feature transformation", "abstract": null, "year": 2024, "venue": "Comput. Biol. Medicine", "authors": [ "Mehdi Hosseini Chagahi", "Saeed Mohammadi Dashtaki", "Behzad Moshiri", "J. 
Piran" ], "externalIds": { "DBLP": "journals/cbm/ChagahiDMP24", "DOI": "10.1016/j.compbiomed.2024.108345", "CorpusId": 268786772, "PubMed": "38564852" }, "url": "https://www.semanticscholar.org/paper/45bc024265f7539bb1ffc7554215cbed0f3e8f9d", "referenceCount": 36, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "A novel feature engineering approach for high-frequency financial data", "abstract": null, "year": 2023, "venue": "Engineering applications of artificial intelligence", "authors": [ "Pablo Mantilla", "Sebastián Dormido" ], "externalIds": { "DBLP": "journals/eaai/MantillaD23", "DOI": "10.1016/j.engappai.2023.106705", "CorpusId": 259893666 }, "url": "https://www.semanticscholar.org/paper/d81cd372749543856c063e9bd9941ff5b7e80392", "referenceCount": 47, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Novel Ensemble Learning Approach for Stock Market Prediction Based on Sentiment Analysis and the Sliding Window Method", "abstract": "Financial news disclosures provide valuable information for traders and investors while making stock market investment decisions. Essential but challenging, the stock market prediction problem has attracted significant attention from both researchers and practitioners. Conventional machine learning models often fail to interpret the content of financial news due to the complexity and ambiguity of natural language used in the news. Inspired by the success of recurrent neural networks (RNNs) in sequential data processing, we propose an ensemble RNN approach (long short-term memory, gated recurrent unit, and SimpleRNN) to predict stock market movements. To avoid extracting tens of thousands of features using traditional natural language processing methods, we apply sentiment analysis and the sliding window method to extract only the most representative features. Our experimental results confirm the effectiveness of these two methods for feature extraction and show that the proposed ensemble approach is able to outperform other models under comparison.", "year": 2023, "venue": "IEEE Transactions on Computational Social Systems", "authors": [ "R. 
Chiong", "Zongwen Fan", "Zhongyi Hu", "Sandeep Dhakal" ], "externalIds": { "DBLP": "journals/tcss/ChiongFHD23", "DOI": "10.1109/TCSS.2022.3182375", "CorpusId": 251332209 }, "url": "https://www.semanticscholar.org/paper/8af132215a0ceab9f90a60f4d6727bc4f62d4073", "referenceCount": 62, "citationCount": 19, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Centralized decomposition approach in LSTM for Bitcoin price prediction", "abstract": null, "year": 2023, "venue": "Expert systems with applications", "authors": [ "Eunho Koo", "Geonwoo Kim" ], "externalIds": { "DBLP": "journals/eswa/KooK24", "DOI": "10.1016/j.eswa.2023.121401", "CorpusId": 261671359 }, "url": "https://www.semanticscholar.org/paper/d9406d7ba62816d71241fda25851aeb5868f46cd", "referenceCount": 59, "citationCount": 7, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Machine Learning based Cryptocurrency Price Prediction using Historical Data and Social Media Sentiment", "abstract": "The purpose of this research is to investigate the impact of social media sentiments on predicting the Bitcoin price using machine learning models, with a focus on integrating onchain data and employing a Multi Modal Fusion Model. For conducting the experiments, the crypto market data, on-chain data, and corresponding social media data (Twitter) has been collected from 2014 to 2022 containing over 2000 samples. We trained various models over historical data including K-Nearest Neighbors, Logistic Regression, Gaussian Naive Bayes, Support Vector Machine, Extreme Gradient Boosting and a Multi Modal Fusion. Next, we added Twitter sentiment data to the models, using the Twitter-roBERTa and VADAR models to analyse the sentiments expressed in social media about Bitcoin. We then compared the performance of these models with and without the Twitter sentiment data and found that the inclusion of sentiment feature resulted in consistently better performance, with TwitterRoBERTa-based sentiment giving an average F1 scores of 0.79. The best performing model was an optimised Multi Modal Fusion classifier using Twitter-RoBERTa based sentiment, producing an F1 score of 0.85. This study represents a significant contribution to the field of financial forecasting by demonstrating the potential of social media sentiment analysis, onchain data integration, and the application of a Multi Modal Fusion model to improve the accuracy and robustness of machine learning models for predicting market trends, providing a valuable tool for investors, brokers, and traders seeking to make informed decisions", "year": 2023, "venue": "Machine Learning & Applications", "authors": [ "Saachin Bhatt", "M. Ghazanfar", "Mohammad Hossein Amirhosseini" ], "externalIds": { "DOI": "10.5121/csit.2023.131001", "CorpusId": 261502726 }, "url": "https://www.semanticscholar.org/paper/ca597dd817e9b25b6bc972b326be6b186409d31d", "referenceCount": 21, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Machine learning techniques for stock price prediction and graphic signal recognition", "abstract": null, "year": 2023, "venue": "Engineering applications of artificial intelligence", "authors": [ "Junde Chen", "Yuxin Wen", "Y. A. Nanehkaran", "M. 
Suzauddola", "Weirong Chen", "Defu Zhang" ], "externalIds": { "DBLP": "journals/eaai/ChenWNSCZ23", "DOI": "10.1016/j.engappai.2023.106038", "CorpusId": 257291133 }, "url": "https://www.semanticscholar.org/paper/77497c39a610bd3da4e91691fb53af6c7ee8d66c", "referenceCount": 34, "citationCount": 23, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "News-based intelligent prediction of financial markets using text mining and machine learning: A systematic literature review", "abstract": null, "year": 2023, "venue": "Expert systems with applications", "authors": [ "Matin N. Ashtiani", "B. Raahemi" ], "externalIds": { "DBLP": "journals/eswa/AshtianiR23", "DOI": "10.1016/j.eswa.2023.119509", "CorpusId": 255745699 }, "url": "https://www.semanticscholar.org/paper/8c7d6bf2d4b9c65a17c98687f76cadf790c7192f", "referenceCount": 133, "citationCount": 58, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LSTM-ReGAT: A network-centric approach for cryptocurrency price trend prediction", "abstract": null, "year": 2023, "venue": "Decision Support Systems", "authors": [ "Chao Zhong", "Wei Du", "Wei-jun Xu", "Qianhui Huang", "Yinuo Zhao", "Mingming Wang" ], "externalIds": { "DBLP": "journals/dss/ZhongDXHZW23", "DOI": "10.1016/j.dss.2023.113955", "CorpusId": 257371428 }, "url": "https://www.semanticscholar.org/paper/9a49bd8db688030a56c170a742a9ed5ccd3bb535", "referenceCount": 48, "citationCount": 26, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Learning and Technical Analysis in Cryptocurrency Market", "abstract": "A large number of modern practices in financial forecasting rely on technical analysis, which involves several heuristics techniques of price charts visual pattern recognition as well as other technical indicators. In this study, we aim to investigate the potential use of those technical information (candlestick information as well as technical indicators) as inputs for machine learning models, especially the state-of-the-art deep learning algorithms, to generate trading signals. To properly address this problem, empirical research is conducted which applies several machine learning methods to 5 years of Bitcoin hourly data from 2017 to 2022. From the result of our study, we confirm the potential of trading strategies using machine learning approaches. We also find that among several machine learning models, deep learning models, specifically the recurrent neural networks, tend to outperform the others in time-series prediction.", "year": 2023, "venue": "Social Science Research Network", "authors": [ "Stéphane Goutte", "Hans-J org von Mettenheim", "Fei Liu", "Viet Phuong Le-Hoang" ], "externalIds": { "DOI": "10.2139/ssrn.4316106", "CorpusId": 255636703 }, "url": "https://www.semanticscholar.org/paper/85ccab08d9d07cb1075d4851382e228749a51699", "referenceCount": 13, "citationCount": 14, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "An Overview of Machine Learning, Deep Learning, and Reinforcement Learning-Based Techniques in Quantitative Finance: Recent Progress and Challenges", "abstract": "Forecasting the behavior of the stock market is a classic but difficult topic, one that has attracted the interest of both economists and computer scientists. 
Over the course of the last couple of decades, researchers have investigated linear models as well as models that are based on machine learning (ML), deep learning (DL), reinforcement learning (RL), and deep reinforcement learning (DRL) in order to create an accurate predictive model. Machine learning algorithms can now extract high-level financial market data patterns. Investors are using deep learning models to anticipate and evaluate stock and foreign exchange markets due to the advantage of artificial intelligence. Recent years have seen a proliferation of the deep reinforcement learning algorithm’s application in algorithmic trading. DRL agents, which combine price prediction and trading signal production, have been used to construct several completely automated trading systems or strategies. Our objective is to enable interested researchers to stay current and easily imitate earlier findings. In this paper, we have worked to explain the utility of Machine Learning, Deep Learning, Reinforcement Learning, and Deep Reinforcement Learning in Quantitative Finance (QF) and the Stock Market. We also outline potential future study paths in this area based on the overview that was presented before.", "year": 2023, "venue": "Applied Sciences", "authors": [ "S. Sahu", "A. Mokhade", "N. Bokde" ], "externalIds": { "DOI": "10.3390/app13031956", "CorpusId": 256590675 }, "url": "https://www.semanticscholar.org/paper/2288a23c7ee48101537d31bd62cc74275c95a23a", "referenceCount": 72, "citationCount": 52, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Financial applications of machine learning: A literature review", "abstract": null, "year": 2023, "venue": "Expert systems with applications", "authors": [ "Noella Nazareth", "Yeruva Venkata Ramana Reddy" ], "externalIds": { "DBLP": "journals/eswa/NazarethR23", "DOI": "10.1016/j.eswa.2023.119640", "CorpusId": 256583903 }, "url": "https://www.semanticscholar.org/paper/90e149e9e684cba42e78b6267e693fcd1c367592", "referenceCount": 147, "citationCount": 52, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multi-source data driven cryptocurrency price movement prediction and portfolio optimization", "abstract": null, "year": 2023, "venue": "Expert systems with applications", "authors": [ "Zhongbao Zhou", "Zhengyang Song", "Helu Xiao", "Tiantian Ren" ], "externalIds": { "DBLP": "journals/eswa/ZhouSXR23", "DOI": "10.2139/ssrn.4161476", "CorpusId": 250593544 }, "url": "https://www.semanticscholar.org/paper/cd0fc4952105f93afd2b260f0346590be28bfc79", "referenceCount": 63, "citationCount": 14, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Stock Market Prediction Using Hard and Soft Data Fusion", "abstract": "The stock market fluctuates a lot due to economic factors and public sentiments. This challenge is exacerbated by the high volatility of stock price trends. To predict the trend of this market, better and more accurate forecasting is expected by combining different sources. We use the available historical data in three values using a Stacked LSTMs network. Then we achieve better results in network output by using OWA methods. The next step is to get the correct news from the website and use the Natural Language Toolkit (NLTK) to analyze their negative or positive impact on the market. 
Then, by using the technical knowledge and experience of several human experts, including the recognition of patterns in technical analysis and knowledge of the market news, we obtain the soft outputs, which finally combine the outputs of human experts with the outputs obtained from the network. One of the applications of combining hard and soft data is stock market forecasting, and finally, the proposed model for this work is presented.", "year": 2022, "venue": "Conference on Information and Knowledge Technology", "authors": [ "Saeed Mohammadi Dashtaki", "Masoud Alizadeh", "Behzad Moshiri" ], "externalIds": { "DOI": "10.1109/IKT57960.2022.10038991", "CorpusId": 256878535 }, "url": "https://www.semanticscholar.org/paper/e31278dcb61bc1511e915de511ddccdb6095797a", "referenceCount": 32, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Applying Artificial Intelligence in Cryptocurrency Markets: A Survey", "abstract": "The total capital in cryptocurrency markets is around two trillion dollars in 2022, which is almost the same as Apple’s market capitalisation at the same time. Increasingly, cryptocurrencies have become established in financial markets with an enormous number of transactions and trades happening every day. Similar to other financial systems, price prediction is one of the main challenges in cryptocurrency trading. Therefore, the application of artificial intelligence, as one of the tools of prediction, has emerged as a recently popular subject of investigation in the cryptocurrency domain. Since machine learning models, as opposed to traditional financial models, demonstrate satisfactory performance in quantitative finance, they seem ideal for coping with the price prediction problem in the complex and volatile cryptocurrency market. There have been several studies that have focused on applying machine learning for price and movement prediction and portfolio management in cryptocurrency markets, though these methods and models are in their early stages. This survey paper aims to review the current research trends in applications of supervised and reinforcement learning models in cryptocurrency price prediction. This study also highlights potential research gaps and possible areas for improvement. In addition, it emphasises potential challenges and research directions that will be of interest in the artificial intelligence and machine learning communities focusing on cryptocurrencies.", "year": 2022, "venue": "Algorithms", "authors": [ "R. Amirzadeh", "A. Nazari", "D. Thiruvady" ], "externalIds": { "DBLP": "journals/algorithms/AmirzadehNT22", "DOI": "10.3390/a15110428", "CorpusId": 253568710 }, "url": "https://www.semanticscholar.org/paper/a520a24ce27684e1fd87b3fe96e9b05a3e727dd5", "referenceCount": 116, "citationCount": 17, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multi-source aggregated classification for stock price movement prediction", "abstract": null, "year": 2022, "venue": "Information Fusion", "authors": [ "Yu Ma", "Rui Mao", "Qika Lin", "Peng Wu", "E. 
Cambria" ], "externalIds": { "DBLP": "journals/inffus/MaMLWC23", "DOI": "10.1016/j.inffus.2022.10.025", "CorpusId": 253436931 }, "url": "https://www.semanticscholar.org/paper/6cb557ab022a1620d25e79ad8d8f23c11fabfeef", "referenceCount": 73, "citationCount": 66, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Algorithmic Trading Using Continuous Action Space Deep Reinforcement Learning", "abstract": "Price movement prediction has always been one of the traders' concerns in financial market trading. In order to increase their profit, they can analyze the historical data and predict the price movement. The large size of the data and complex relations between them lead us to use algorithmic trading and artificial intelligence. This paper aims to offer an approach using Twin-Delayed DDPG (TD3) and the daily close price in order to achieve a trading strategy in the stock and cryptocurrency markets. Unlike previous studies using a discrete action space reinforcement learning algorithm, the TD3 is continuous, offering both position and the number of trading shares. Both the stock (Amazon) and cryptocurrency (Bitcoin) markets are addressed in this research to evaluate the performance of the proposed algorithm. The achieved strategy using the TD3 is compared with some algorithms using technical analysis, reinforcement learning, stochastic, and deterministic strategies through two standard metrics, Return and Sharpe ratio. The results indicate that employing both position and the number of trading shares can improve the performance of a trading system based on the mentioned metrics.", "year": 2022, "venue": "Social Science Research Network", "authors": [ "Naseh Majidi", "Mahdieh Shamsi", "F. Marvasti" ], "externalIds": { "ArXiv": "2210.03469", "DBLP": "journals/corr/abs-2210-03469", "DOI": "10.48550/arXiv.2210.03469", "CorpusId": 252762317 }, "url": "https://www.semanticscholar.org/paper/f88f56576f65f36604ab6306c162ab88d37640fa", "referenceCount": 28, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Economics" ] }, { "title": "From Text Representation to Financial Market Prediction: A Literature Review", "abstract": "News dissemination in social media causes fluctuations in financial markets. (Scope) Recent advanced methods in deep learning-based natural language processing have shown promising results in financial market analysis. However, understanding how to leverage large amounts of textual data alongside financial market information is important for the investors’ behavior analysis. In this study, we review over 150 publications in the field of behavioral finance that jointly investigated natural language processing (NLP) approaches and a market data analysis for financial decision support. This work differs from other reviews by focusing on applied publications in computer science and artificial intelligence that contributed to a heterogeneous information fusion for the investors’ behavior analysis. (Goal) We study various text representation methods, sentiment analysis, and information retrieval methods from heterogeneous data sources. (Findings) We present current and future research directions in text mining and deep learning for correlation analysis, forecasting, and recommendation systems in financial markets, such as stocks, cryptocurrencies, and Forex (Foreign Exchange Market).", "year": 2022, "venue": "Inf.", "authors": [ "Saeede Anbaee Farimani", "M. V. 
Jahan", "Amin Milani Fard" ], "externalIds": { "DBLP": "journals/information/FarimaniJF22", "DOI": "10.3390/info13100466", "CorpusId": 252659271 }, "url": "https://www.semanticscholar.org/paper/b6a299da923652a0d9eb64e4ebe95ec304b0795b", "referenceCount": 0, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A comprehensive review on multiple hybrid deep learning approaches for stock prediction", "abstract": null, "year": 2022, "venue": "Intelligent Systems with Applications", "authors": [ "Jaimin Shah", "Darsh Vaidya", "Manan Shah" ], "externalIds": { "DBLP": "journals/iswa/ShahVS22", "DOI": "10.1016/j.iswa.2022.200111", "CorpusId": 251576372 }, "url": "https://www.semanticscholar.org/paper/7f958a7c5d10426642728b2bcd8ea08f8bd71487", "referenceCount": 85, "citationCount": 49, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An automated cryptocurrency trading system based on the detection of unusual price movements with a Time-Series Clustering-Based approach", "abstract": null, "year": 2022, "venue": "Expert systems with applications", "authors": [ "Faruk Ozer", "C. O. Sakar" ], "externalIds": { "DBLP": "journals/eswa/OzerS22", "DOI": "10.1016/j.eswa.2022.117017", "CorpusId": 247925590 }, "url": "https://www.semanticscholar.org/paper/64f3841898d361ad7c1a887971c33a68f82e88f0", "referenceCount": 32, "citationCount": 19, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Forex market forecasting with two-layer stacked Long Short-Term Memory neural network (LSTM) and correlation analysis", "abstract": null, "year": 2022, "venue": "Journal of Electrical Systems and Information Technology", "authors": [ "Michael Ayitey Junior", "Peter Appiahene", "Obed Appiah" ], "externalIds": { "DOI": "10.1186/s43067-022-00054-1", "CorpusId": 250149010 }, "url": "https://www.semanticscholar.org/paper/426d8dfd3b0f999fbea2ae1e86d96a10afa20ea2", "referenceCount": 82, "citationCount": 14, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "PreBit - A multimodal model with Twitter FinBERT embeddings for extreme price movement prediction of Bitcoin", "abstract": null, "year": 2022, "venue": "Expert systems with applications", "authors": [ "Yanzhao Zou", "Dorien Herremans" ], "externalIds": { "ArXiv": "2206.00648", "DBLP": "journals/eswa/ZouH23", "DOI": "10.1016/j.eswa.2023.120838", "CorpusId": 249240127 }, "url": "https://www.semanticscholar.org/paper/d63a0780f80a0d87f7fa83fb2ad8eb63398a6c3b", "referenceCount": 94, "citationCount": 21, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Economics" ] }, { "title": "Pearson Correlation Coefficient-Based Performance Enhancement of Broad Learning System for Stock Price Prediction", "abstract": "Accurate prediction of a stock price is a challenging task due to the complexity, chaos, and non-linearity nature of financial systems. In this brief, we proposed a multi-indicator feature selection method for stock price prediction based on Pearson correlation coefficient (PCC) and Broad Learning System (BLS), named the PCC-BLS framework. Firstly, PCC was used to select the input features from 35 features, including original stock price, technical indicators, and financial indicators. Secondly, these screened input features were used for rapid information feature extraction and training a BLS. 
Four stocks recorded on the Shanghai Stock Exchange or Shenzhen Stock Exchange were adopted to evaluate the performance of the proposed method. In addition, we compared the forecasting results with ten machine learning methods, including Support Vector Regression (SVR), Adaptive Boosting (Adaboost), Bootstrap aggregating (Bagging), Random Forest (RF), Gradient Boosting Decision Tree (GBDT), Multi-layer Perceptron (MLP), Convolutional Neural Network (CNN), and Long Short-Term Memory (LSTM), Gated Recurrent Unit (GRU) and Broad Learning System (BLS). Among all algorithms used in this brief, the proposed model showed the best performance with the highest model fitting ability.", "year": 2022, "venue": "IEEE Transactions on Circuits and Systems - II - Express Briefs", "authors": [ "Guanzhi Li", "Aining Zhang", "Qizhi Zhang", "Di Wu", "Choujun Zhan" ], "externalIds": { "DBLP": "journals/tcasII/LiZZWZ22", "DOI": "10.1109/tcsii.2022.3160266", "CorpusId": 247529345 }, "url": "https://www.semanticscholar.org/paper/17bb5f945485670647819d6a885866a035bbbd26", "referenceCount": 0, "citationCount": 43, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Do news headlines matter in the cryptocurrency market?", "abstract": "ABSTRACT The paper examines the influence of investor sentiment based on news headlines on the Cryptocurrency Market Index and ten individual cryptocurrency returns. We capture investors’ sentiment from cryptocurrency-specific news headlines. We use a lexicon-based Natural Language Processing (NLP) technique to construct a unique sentiment indicator, and the sentiment scores are generated using two financial dictionaries: Henry(2008)(HE) and Loughran and Mcdonald(2011)(LM). The findings of the study show that news sentiment has a significant impact on cryptocurrency returns. When the investors’ sentiment is optimistic or bullish, the cryptocurrency market experiences herding behaviour, leading to an increase in prices. The diverse and heterogeneous nature of the various cryptocurrencies causes each individual cryptocurrency to respond differently to sentiment. Further, we see that sentiment has a more pronounced impact on young, small, and volatile cryptocurrencies. Our study is among the few studies that use cryptocurrency-specific news headlines rather than news bodies to build a news sentiment indicator. JEL codes: E49, G14, G15", "year": 2022, "venue": "Social Science Research Network", "authors": [ "A. Anamika", "S. Subramaniam" ], "externalIds": { "DOI": "10.1080/00036846.2022.2061904", "CorpusId": 247814229 }, "url": "https://www.semanticscholar.org/paper/65acfc676b8e299c76a9a69768bc04d1494a80ac", "referenceCount": 69, "citationCount": 17, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "BABD: A Bitcoin Address Behavior Dataset for Pattern Analysis", "abstract": "Cryptocurrencies have dramatically increased adoption in mainstream applications in various fields such as financial and online services, however, there are still a few amounts of cryptocurrency transactions that involve illicit or criminal activities. It is essential to identify and monitor addresses associated with illegal behaviors to ensure the security and stability of the cryptocurrency ecosystem. In this paper, we propose a framework to build a dataset comprising Bitcoin transactions between 12 July 2019 and 26 May 2021. 
This dataset (hereafter referred to as BABD-13) contains 13 types of Bitcoin addresses, 5 categories of indicators with 148 features, and 544,462 labeled data, which is the largest labeled Bitcoin address behavior dataset publicly available to our knowledge. We also propose a novel and efficient subgraph generation algorithm called BTC-SubGen to extract a ${k}$ -hop subgraph from the entire Bitcoin transaction graph constructed by the directed heterogeneous multigraph starting from a specific Bitcoin address node. We then conduct 13-class classification tasks on BABD-13 by five machine learning models namely ${k}$ -nearest neighbors algorithm, decision tree, random forest, multilayer perceptron, and XGBoost, the results show that the accuracy rates are between 93.24% and 97.13%. In addition, we study the relations and importance of the proposed features and analyze how they affect the effect of machine learning models. Finally, we conduct a preliminary analysis of the behavior patterns of different types of Bitcoin addresses using concrete features and find several meaningful and explainable modes.", "year": 2022, "venue": "IEEE Transactions on Information Forensics and Security", "authors": [ "Yuexin Xiang", "Yuchen Lei", "Ding Bao", "Tiantian Li", "Qin Yang", "Wenmao Liu", "Wei Ren", "K. Choo" ], "externalIds": { "DBLP": "journals/tifs/XiangLBLYLRC24", "ArXiv": "2204.05746", "DOI": "10.1109/TIFS.2023.3347894", "CorpusId": 248525215 }, "url": "https://www.semanticscholar.org/paper/cb0dccc7cae67553f7647595cb638f2e5a32330a", "referenceCount": 59, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Investigating the informativeness of technical indicators and news sentiment in financial market price prediction", "abstract": null, "year": 2022, "venue": "Knowledge-Based Systems", "authors": [ "Saeede Anbaee Farimani", "M. V. Jahan", "Amin Milani Fard", "S. R. K. Tabbakh" ], "externalIds": { "DBLP": "journals/kbs/FarimaniJFT22", "DOI": "10.1016/j.knosys.2022.108742", "CorpusId": 248144481 }, "url": "https://www.semanticscholar.org/paper/4475283620ee383b14419b19d86915bdb5650bf9", "referenceCount": 72, "citationCount": 30, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The impact of news media on Bitcoin prices: modelling data driven discourses in the crypto-economy with natural language processing", "abstract": "This paper examines the relationship between events reported in international news via categorical discourses and Bitcoin price. Natural language processing was adopted in this study to model data-driven discourses in the crypto-economy, specifically the Bitcoin market. Using topic modelling, namely Latent Dirichlet Allocation, a text analysis of cryptocurrency articles (N = 4218) published from 60 countries in international news media identified key topics associated with cryptocurrency in the international news media from 2018 to 2020. This study provides empirical evidence that across the corpora of international news articles, 18 key topics were framed around the following categorical macro discourses: crypto-related crime, financial governance, and economy and markets. Analysis shows that the identified discourses may have had a ‘social signal’ effect on movements in the crypto-financial markets, particularly on Bitcoin's price volatility. 
Results show these specific discourses proved to have a negative effect on Bitcoin's market price, within 24 h of when the crypto news articles were published. Further, the study found that in some cases, the source of the news may have amplified the volatility effect, particularly in terms of geographical region, relative to broader market conditions.", "year": 2022, "venue": "Royal Society Open Science", "authors": [ "K. Coulter" ], "externalIds": { "PubMedCentral": "9019510", "DOI": "10.1098/rsos.220276", "CorpusId": 248243380, "PubMed": "35462778" }, "url": "https://www.semanticscholar.org/paper/3e1ba91f8ffa28a89945a4616cc2adf993060441", "referenceCount": 52, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Deep Reinforcement Learning with the Random Neural Network", "abstract": null, "year": 2022, "venue": "Engineering applications of artificial intelligence", "authors": [ "Will Serrano" ], "externalIds": { "DBLP": "journals/eaai/Serrano22", "DOI": "10.1016/j.engappai.2022.104751", "CorpusId": 247068616 }, "url": "https://www.semanticscholar.org/paper/129736f6d30b4133a7ed1b7ea1256c49b9a70947", "referenceCount": 33, "citationCount": 14, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Journal of Behavioral and Experimental Finance: A Bibliometric Overview", "abstract": "Behavioral science has made a considerable contribution to finance. To gain an understanding of the scientific contributions emerging from all fields of finance with a behavioral perspective, this paper reviews the content of the major journal dedicated to behavioral finance, the Journal of Behavioral and Experimental Finance (JBEF), since its foundation 8 years ago. For this purpose, we employ bibliometrics and content analysis to shed light on the publication trends and intellectual structure of the JBEF, obtaining numerous intriguing findings. First, the JBEF is still a young journal, and its numbers of publications and citations have grown significantly since its inception. Second, though there are contributions from all parts of the world, the United States is acknowledged as contributing the most to the JBEF. Diverse authors have contributed to the journal, but those affiliated with the University of Innsbruck and Macquarie University lead the list. Third, most of the studies have used the theoretical underpinnings of behavioral theory and prospect theory. Methodologically, most of the studies are empirical and primarily based on quantitative research designs, archival data and regression analysis. Fourth, the JBEF’s contributions concern eight intellectual clusters—namely personal characteristics and national cultures;psychological factors, financial literacy and robo-advising;investor sentiment and stock market volatility;asset market experiments;overconfidence and the disposition effects in the stock market;externalities (COVID-19) and financial markets;socially responsible investing;and herding behavior in financial markets. 
Finally, “behavioral finance” is the most prominently used author keyword in the JBEF’s publications, followed by “financial literacy”. All in all, these findings should offer readers a retrospection of scholarly contributions from the JBEF.", "year": 2022, "venue": "Social Science Research Network", "authors": [ "Satish Kumar", "Sandeep Rao", "Kirtika Goyal", "Nishank Goyal" ], "externalIds": { "DOI": "10.2139/ssrn.4057623", "CorpusId": 247653790 }, "url": "https://www.semanticscholar.org/paper/b7c609983b0d3f0311ae8d6842bdaaee06bf194b", "referenceCount": 161, "citationCount": 23, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "On technical trading and social media indicators for cryptocurrency price classification through deep learning", "abstract": null, "year": 2022, "venue": "Expert systems with applications", "authors": [ "Marco Ortu", "Nicola Uras", "Claudio Conversano", "Silvia Bartolucci", "Giuseppe Destefanis" ], "externalIds": { "DBLP": "journals/eswa/OrtuUCBD22", "DOI": "10.1016/j.eswa.2022.116804", "CorpusId": 231934289 }, "url": "https://www.semanticscholar.org/paper/b70736da8f45fec2003890e5926364a5bdd67a9f", "referenceCount": 42, "citationCount": 38, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Ensemble and Multimodal Approach for Forecasting Cryptocurrency Price", "abstract": "Since the birth of Bitcoin in 2009, cryptocurrencies have emerged to become a global phenomenon and an important decentralized financial asset. Due to this decentralization, the value of these digital currencies against fiat currencies is highly volatile over time. Therefore, forecasting the crypto-fiat currency exchange rate is an extremely challenging task. For reliable forecasting, this paper proposes a multimodal AdaBoost-LSTM ensemble approach that employs all modalities which derive price fluctuation such as social media sentiments, search volumes, blockchain information, and trading data. To better support investment decision making, the approach forecasts also the fluctuation distribution. The conducted extensive experiments demonstrated the effectiveness of relying on multimodalities instead of only trading data. Further experiments demonstrate the outperformance of the proposed approach compared to existing tools and methods with a 19.29% improvement.", "year": 2022, "venue": "arXiv.org", "authors": [ "Zeyd Boukhers", "Azeddine Bouabdallah", "Matthias Lohr", "J. Jürjens" ], "externalIds": { "DBLP": "journals/corr/abs-2202-08967", "ArXiv": "2202.08967", "CorpusId": 246996536 }, "url": "https://www.semanticscholar.org/paper/cb3dac173e4f20d5e71a9943b644f9d7fc76bd27", "referenceCount": 32, "citationCount": 7, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Economics" ] }, { "title": "Machine learning techniques and data for stock market forecasting: A literature review", "abstract": null, "year": 2022, "venue": "Expert systems with applications", "authors": [ "Mahinda Mailagaha Kumbure", "Christoph Lohrmann", "P. Luukka", "J. 
Porras" ], "externalIds": { "DBLP": "journals/eswa/KumbureLLP22", "DOI": "10.1016/j.eswa.2022.116659", "CorpusId": 247003373 }, "url": "https://www.semanticscholar.org/paper/d47cfd72f79217cd1226e81e8f2c84019f7ab4d2", "referenceCount": 198, "citationCount": 200, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Predicting bitcoin price movements using sentiment analysis: a machine learning approach", "abstract": "\nPurpose\nCryptocurrencies such as Bitcoin (BTC) attracted a lot of attention in recent months due to their unprecedented price fluctuations. This paper aims to propose a new method for predicting the direction of BTC price using linear discriminant analysis (LDA) together with sentiment analysis.\n\n\nDesign/methodology/approach\nConcretely, the authors train an LDA-based classifier that uses the current BTC price information and BTC news announcements headlines to forecast the next-day direction of BTC prices. The authors compare the results with a Support Vector Machine (SVM) model and random guess approach. The use of BTC price information and news announcements related to crypto enables us to value the importance of these different sources and types of information.\n\n\nFindings\nRelative to the LDA results, the SVM model was more accurate in predicting BTC next day’s price movement. All models yielded better forecasts of an increase in tomorrow’s BTC price compared to forecasting a decrease in the crypto price. The inclusion of news sentiment resulted in the highest forecast accuracy of 0.585 on the test data, which is superior to a random guess. The LDA (SVM) model with asset specific (news sentiment and asset specific) input features ranked first within their respective model classifiers, suggesting both BTC news sentiment and asset specific are prized factors in predicting tomorrow’s price direction.\n\n\nOriginality/value\nTo the best of the authors’ knowledge, this is the first study to analyze the potential effect of crypto-related sentiment and BTC specific news on BTC’s price using LDA and sentiment analysis.\n", "year": 2021, "venue": "Studies in Economics and Finance", "authors": [ "Ikhlaas Gurrib", "Firuz Kamalov" ], "externalIds": { "DOI": "10.1108/sef-07-2021-0293", "CorpusId": 245159886 }, "url": "https://www.semanticscholar.org/paper/7663686688d8a383bcab2db13615e0d552ccff80", "referenceCount": 65, "citationCount": 27, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "A Multimodal Event-Driven LSTM Model for Stock Prediction Using Online News", "abstract": "In finance, it is believed that market information, namely, fundamentals and news information, affects stock movements. Such media-aware stock movements essentially comprise a multimodal problem. Two unique challenges arise in processing these multimodal data. First, information from one data mode will interact with information from other data modes. A common strategy is to concatenate various data modes into one compound vector; however, this strategy ignores the interactions among different modes. The second challenge is the heterogeneity of the data in terms of sampling time. Specifically, fundamental data consist of continuous values sampled at fixed time intervals, whereas news information emerges randomly. This heterogeneity can cause valuable information to be partially missing or can distort the feature spaces. 
In addition, the study of media-aware stock movements in previous work has focused on the one-to-one problem, in which it is assumed that news affects only the performance of the stocks mentioned in the reports. However, news articles also impact related stocks and cause stock co-movements. In this article, we propose a tensor-based event-driven LSTM model to address these challenges. Experiments performed on the China securities market demonstrate the superiority of the proposed approach over state-of-the-art algorithms, including AZFinText, eMAQT, and TeSIA.", "year": 2021, "venue": "IEEE Transactions on Knowledge and Data Engineering", "authors": [ "Qing Li", "Jinghua Tan", "Jun Wang", "Hsinchun Chen" ], "externalIds": { "DBLP": "journals/tkde/LiTWC21", "MAG": "3001790167", "DOI": "10.1109/tkde.2020.2968894", "CorpusId": 211103451 }, "url": "https://www.semanticscholar.org/paper/ee20b3e9c3aac126cd7261120889a7daeeceb31c", "referenceCount": 50, "citationCount": 112, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Information Quality Assessment for Data Fusion Systems", "abstract": "This paper provides a comprehensive description of the current literature on data fusion, with an emphasis on Information Quality (IQ) and performance evaluation. This literature review highlights recent studies that reveal existing gaps, the need to find a synergy between data fusion and IQ, several research issues, and the challenges and pitfalls in this field. First, the main models, frameworks, architectures, algorithms, solutions, problems, and requirements are analyzed. Second, a general data fusion engineering process is presented to show how complex it is to design a framework for a specific application. Third, an IQ approach, as well as the different methodologies and frameworks used to assess IQ in information systems are addressed; in addition, data fusion systems are presented along with their related criteria. Furthermore, information on the context in data fusion systems and its IQ assessment are discussed. Subsequently, the issue of data fusion systems’ performance is reviewed. Finally, some key aspects and concluding remarks are outlined, and some future lines of work are gathered.", "year": 2021, "venue": "International Conference on Data Technologies and Applications", "authors": [ "M. A. Becerra", "Catalina Tobón", "A. E. Castro-Ospina", "Diego Hernán Peluffo-Ordóñez" ], "externalIds": { "MAG": "3167658574", "DBLP": "journals/data/BecerraTCP21", "DOI": "10.3390/data6060060", "CorpusId": 236095736 }, "url": "https://www.semanticscholar.org/paper/22c05e25fd309640f53fe87dc3e290d0e4a74791", "referenceCount": 161, "citationCount": 18, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Forecasting the movements of Bitcoin prices: an application of machine learning algorithms", "abstract": "Cryptocurrencies, such as Bitcoin, are one of the most controversial and complex technological innovations in today’s financial system. This study aims to forecast the movements of Bitcoin prices at a high degree of accuracy. To this aim, four different Machine Learning (ML) algorithms are applied, namely, the Support Vector Machines (SVM), the Artificial Neural Network (ANN), the Naive Bayes (NB) and the Random Forest (RF) besides the logistic regression (LR) as a benchmark model. In order to test these algorithms, besides existing continuous dataset, discrete dataset was also created and used. 
For the evaluations of algorithm performances, the F statistic, accuracy statistic, the Mean Absolute Error (MAE), the Root Mean Square Error (RMSE) and the Root Absolute Error (RAE) metrics were used. The t test was used to compare the performances of the SVM, ANN, NB and RF with the performance of the LR. Empirical findings reveal that, while the RF has the highest forecasting performance in the continuous dataset, the NB has the lowest. On the other hand, the ANN has the highest and the NB the lowest performance in the discrete dataset. Furthermore, the discrete dataset improves the overall forecasting performance in all algorithms (models) estimated.", "year": 2020, "venue": "Quantitative Finance and Economics", "authors": [ "Hakan Pabuçcu", "Serdar Ongan", "Ayşe Ongan" ], "externalIds": { "ArXiv": "2303.04642", "DBLP": "journals/corr/abs-2303-04642", "MAG": "3103074365", "DOI": "10.3934/QFE.2020031", "CorpusId": 228883279 }, "url": "https://www.semanticscholar.org/paper/f2614e50c9cbf0acf06e08c1e51b6b408bed3ce8", "referenceCount": 34, "citationCount": 36, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Economics", "Computer Science", "Mathematics" ] }, { "title": "Fusion in stock market prediction: A decade survey on the necessity, recent developments, and potential future directions", "abstract": null, "year": 2020, "venue": "Information Fusion", "authors": [ "Ankit Thakkar", "Kinjal Chaudhari" ], "externalIds": { "DBLP": "journals/inffus/ThakkarC21", "MAG": "3080733778", "PubMedCentral": "7448965", "DOI": "10.1016/j.inffus.2020.08.019", "CorpusId": 221323781, "PubMed": "32868979" }, "url": "https://www.semanticscholar.org/paper/d00aaebbcc0bb9c07d66aab4f9cb9a9a59120bf5", "referenceCount": 130, "citationCount": 126, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Ensemble of machine learning algorithms for cryptocurrency investment with different data resampling methods", "abstract": null, "year": 2020, "venue": "Applied Soft Computing", "authors": [ "T. A. Borges", "R. Neves" ], "externalIds": { "DBLP": "journals/asc/BorgesN20", "MAG": "3008066307", "DOI": "10.1016/j.asoc.2020.106187", "CorpusId": 214480352 }, "url": "https://www.semanticscholar.org/paper/224594b261ad835e894cc270560d16b52a993a68", "referenceCount": 37, "citationCount": 46, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A survey on machine learning for data fusion", "abstract": null, "year": 2020, "venue": "Information Fusion", "authors": [ "Tong Meng", "Xuyang Jing", "Zheng Yan", "W. 
Pedrycz" ], "externalIds": { "DBLP": "journals/inffus/MengJYP20", "MAG": "2995201943", "DOI": "10.1016/j.inffus.2019.12.001", "CorpusId": 211216636 }, "url": "https://www.semanticscholar.org/paper/cf15c1898c81594165d74de465605aa9f559c769", "referenceCount": 78, "citationCount": 403, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Applications of deep learning in stock market prediction: recent progress", "abstract": null, "year": 2020, "venue": "Expert systems with applications", "authors": [ "Weiwei Jiang" ], "externalIds": { "MAG": "3009650506", "ArXiv": "2003.01859", "DBLP": "journals/eswa/Jiang21", "DOI": "10.1016/j.eswa.2021.115537", "CorpusId": 211988999 }, "url": "https://www.semanticscholar.org/paper/00c7a21bd4d7c2c67ae54efbd2f6336cd5dc17e6", "referenceCount": 213, "citationCount": 380, "influentialCitationCount": 12, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Economics" ] }, { "title": "FinBERT: Financial Sentiment Analysis with Pre-trained Language Models", "abstract": "Financial sentiment analysis is a challenging task due to the specialized language and lack of labeled data in that domain. General-purpose models are not effective enough because of the specialized language used in a financial context. We hypothesize that pre-trained language models can help with this problem because they require fewer labeled examples and they can be further trained on domain-specific corpora. We introduce FinBERT, a language model based on BERT, to tackle NLP tasks in the financial domain. Our results show improvement in every measured metric on current state-of-the-art results for two financial sentiment analysis datasets. We find that even with a smaller training set and fine-tuning only a part of the model, FinBERT outperforms state-of-the-art machine learning methods.", "year": 2019, "venue": "arXiv.org", "authors": [ "Dogu Araci" ], "externalIds": { "ArXiv": "1908.10063", "DBLP": "journals/corr/abs-1908-10063", "MAG": "2970636124", "CorpusId": 201646244 }, "url": "https://www.semanticscholar.org/paper/7102bb3fe73bd057ff161d9db5214a267c1ef312", "referenceCount": 38, "citationCount": 508, "influentialCitationCount": 83, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RoBERTa: A Robustly Optimized BERT Pretraining Approach", "abstract": "Language model pretraining has led to significant performance gains but careful comparison between different approaches is challenging. Training is computationally expensive, often done on private datasets of different sizes, and, as we will show, hyperparameter choices have significant impact on the final results. We present a replication study of BERT pretraining (Devlin et al., 2019) that carefully measures the impact of many key hyperparameters and training data size. We find that BERT was significantly undertrained, and can match or exceed the performance of every model published after it. Our best model achieves state-of-the-art results on GLUE, RACE and SQuAD. These results highlight the importance of previously overlooked design choices, and raise questions about the source of recently reported improvements. We release our models and code.", "year": 2019, "venue": "arXiv.org", "authors": [ "Yinhan Liu", "Myle Ott", "Naman Goyal", "Jingfei Du", "Mandar Joshi", "Danqi Chen", "Omer Levy", "M. 
Lewis", "Luke Zettlemoyer", "Veselin Stoyanov" ], "externalIds": { "DBLP": "journals/corr/abs-1907-11692", "MAG": "2965373594", "ArXiv": "1907.11692", "CorpusId": 198953378 }, "url": "https://www.semanticscholar.org/paper/077f8329a7b6fa3b7c877a57b81eb6c18b5f87de", "referenceCount": 68, "citationCount": 20963, "influentialCitationCount": 4860, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Combining Principal Component Analysis, Discrete Wavelet Transform and XGBoost to trade in the financial markets", "abstract": null, "year": 2019, "venue": "Expert systems with applications", "authors": [ "J. Nobre", "R. Neves" ], "externalIds": { "MAG": "2914888512", "DBLP": "journals/eswa/NobreN19", "DOI": "10.1016/J.ESWA.2019.01.083", "CorpusId": 86651773 }, "url": "https://www.semanticscholar.org/paper/c09e033b22f7173fe228d8c54e48c1bbf26eacfb", "referenceCount": 35, "citationCount": 193, "influentialCitationCount": 9, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Market Sentiment Analysis Based on Social Media and Trading Volume for Asset Price Movement Prediction", "abstract": null, "year": 2023, "venue": "International Conference on Advanced Data Mining and Applications", "authors": [ "Jiahao Li", "Yuyun Gong", "Qinghua Zhao", "Yufan Xie", "Simon Fong", "Jerome Yen" ], "externalIds": { "DBLP": "conf/adma/LiGZXFY23", "DOI": "10.1007/978-3-031-46661-8_26", "CorpusId": 265214315 }, "url": "https://www.semanticscholar.org/paper/e3ad4b540eb20f30dbfba81d69f1591403db834c", "referenceCount": 0, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Forecasting Bitcoin Volatility Through on-Chain and Whale-Alert Tweet Analysis Using the Q-Learning Algorithm", "abstract": "As the adoption of cryptocurrencies, especially Bitcoin (BTC) continues to rise in today’s digital economy, understanding their unpredictable nature becomes increasingly critical. This research paper addresses this need by investigating the volatile nature of the cryptocurrency market, mainly focusing on Bitcoin trend prediction utilizing on-chain data and whale-alert tweets. By employing a Q-learning algorithm, a type of reinforcement learning, we analyze variables such as transaction volume, network activity, and significant Bitcoin transactions highlighted in whale-alert tweets. Our findings indicate that the algorithm effectively predicts Bitcoin trends when integrating on-chain and Twitter data. Consequently, this study offers valuable insights that could potentially guide investors in informed Bitcoin investment decisions, thereby playing a pivotal role in the realm of cryptocurrency risk management.", "year": 2023, "venue": "IEEE Access", "authors": [ "Muminov Azamjon", "Otabek Sattarov", "Jinsoo Cho" ], "externalIds": { "DBLP": "journals/access/MuminovOC23", "DOI": "10.1109/ACCESS.2023.3317899", "CorpusId": 262213249 }, "url": "https://www.semanticscholar.org/paper/bae860748ace508b364a3cd781dd85b4c80983e9", "referenceCount": 55, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A machine learning trading system for the stock market based on N-period Min-Max labeling using XGBoost", "abstract": null, "year": 2023, "venue": "Expert systems with applications", "authors": [ "Yechan Han", "Jaeyun Kim", "D. 
Enke" ], "externalIds": { "DBLP": "journals/eswa/HanKE23", "CorpusId": 251679349 }, "url": "https://www.semanticscholar.org/paper/b05e201430794536860f408a0ca4b31366545e96", "referenceCount": 0, "citationCount": 16, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On the Vital Aspects and Characteristics of Cryptocurrency—A Survey", "abstract": "Cryptocurrencies acquire user confidence by making the whole creation and transaction history transparent to the public. In exchange, the transaction history accurately captures the complete range of user activities related to cryptocurrencies. It is thought to be one of the safest and simplest payment methods that may be employed in the future. The trend of banks and other financial institutions investing in cryptocurrencies has increased rapidly in recent years. Therefore, it is necessary to synthesize the findings of previous studies on cryptocurrencies. In this paper, the use of data mining methods in Bitcoin transactions is analyzed and summarized. Cryptocurrencies, similar to the well-known Bitcoin, were targeted to ensure transaction security and privacy and overcome the drawbacks of traditional banking systems as well as other centralized systems. In addition, a comprehensive analysis of the literature on the challenges and applications of electronic currencies is conducted. The evolution of digital currency from electronic cash to cryptocurrencies is summarized and the methods used to increase user privacy are highlighted. The security threats in existing cryptocurrency systems (that compromise the privacy of Bitcoin users) are also highlighted. Finally, several research gaps and trends are identified that need to be further explored.", "year": 2023, "venue": "IEEE Access", "authors": [ "A. S. Shahen Shah", "Muhammet Ali Karabulut", "A. Akhter", "Nazifa Mustari", "A. Pathan", "Khaled M. Rabie", "T. Shongwe" ], "externalIds": { "DBLP": "journals/access/ShahKAMPRS23", "DOI": "10.1109/ACCESS.2023.3240103", "CorpusId": 256291335 }, "url": "https://www.semanticscholar.org/paper/b9090628c170832eb3693ef3f76a5a1ef44759cd", "referenceCount": 149, "citationCount": 8, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fusion in Cryptocurrency Price Prediction: A Decade Survey on Recent Advancements, Architecture, and Potential Future Directions", "abstract": "Cryptographic forms of money are distributed peer-to-peer (P2P) computerized exchange mediums, where the exchanges or records are secured through a protected hash set of secure hash algorithm-256 (SHA-256) and message digest 5 (MD5) calculations. Since their initiation, the prices seem highly volatile and came to their amazing cutoff points during the COVID-19 pandemic. This factor makes them a popular choice for investors with an aim to get higher returns over a short span of time. The colossal high points and low points in digital forms of money costs have drawn in analysts from the scholarly community as well as ventures to foresee their costs. A few machines and deep learning algorithms like gated recurrent unit (GRU), long short-term memory (LSTM), autoregressive integrated moving average with explanatory variable (ARIMAX), and a lot more have been utilized to exactly predict and investigate the elements influencing cryptocurrency prices. 
The current literature is totally centered around the forecast of digital money costs disregarding its reliance on other cryptographic forms of money. However, Dash coin is an individual cryptocurrency, but it is derived from Bitcoin and Litecoin. The change in Bitcoin and Litecoin prices affects the Dash coin price. Motivated from these, we present a cryptocurrency price prediction framework in this paper. It acknowledges different cryptographic forms of money (which are subject to one another) as information and yields higher accuracy. To illustrate this concept, we have considered a price prediction of Dash coin through the past days’ prices of Dash, Litecoin, and Bitcoin as they have hierarchical dependency among them at the protocol level. We can portray the outcomes that the proposed scheme predicts the prices with low misfortune and high precision. The model can be applied to different digital money cost expectations.", "year": 2022, "venue": "IEEE Access", "authors": [ "Nisarg P. Patel", "Raj Parekh", "Nihar Thakkar", "Rajesh Gupta", "S. Tanwar", "Gulshan Sharma", "I. Davidson", "Ravi Sharma" ], "externalIds": { "DBLP": "journals/access/PatelPTGTSDS22", "DOI": "10.1109/ACCESS.2022.3163023", "CorpusId": 247788574 }, "url": "https://www.semanticscholar.org/paper/e1b7173cf667dc67be67695d8cf9faa9e7119243", "referenceCount": 120, "citationCount": 16, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Bitcoin: A Peer-to-Peer Electronic Cash System", "abstract": ". A purely peer-to-peer version of electronic cash would allow online payments to be sent directly from one party to another without going through a financial institution. Digital signatures provide part of the solution, but the main benefits are lost if a trusted third party is still required to prevent double-spending. We propose a solution to the double-spending problem using a peer-to-peer network. The network timestamps transactions by hashing them into an ongoing chain of hash-based proof-of-work, forming a record that cannot be changed without redoing the proof-of-work. The longest chain not only serves as proof of the sequence of events witnessed, but proof that it came from the largest pool of CPU power. As long as a majority of CPU power is controlled by nodes that are not cooperating to attack the network, they'll generate the longest chain and outpace attackers. The network itself requires minimal structure. 
Messages are broadcast on a best effort basis, and nodes can leave and rejoin the network at will, accepting the longest proof-of-work chain as proof of what happened while they were gone.", "year": 2008, "venue": "", "authors": [ "Anthony Dewayne Hunt" ], "externalIds": { "DOI": "10.2139/ssrn.3440802", "CorpusId": 236214795 }, "url": "https://www.semanticscholar.org/paper/4e9ec92a90c5d571d2f1d496f8df01f0a8f38596", "referenceCount": 76, "citationCount": 19255, "influentialCitationCount": 2664, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "“Quantitative stock port-folio optimization by multi-task learning risk and return,”", "abstract": null, "year": null, "venue": "Information Fusion", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“Forecasting cryp-tocurrencies volatility using statistical and machine learning methods: A comparative study,”", "abstract": null, "year": null, "venue": "Applied Soft Computing", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“News-driven stock prediction via noisy equity state representation,”", "abstract": null, "year": null, "venue": "Neurocomputing", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Multi-modal Medical Image Fusion For Non-Small Cell Lung Cancer Classification": { "paper_title": "Multi-modal Medical Image Fusion For Non-Small Cell Lung Cancer Classification", "arxiv_id": "2409.18715v1", "keyword": "transformer", "authors": [ "Salma Hassan", "Hamad Al Hammadi", "Ibrahim Mohammed", "Muhammad Haris Khan" ], "references": [ { "title": "Implementation of CRNN Method for Lung Cancer Detection based on Microarray Data", "abstract": "Lung Cancer is one of the cancer types with the most significant mortality rate, mainly because of the disease's slow detection. Therefore, the early identification of this disease is crucial. However, the primary issue of microarray is the curse of dimensionality. This problem is related to the characteristic of microarray data, which has a small sample size yet many attributes. Moreover, this problem could lower the accuracy of cancer detection systems. Various machines and deep learning techniques have been researched to solve this problem. This paper implemented a deep learning method named Convolutional Recurrent Neural Network (CRNN) to build the Lung Cancer detection system. Convolutional neural networks (CNN) are used to extract features, and recurrent neural networks (RNN) are used to summarize the derived features. CNN and RNN methods are combined in CRNN to derive the advantages of each of the methods. Several previous research uses CRNN to build a Lung Cancer detection system using medical image biomarkers (MRI or CT scan). Thus, the researchers concluded that CRNN achieved higher accuracy than CNN and RNN independently. Moreover, CRNN was implemented in this research by using a microarray-based Lung Cancer dataset. Furthermore, different drop-out values are compared to determine the best drop-out value for the system. Thus, the result shows that CRNN gave a higher accuracy than CNN and RNN. 
The CRNN method achieved the highest accuracy of 91%, while the CNN and RNN methods achieved 83% and 71% accuracy, respectively.", "year": 2023, "venue": "JOIV: International Journal on Informatics Visualization", "authors": [ "Azka Khoirunnisa", "-. Adiwijaya", "D. Adytia" ], "externalIds": { "DOI": "10.30630/joiv.7.2.1339", "CorpusId": 259622471 }, "url": "https://www.semanticscholar.org/paper/660883502b43421b34683bdaeebaad7479ed8db5", "referenceCount": 32, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Artificial Intelligence in CT and MR Imaging for Oncological Applications", "abstract": "Simple Summary The two most common cross-sectional imaging modalities, computed tomography (CT) and magnetic resonance imaging (MRI), have shown enormous utility in clinical oncology. The emergence of artificial intelligence (AI)-based tools in medical imaging has been motivated by the desire for greater efficiency and efficacy in clinical care. Although a growing number of new AI tools for narrow-specific tasks in imaging is highly encouraging, the effort to tackle the key challenges to implementation by the worldwide imaging community has yet to be appropriately addressed. In this review, we discuss a few challenges in using AI tools and offer some potential solutions with examples from lung CT and MRI of the abdomen, pelvis, and head and neck (HN) region. As we advance, AI tools may significantly enhance clinician workflows and clinical decision-making. Abstract Cancer care increasingly relies on imaging for patient management. The two most common cross-sectional imaging modalities in oncology are computed tomography (CT) and magnetic resonance imaging (MRI), which provide high-resolution anatomic and physiological imaging. Herewith is a summary of recent applications of rapidly advancing artificial intelligence (AI) in CT and MRI oncological imaging that addresses the benefits and challenges of the resultant opportunities with examples. Major challenges remain, such as how best to integrate AI developments into clinical radiology practice, the vigorous assessment of quantitative CT and MR imaging data accuracy, and reliability for clinical utility and research integrity in oncology. Such challenges necessitate an evaluation of the robustness of imaging biomarkers to be included in AI developments, a culture of data sharing, and the cooperation of knowledgeable academics with vendor scientists and companies operating in radiology and oncology fields. Herein, we will illustrate a few challenges and solutions of these efforts using novel methods for synthesizing different contrast modality images, auto-segmentation, and image reconstruction with examples from lung CT as well as abdome, pelvis, and head and neck MRI. The imaging community must embrace the need for quantitative CT and MRI metrics beyond lesion size measurement. AI methods for the extraction and longitudinal tracking of imaging metrics from registered lesions and understanding the tumor environment will be invaluable for interpreting disease status and treatment efficacy. This is an exciting time to work together to move the imaging field forward with narrow AI-specific tasks. New AI developments using CT and MRI datasets will be used to improve the personalized management of cancer patients.", "year": 2023, "venue": "Cancers", "authors": [ "R. Paudyal", "A. Shah", "O. Akin", "Richard K G Do", "A. Konar", "V. Hatzoglou", "U. Mahmood", "N. Lee", "R. 
Wong", "Suchandrima Banerjee", "Jaemin Shin", "H. Veeraraghavan", "A. Shukla-Dave" ], "externalIds": { "PubMedCentral": "10177423", "DOI": "10.3390/cancers15092573", "CorpusId": 258492725, "PubMed": "37174039" }, "url": "https://www.semanticscholar.org/paper/bc6b56919568e1f2ed447bc24a93befa9f6bfee2", "referenceCount": 129, "citationCount": 20, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Recent advancements in deep learning based lung cancer detection: A systematic review", "abstract": null, "year": 2022, "venue": "Engineering applications of artificial intelligence", "authors": [ "Shubham Dodia", "B. Annappa", "M. Anand" ], "externalIds": { "DBLP": "journals/eaai/DodiaBA22", "DOI": "10.1016/j.engappai.2022.105490", "CorpusId": 252802216 }, "url": "https://www.semanticscholar.org/paper/8ce59fc3278341136e8f2eddbc800635cc0bf686", "referenceCount": 136, "citationCount": 34, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MedCLIP: Contrastive Learning from Unpaired Medical Images and Text", "abstract": "Existing vision-text contrastive learning like CLIP aims to match the paired image and caption embeddings while pushing others apart, which improves representation transferability and supports zero-shot prediction. However, medical image-text datasets are orders of magnitude below the general images and captions from the internet. Moreover, previous methods encounter many false negatives, i.e., images and reports from separate patients probably carry the same semantics but are wrongly treated as negatives. In this paper, we decouple images and texts for multimodal contrastive learning, thus scaling the usable training data in a combinatorial magnitude with low cost. We also propose to replace the InfoNCE loss with semantic matching loss based on medical knowledge to eliminate false negatives in contrastive learning. We prove that MedCLIP is a simple yet effective framework: it outperforms state-of-the-art methods on zero-shot prediction, supervised classification, and image-text retrieval. Surprisingly, we observe that with only 20K pre-training data, MedCLIP wins over the state-of-the-art method (using 200K data). The code is available at https://github.com/RyanWangZf/MedCLIP.", "year": 2022, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Zifeng Wang", "Zhenbang Wu", "Dinesh Agarwal", "Jimeng Sun" ], "externalIds": { "DBLP": "journals/corr/abs-2210-10163", "ACL": "2022.emnlp-main.256", "ArXiv": "2210.10163", "DOI": "10.48550/arXiv.2210.10163", "CorpusId": 252992913, "PubMed": "39144675" }, "url": "https://www.semanticscholar.org/paper/cdd9c1d23f9e89d5113f3e31821bb174c6a6afed", "referenceCount": 46, "citationCount": 220, "influentialCitationCount": 23, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Deep Learning-Based Chest CT Image Features in Diagnosis of Lung Cancer", "abstract": "This study was to evaluate the diagnostic value of deep learning-optimized chest CT in the patients with lung cancer. 90 patients who were diagnosed with lung cancer by surgery or puncture in hospital were selected as the research subjects. The Mask Region Convolutional Neural Network (Mask-RCNN) model was a typical end-to-end image segmentation model, and Dual Path Network (DPN) was used in nodule detection. 
The results showed that the accuracy of DPN algorithm model in detecting lung lesions in lung cancer patients was 88.74%, the accuracy of CT diagnosis of lung cancer was 88.37%, the sensitivity was 82.91%, and the specificity was 87.43%. Deep learning-based CT examination combined with serum tumor detection, factoring in neuron-specific enolase (NSE), cytokeratin 19 fragment (CYFRA21), carcinoembryonic antigen (CEA), and squamous cell carcinoma (SCC) antigen, improved the accuracy to 97.94%, the sensitivity to 98.12%, and the specificity to 100%, all showing significant differences (P < 0.05). In conclusion, this study provides a scientific basis for improving the diagnostic efficiency of CT imaging in lung cancer and theoretical support for subsequent lung cancer diagnosis and treatment.", "year": 2022, "venue": "Computational and Mathematical Methods in Medicine", "authors": [ "J. Feng", "Jun Jiang" ], "externalIds": { "PubMedCentral": "8791752", "DOI": "10.1155/2022/4153211", "CorpusId": 246068815, "PubMed": "35096129" }, "url": "https://www.semanticscholar.org/paper/e32692242e2f3f0335f2280041f5e0df05879932", "referenceCount": 29, "citationCount": 26, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Multi-level multi-modality (PET and CT) fusion radiomics: prognostic modeling for non-small cell lung carcinoma", "abstract": "We developed multi-modality radiomic models by integrating information extracted from 18F-FDG PET and CT images using feature- and image-level fusions, toward improved prognosis for non-small cell lung carcinoma (NSCLC) patients. Two independent cohorts of NSCLC patients from two institutions (87 and 95 patients) were cycled as training and testing datasets. Fusion approaches were applied at two levels, namely feature- and image-levels. For feature-level fusion, radiomic features were extracted individually from CT and PET images and concatenated. Alternatively, radiomic features extracted separately from CT and PET images were averaged. For image-level fusion, wavelet fusion was utilized and tuned with two parameters, namely CT weight and Wavelet Band Pass Filtering Ratio. Clinical and combined clinical + radiomic models were developed. Gray level discretization was performed at 3 different levels (16, 32 and 64) and 225 radiomics features were extracted. Overall survival (OS) was considered as the endpoint. For feature reduction, correlated (redundant) features were excluded using Spearman’s correlation, and best combination of top ten features with highest concordance-indices (via univariate Cox model) were selected in each model for further multivariate Cox model. Moreover, prognostic score’s median, obtained from the training cohort, was used intact in the testing cohort as a threshold to classify patients into low- versus high-risk groups, and log-rank test was applied to assess differences between the Kaplan–Meier curves. Overall, while models based on feature-level fusion strategy showed limited superiority over single-modalities, image-level fusion strategy significantly outperformed both single-modality and feature-level fusion strategies. As such, the clinical model (C-index = 0.656) outperformed all models from single-modality and feature-level strategies, but was outperformed by certain models from image-level fusion strategy. 
Our findings indicated that image-level fusion multi-modality radiomics models outperformed single-modality, feature-level fusion, and clinical models for OS prediction of NSCLC patients.", "year": 2021, "venue": "Physics in Medicine and Biology", "authors": [ "Mehdi Amini", "M. Nazari", "Isaac Shiri Lord", "G. Hajianfar", "M. Deevband", "H. Abdollahi", "Hossein ARABI", "A. Rahmim", "H. Zaidi" ], "externalIds": { "DOI": "10.1088/1361-6560/ac287d", "CorpusId": 237584747, "PubMed": "34544053" }, "url": "https://www.semanticscholar.org/paper/ac34a2f1fc79910223c1f47e0f333622e629b132", "referenceCount": 64, "citationCount": 44, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Physics" ] }, { "title": "BEiT: BERT Pre-Training of Image Transformers", "abstract": "We introduce a self-supervised vision representation model BEiT, which stands for Bidirectional Encoder representation from Image Transformers. Following BERT developed in the natural language processing area, we propose a masked image modeling task to pretrain vision Transformers. Specifically, each image has two views in our pre-training, i.e, image patches (such as 16x16 pixels), and visual tokens (i.e., discrete tokens). We first\"tokenize\"the original image into visual tokens. Then we randomly mask some image patches and fed them into the backbone Transformer. The pre-training objective is to recover the original visual tokens based on the corrupted image patches. After pre-training BEiT, we directly fine-tune the model parameters on downstream tasks by appending task layers upon the pretrained encoder. Experimental results on image classification and semantic segmentation show that our model achieves competitive results with previous pre-training methods. For example, base-size BEiT achieves 83.2% top-1 accuracy on ImageNet-1K, significantly outperforming from-scratch DeiT training (81.8%) with the same setup. Moreover, large-size BEiT obtains 86.3% only using ImageNet-1K, even outperforming ViT-L with supervised pre-training on ImageNet-22K (85.2%). The code and pretrained models are available at https://aka.ms/beit.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Hangbo Bao", "Li Dong", "Furu Wei" ], "externalIds": { "DBLP": "conf/iclr/Bao0PW22", "ArXiv": "2106.08254", "CorpusId": 235436185 }, "url": "https://www.semanticscholar.org/paper/722ad6ac92286507437b31486f47987d6ece05c9", "referenceCount": 59, "citationCount": 2260, "influentialCitationCount": 370, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep learning classification of lung cancer histology using CT images", "abstract": null, "year": 2021, "venue": "Scientific Reports", "authors": [ "T. Chaunzwa", "A. Hosny", "Yiwen Xu", "Andrea Shafer", "N. Diao", "M. Lanuti", "D. Christiani", "R. Mak", "H. Aerts" ], "externalIds": { "PubMedCentral": "7943565", "DOI": "10.1038/s41598-021-84630-x", "CorpusId": 232185911, "PubMed": "33727623" }, "url": "https://www.semanticscholar.org/paper/0e373dbc2a07c089efdb32924f073d4578252a6c", "referenceCount": 72, "citationCount": 133, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Histologic subtype classification of non-small cell lung cancer using PET/CT images", "abstract": null, "year": 2020, "venue": "European Journal of Nuclear Medicine and Molecular Imaging", "authors": [ "Yong Han", "Yuan Ma", "Zhiyuan Wu", "Feng Zhang", "D. 
Zheng", "Xiangtong Liu", "Lixin Tao", "Zhigang Liang", "Zhi Yang", "Xia Li", "Jian Huang", "Xiuhua Guo" ], "externalIds": { "MAG": "3048173156", "DOI": "10.1007/s00259-020-04771-5", "CorpusId": 221100005, "PubMed": "32776232" }, "url": "https://www.semanticscholar.org/paper/74b2e659ef365460755f16158225af0f6b2933b9", "referenceCount": 65, "citationCount": 108, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Automatic lung segmentation in routine imaging is primarily a data diversity problem, not a methodology problem", "abstract": null, "year": 2020, "venue": "European Radiology Experimental", "authors": [ "J. Hofmanninger", "F. Prayer", "Jeanny Pan", "S. Röhrich", "H. Prosch", "G. Langs" ], "externalIds": { "MAG": "3054666633", "PubMedCentral": "7438418", "DOI": "10.1186/s41747-020-00173-2", "CorpusId": 221183043, "PubMed": "32814998" }, "url": "https://www.semanticscholar.org/paper/2446fefa0b4466592254ee90bad6737a511351ff", "referenceCount": 46, "citationCount": 354, "influentialCitationCount": 27, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Optuna: A Next-generation Hyperparameter Optimization Framework", "abstract": "The purpose of this study is to introduce new design-criteria for next-generation hyperparameter optimization software. The criteria we propose include (1) define-by-run API that allows users to construct the parameter search space dynamically, (2) efficient implementation of both searching and pruning strategies, and (3) easy-to-setup, versatile architecture that can be deployed for various purposes, ranging from scalable distributed computing to light-weight experiment conducted via interactive interface. In order to prove our point, we will introduce Optuna, an optimization software which is a culmination of our effort in the development of a next generation optimization software. As an optimization software designed with define-by-run principle, Optuna is particularly the first of its kind. We will present the design-techniques that became necessary in the development of the software that meets the above criteria, and demonstrate the power of our new design through experimental results and real world applications. Our software is available under the MIT license (https://github.com/pfnet/optuna/).", "year": 2019, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Takuya Akiba", "Shotaro Sano", "Toshihiko Yanase", "Takeru Ohta", "Masanori Koyama" ], "externalIds": { "DBLP": "journals/corr/abs-1907-10902", "MAG": "2962897394", "ArXiv": "1907.10902", "DOI": "10.1145/3292500.3330701", "CorpusId": 196194314 }, "url": "https://www.semanticscholar.org/paper/4cdf2fad22afc865999747336c7399fe422e6e8e", "referenceCount": 34, "citationCount": 4014, "influentialCitationCount": 274, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A radiogenomic dataset of non-small cell lung cancer", "abstract": null, "year": 2018, "venue": "Scientific Data", "authors": [ "Shaimaa Bakr", "O. Gevaert", "Sebastian Echegaray", "K. Ayers", "Mu Zhou", "M. Shafiq", "Hong Zheng", "J. Benson", "Weiruo Zhang", "A. Leung", "M. Kadoch", "Chuong D. Hoang", "J. Shrager", "A. Quon", "D. Rubin", "S. Plevritis", "S. 
Napel" ], "externalIds": { "MAG": "2896246551", "PubMedCentral": "6190740", "DOI": "10.1038/sdata.2018.202", "CorpusId": 53102382, "PubMed": "30325352" }, "url": "https://www.semanticscholar.org/paper/722cb04bb9b1ca0098e12bb6bf3fd3cb9416209f", "referenceCount": 44, "citationCount": 195, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "A large-scale ct and pet/ct dataset for lung cancer diagnosis [dataset]", "abstract": null, "year": 2020, "venue": "The cancer imaging archive", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Data from nsclc-radiomics", "abstract": null, "year": 2015, "venue": "The cancer imaging archive", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Lung Cancer Detection Using Modified AlexNet Architecture and Support Vector Machine", "abstract": null, "year": null, "venue": "Computers, Materials & Continua", "authors": [ "Iftikhar Naseer", "Tehreem Masood", "Sheeraz Akram", "A. Jaffar", "Muhammad Rashid", "Muhammad Amjad Iqbal" ], "externalIds": { "DOI": "10.32604/cmc.2023.032927", "CorpusId": 252475922 }, "url": "https://www.semanticscholar.org/paper/dfdd1b16a2a5224c746c58ab4de6417c02745d33", "referenceCount": 0, "citationCount": 18, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "World health organization: Lung cancer fact sheet", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "HARMONIC: A Framework for Explanatory Cognitive Robots": { "paper_title": "HARMONIC: A Framework for Explanatory Cognitive Robots", "arxiv_id": "2409.18037v1", "keyword": "transformer", "authors": [ "Sanjay Oruganti", "Sergei Nirenburg", "Marjorie McShane", "Jesse English", "Michael K. Roberts", "Christian Arndt" ], "references": [ { "title": "Intelligent Humanoid Robots in Manufacturing", "abstract": "Technological evolution in the field of robotics is emerging with major breakthroughs in recent years. This was especially fostered by revolutionary new software applications leading to humanoid robots. Humanoids are being envisioned for manufacturing applications to form human-robot teams. But their implication in the manufacturing practices especially for industrial safety standards and lean manufacturing practices have been minimally addressed. Humanoids will also be competing with conventional robotic arms and effective methods to assess their return on investment are needed. To study the next generation of industrial automation, we used the case context of Tesla's humanoid robot. The company has recently unveiled its project on an intelligent humanoid robot named 'Optimus' to achieve an increased level of manufacturing automation. This article proposes a framework to integrate humanoids for manufacturing automation and also presents the significance of safety standards of human-robot collaboration. A case of lean assembly cell for the manufacturing of an open-source medical ventilator was used for human-humanoid automation. Simulation results indicate that humanoids can increase the level of manufacturing automation. 
Managerial and research implications are presented.", "year": 2024, "venue": "IEEE/ACM International Conference on Human-Robot Interaction", "authors": [ "A. A. Malik", "Tariq Masood", "Alexander Brem" ], "externalIds": { "DBLP": "conf/hri/MalikM024", "DOI": "10.1145/3610978.3640765", "CorpusId": 268489200 }, "url": "https://www.semanticscholar.org/paper/4ed0e8af5653c5f843f123ec6069c27839f2a580", "referenceCount": 14, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LLMs Can't Plan, But Can Help Planning in LLM-Modulo Frameworks", "abstract": "There is considerable confusion about the role of Large Language Models (LLMs) in planning and reasoning tasks. On one side are over-optimistic claims that LLMs can indeed do these tasks with just the right prompting or self-verification strategies. On the other side are perhaps over-pessimistic claims that all that LLMs are good for in planning/reasoning tasks are as mere translators of the problem specification from one syntactic format to another, and ship the problem off to external symbolic solvers. In this position paper, we take the view that both these extremes are misguided. We argue that auto-regressive LLMs cannot, by themselves, do planning or self-verification (which is after all a form of reasoning), and shed some light on the reasons for misunderstandings in the literature. We will also argue that LLMs should be viewed as universal approximate knowledge sources that have much more meaningful roles to play in planning/reasoning tasks beyond simple front-end/back-end format translators. We present a vision of {\\bf LLM-Modulo Frameworks} that combine the strengths of LLMs with external model-based verifiers in a tighter bi-directional interaction regime. We will show how the models driving the external verifiers themselves can be acquired with the help of LLMs. We will also argue that rather than simply pipelining LLMs and symbolic components, this LLM-Modulo Framework provides a better neuro-symbolic approach that offers tighter integration between LLMs and symbolic components, and allows extending the scope of model-based planning/reasoning regimes towards more flexible knowledge, problem and preference specifications.", "year": 2024, "venue": "arXiv.org", "authors": [ "Subbarao Kambhampati", "Karthik Valmeekam", "L. Guan", "Kaya Stechly", "Mudit Verma", "Siddhant Bhambri", "Lucas Saldyt", "Anil Murthy" ], "externalIds": { "DBLP": "journals/corr/abs-2402-01817", "ArXiv": "2402.01817", "DOI": "10.48550/arXiv.2402.01817", "CorpusId": 267413178 }, "url": "https://www.semanticscholar.org/paper/b156004675ad3aa5e39a56928afc530aec191044", "referenceCount": 52, "citationCount": 43, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Automating Knowledge Acquisition for Content-Centric Cognitive Agents Using LLMs", "abstract": "The paper describes a system that uses large language model (LLM) technology to support automatic learning of new entries in an intelligent agent’s semantic lexicon. The process is bootstrapped by an existing non-toy lexicon and a natural language generator that converts formal, ontologically-grounded representations of meaning into natural language sentences. The learning method involves a sequence of LLM requests and includes an automatic quality control step. 
To date, this learning method has been applied to learning multiword expressions whose meanings are equivalent to those of transitive verbs in the agent’s lexicon. The experiment demonstrates the benefits of a hybrid learning architecture that integrates knowledge-based methods and resources with both traditional data analytics and LLMs.", "year": 2023, "venue": "Proceedings of the AAAI Symposium Series", "authors": [ "Sanjay Oruganti", "S. Nirenburg", "Jesse English", "Marjorie J. McShane" ], "externalIds": { "ArXiv": "2312.16378", "DBLP": "journals/corr/abs-2312-16378", "DOI": "10.48550/arXiv.2312.16378", "CorpusId": 266573638 }, "url": "https://www.semanticscholar.org/paper/d6ddfc50e0a5eb71568644e74458a02ebbbc5c59", "referenceCount": 14, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "IKT-BT: Indirect Knowledge Transfer Behavior Tree Framework for Multi-Robot Systems Through Communication Eavesdropping", "abstract": "Multi-agent and multi-robot systems (MRS) often rely on direct communication for information sharing. This work explores an alternative approach inspired by eavesdropping mechanisms in nature that involves casual observation of agent interactions to enhance decentralized knowledge dissemination. We achieve this through a novel IKT-BT framework tailored for a behavior-based MRS, encapsulating knowledge and control actions in Behavior Trees (BT). We present two new BT-based modalities - eavesdrop-update (EU) and eavesdrop-buffer-update (EBU) - incorporating unique eavesdropping strategies and efficient episodic memory management suited for resource-limited swarm robots. We theoretically analyze the IKT-BT framework for an MRS and validate the performance of the proposed modalities through extensive experiments simulating a search and rescue mission. Our results reveal improvements in both global mission performance outcomes and agent-level knowledge dissemination with a reduced need for direct communication.", "year": 2023, "venue": "arXiv.org", "authors": [ "Sanjay Oruganti", "Ramviyas Parasuraman", "R. Pidaparti" ], "externalIds": { "DBLP": "journals/corr/abs-2312-11802", "ArXiv": "2312.11802", "DOI": "10.48550/arXiv.2312.11802", "CorpusId": 266361998 }, "url": "https://www.semanticscholar.org/paper/fafbdd419b0ad13a16b8c37fd0cbabd774cd1118", "referenceCount": 0, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Real-Time Approach for Humanoid Robot Walking Including Dynamic Obstacles Avoidance", "abstract": "This paper proposes a novel approach to online re-plan the walking trajectory of a biped humanoid robot to avoid unexpected interactions and impacts with dynamic obstacles that may compromise the balance of the humanoid robot. The proposed method adjusts the position of the contacts of a pre-planned global trajectory according to the position of moving obstacles and the robot's dynamic properties. The methodology includes a graph-based footstep planner to generate a footstep sequence aware of possible changes in a dynamic environment, a Model Predictive Controller based on the Single-Rigid Body Dynamics to track the computed footsteps, and a final Whole-Body Control layer to compute proper joint torque commands. 
Preliminary results using the proposed approach are presented to demonstrate the effectiveness of the proposed framework in simulated scenarios with the DRACO3 humanoid bipedal platform.", "year": 2023, "venue": "IEEE-RAS International Conference on Humanoid Robots", "authors": [ "Luca Rossini", "E. Hoffman", "S. Bang", "Luis Sentis", "N. Tsagarakis" ], "externalIds": { "DBLP": "conf/humanoids/RossiniHBST23", "DOI": "10.1109/Humanoids57100.2023.10375191", "CorpusId": 266732913 }, "url": "https://www.semanticscholar.org/paper/9cb8361fe56c500873db83990c79b1dd2f8779af", "referenceCount": 27, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Open X-Embodiment: Robotic Learning Datasets and RT-X Models", "abstract": "Large, high-capacity models trained on diverse datasets have shown remarkable successes on efficiently tackling downstream applications. In domains from NLP to Computer Vision, this has led to a consolidation of pretrained models, with general pretrained backbones serving as a starting point for many applications. Can such a consolidation happen in robotics? Conventionally, robotic learning methods train a separate model for every application, every robot, and even every environment. Can we instead train generalist X-robot policy that can be adapted efficiently to new robots, tasks, and environments? In this paper, we provide datasets in standardized data formats and models to make it possible to explore this possibility in the context of robotic manipulation, alongside experimental results that provide an example of effective X-robot policies. We assemble a dataset from 22 different robots collected through a collaboration between 21 institutions, demonstrating 527 skills (160266 tasks). We show that a high-capacity model trained on this data, which we call RT-X, exhibits positive transfer and improves the capabilities of multiple robots by leveraging experience from other platforms. More details can be found on the project website https://robotics-transformer-x.github.io.", "year": 2023, "venue": "arXiv.org", "authors": [ "A. Padalkar", "Acorn Pooley", "Ajinkya Jain", "Alex Bewley", "Alex Herzog", "A. Irpan", "Alexander Khazatsky", "Anant Rai", "Anika Singh", "Anthony Brohan", "A. Raffin", "Ayzaan Wahid", "Ben Burgess-Limerick", "Beomjoon Kim", "Bernhard Schölkopf", "Brian Ichter", "Cewu Lu", "Charles Xu", "Chelsea Finn", "Chenfeng Xu", "Cheng Chi", "Chenguang Huang", "Christine Chan", "Chuer Pan", "Chuyuan Fu", "Coline Devin", "Danny Driess", "Deepak Pathak", "Dhruv Shah", "Dieter Büchler", "Dmitry Kalashnikov", "Dorsa Sadigh", "Edward Johns", "Federico Ceola", "Fei Xia", "F. Stulp", "Gaoyue Zhou", "G. Sukhatme", "G. Salhotra", "Ge Yan", "Giulio Schiavi", "Hao Su", "Haoshu Fang", "Haochen Shi", "H. B. Amor", "Henrik I Christensen", "Hiroki Furuta", "Homer Walke", "Hongjie Fang", "Igor Mordatch", "Ilija Radosavovic", "Isabel Leal", "Jacky Liang", "Jaehyung Kim", "Jan Schneider", "Jasmine Hsu", "J. Bohg", "Jeff Bingham", "Jiajun Wu", "Jialin Wu", "Jianlan Luo", "Jiayuan Gu", "Jie Tan", "Jihoon Oh", "Jitendra Malik", "Jonathan Tompson", "Jonathan Yang", "Joseph J. Lim", "João Silvério", "Junhyek Han", "Kanishka Rao", "Karl Pertsch", "Karol Hausman", "Keegan Go", "K. Gopalakrishnan", "Ken Goldberg", "Kendra Byrne", "Kenneth Oslund", "Kento Kawaharazuka", "Kevin Zhang", "K. Majd", "Krishan Rana", "K. Srinivasan", "L. 
Chen", "Lerrel Pinto", "Liam Tan", "Lionel Ott", "Lisa Lee", "Masayoshi Tomizuka", "Maximilian Du", "Michael Ahn", "Mingtong Zhang", "Mingyu Ding", "M. K. Srirama", "Mohit Sharma", "Moo Jin Kim", "Naoaki Kanazawa", "Nicklas Hansen", "N. Heess", "Nikhil J. Joshi", "Niko Suenderhauf", "Norman Di Palo", "N. Shafiullah", "Oier Mees", "Oliver Kroemer", "Pannag R. Sanketi", "Paul Wohlhart", "Peng Xu", "P. Sermanet", "Priya Sundaresan", "Q. Vuong", "Rafael Rafailov", "Ran Tian", "Ria Doshi", "Russell Mendonca", "Rutav Shah", "Ryan Hoque", "Ryan C. Julian", "Samuel Bustamante", "Sean Kirmani", "Sergey Levine", "Sherry Moore", "Shikhar Bahl", "Shivin Dass", "Shuran Song", "Sichun Xu", "Siddhant Haldar", "S. Adebola", "Simon Guist", "Soroush Nasiriany", "S. Schaal", "Stefan Welker", "Stephen Tian", "Sudeep Dasari", "Suneel Belkhale", "Takayuki Osa", "Tatsuya Harada", "T. Matsushima", "Ted Xiao", "Tianhe Yu", "Tianli Ding", "Todor Davchev", "Tony Zhao", "Travis Armstrong", "T. Darrell", "Vidhi Jain", "Vincent Vanhoucke", "Wei Zhan", "Wenxuan Zhou", "Wolfram Burgard", "Xi Chen", "Xiaolong Wang", "Xinghao Zhu", "Xuanlin Li", "Yao Lu", "Yevgen Chebotar", "Yifan Zhou", "Yifeng Zhu", "Ying Xu", "Yixuan Wang", "Yonatan Bisk", "Yoonyoung Cho", "Youngwoon Lee", "Yuchen Cui", "Yueh-hua Wu", "Yujin Tang", "Yuke Zhu", "Yunzhu Li", "Yusuke Iwasawa", "Yutaka Matsuo", "Zhuo Xu", "Zichen Jeff Cui" ], "externalIds": { "ArXiv": "2310.08864", "DBLP": "journals/corr/abs-2310-08864", "DOI": "10.48550/arXiv.2310.08864", "CorpusId": 263626099 }, "url": "https://www.semanticscholar.org/paper/ef7d31137ef06c5be8c2824ecc5af6ce3358cc8f", "referenceCount": 134, "citationCount": 199, "influentialCitationCount": 20, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RT-2: Vision-Language-Action Models Transfer Web Knowledge to Robotic Control", "abstract": "We study how vision-language models trained on Internet-scale data can be incorporated directly into end-to-end robotic control to boost generalization and enable emergent semantic reasoning. Our goal is to enable a single end-to-end trained model to both learn to map robot observations to actions and enjoy the benefits of large-scale pretraining on language and vision-language data from the web. To this end, we propose to co-fine-tune state-of-the-art vision-language models on both robotic trajectory data and Internet-scale vision-language tasks, such as visual question answering. In contrast to other approaches, we propose a simple, general recipe to achieve this goal: in order to fit both natural language responses and robotic actions into the same format, we express the actions as text tokens and incorporate them directly into the training set of the model in the same way as natural language tokens. We refer to such category of models as vision-language-action models (VLA) and instantiate an example of such a model, which we call RT-2. Our extensive evaluation (6k evaluation trials) shows that our approach leads to performant robotic policies and enables RT-2 to obtain a range of emergent capabilities from Internet-scale training. This includes significantly improved generalization to novel objects, the ability to interpret commands not present in the robot training data (such as placing an object onto a particular number or icon), and the ability to perform rudimentary reasoning in response to user commands (such as picking up the smallest or largest object, or the one closest to another object). 
We further show that incorporating chain of thought reasoning allows RT-2 to perform multi-stage semantic reasoning, for example figuring out which object to pick up for use as an improvised hammer (a rock), or which type of drink is best suited for someone who is tired (an energy drink).", "year": 2023, "venue": "Conference on Robot Learning", "authors": [ "Anthony Brohan", "Noah Brown", "Justice Carbajal", "Yevgen Chebotar", "K. Choromanski", "Tianli Ding", "Danny Driess", "Kumar Avinava Dubey", "Chelsea Finn", "Peter R. Florence", "Chuyuan Fu", "Montse Gonzalez Arenas", "K. Gopalakrishnan", "Kehang Han", "Karol Hausman", "Alexander Herzog", "Jasmine Hsu", "Brian Ichter", "A. Irpan", "Nikhil J. Joshi", "Ryan C. Julian", "Dmitry Kalashnikov", "Yuheng Kuang", "Isabel Leal", "S. Levine", "H. Michalewski", "Igor Mordatch", "Karl Pertsch", "Kanishka Rao", "Krista Reymann", "M. Ryoo", "Grecia Salazar", "Pannag R. Sanketi", "P. Sermanet", "Jaspiar Singh", "Anika Singh", "Radu Soricut", "Huong Tran", "Vincent Vanhoucke", "Q. Vuong", "Ayzaan Wahid", "Stefan Welker", "Paul Wohlhart", "Ted Xiao", "Tianhe Yu", "Brianna Zitkovich" ], "externalIds": { "ArXiv": "2307.15818", "DBLP": "conf/corl/ZitkovichYXXXXW23", "DOI": "10.48550/arXiv.2307.15818", "CorpusId": 260293142 }, "url": "https://www.semanticscholar.org/paper/38939304bb760473141c2aca0305e44fbe04e6e8", "referenceCount": 94, "citationCount": 595, "influentialCitationCount": 47, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "KT-BT: A Framework for Knowledge Transfer Through Behavior Trees in Multirobot Systems", "abstract": "Multirobot and multiagent systems demonstrate collective (swarm) intelligence through systematic and distributed integration of local behaviors in a group. Agents sharing knowledge about the mission and environment can enhance performance at individual and mission levels. However, this is difficult to achieve, partly due to the lack of a generic framework for transferring part of the known knowledge (behaviors) between agents. This article presents a new knowledge representation framework and a transfer strategy called KT-BT: knowledge transfer through behavior trees. The KT-BT framework follows a query-response-update mechanism through an online behavior tree framework, where agents broadcast queries for unknown conditions and respond with appropriate knowledge using a condition-action-control subflow. We embed a novel grammar structure called stringBT that encodes knowledge, enabling behavior sharing. We theoretically investigate the properties of the KT-BT framework in achieving homogeneity of high knowledge across the entire group compared to a heterogeneous system without the capability of sharing their knowledge. We extensively verify our framework in a simulated multirobot search and rescue problem. The results show successful knowledge transfers and improved group performance in various scenarios. We further study the effects of opportunities and communication range on group performance, knowledge spread, and functional heterogeneity in a group of agents, presenting interesting insights.", "year": 2022, "venue": "IEEE Transactions on robotics", "authors": [ "Sanjay Sarma Oruganti Venkata", "Ramviyas Parasuraman", "R. 
Pidaparti" ], "externalIds": { "ArXiv": "2209.02886", "DBLP": "journals/corr/abs-2209-02886", "DOI": "10.1109/TRO.2023.3290449", "CorpusId": 252111092 }, "url": "https://www.semanticscholar.org/paper/42ec6c9fb18a29f870ba055d3e516c46877de866", "referenceCount": 76, "citationCount": 7, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Survey of Behavior Trees in Robotics and AI", "abstract": null, "year": 2020, "venue": "Robotics Auton. Syst.", "authors": [ "Matteo Iovino", "Edvards Scukins", "J. Styrud", "Petter Ögren", "Christian Smith" ], "externalIds": { "MAG": "3024044737", "ArXiv": "2005.05842", "DBLP": "journals/corr/abs-2005-05842", "DOI": "10.1016/j.robot.2022.104096", "CorpusId": 218595872 }, "url": "https://www.semanticscholar.org/paper/595c7aeb6a9c9033a68aff1b25067ce967ea2a77", "referenceCount": 183, "citationCount": 180, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Behavior Trees in Robotics and AI: An Introduction", "abstract": "Behavior Trees (BTs) provide a way to structure the behavior of an artificial agent such as a robot or a non-player character in a computer game.  Traditional design methods, such as finite state m ...", "year": 2017, "venue": "arXiv.org", "authors": [ "Michele Colledanchise", "Petter Ögren" ], "externalIds": { "DBLP": "journals/corr/abs-1709-00084", "MAG": "2753034999", "ArXiv": "1709.00084", "DOI": "10.1201/9780429489105", "CorpusId": 27470659 }, "url": "https://www.semanticscholar.org/paper/9830c9d16293f8f87f998aa449143f0ed1554d1a", "referenceCount": 74, "citationCount": 394, "influentialCitationCount": 59, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "No falls, no resets: Reliable humanoid behavior in the DARPA robotics challenge", "abstract": "We describe Team WPI-CMU's approach to the DARPA Robotics Challenge (DRC), focusing on our strategy to avoid failures that required physical human intervention. We implemented safety features in our controller to detect potential catastrophic failures, stop the current behavior, and allow remote intervention by a human supervisor. Our safety methods and operator interface worked: we avoided catastrophe and remote operators could safely recover from difficult situations. We were the only team in the DRC Finals that attempted all tasks, scored points (14/16), did not require physical human intervention (a reset), and did not fall in the two missions during the two days of tests. We also had the most consistent pair of runs. Much of the paper discusses lessons learned from the DRC.", "year": 2015, "venue": "IEEE-RAS International Conference on Humanoid Robots", "authors": [ "C. Atkeson", "B. W. Babu", "Nandan Banerjee", "D. Berenson", "Christoper P. Bove", "Xiongyi Cui", "M. DeDonato", "R. Du", "S. Feng", "P. Franklin", "M. Gennert", "J. Graff", "P. He", "A. Jaeger", "Joohyung Kim", "K. Knoedler", "Lening Li", "Chenggang Liu", "X. Long", "T. Padır", "F. Polido", "G. Tighe", "X. 
Xinjilefu" ], "externalIds": { "DBLP": "conf/humanoids/AtkesonBBBBCDDF15", "MAG": "2201087437", "DOI": "10.1109/HUMANOIDS.2015.7363436", "CorpusId": 1536171 }, "url": "https://www.semanticscholar.org/paper/348c711f19a70591e3f634efa1df27831362ea1a", "referenceCount": 18, "citationCount": 107, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Thinking fast and slow.", "abstract": null, "year": 2014, "venue": "Australian Veterinary Journal", "authors": [ "N. McGlynn" ], "externalIds": { "MAG": "2753615671", "CorpusId": 36031679, "PubMed": "25577814" }, "url": "https://www.semanticscholar.org/paper/2f2961362355e45fa014ca0bb8ce4495aedf8824", "referenceCount": 0, "citationCount": 8490, "influentialCitationCount": 847, "isOpenAccess": false, "fieldsOfStudy": [ "Biology", "Medicine" ] }, { "title": "Practical verification of decision-making in agent-based autonomous systems", "abstract": null, "year": 2013, "venue": "International Conference on Automated Software Engineering", "authors": [ "Louise Dennis", "Michael Fisher", "N. Lincoln", "A. Lisitsa", "S. Veres" ], "externalIds": { "MAG": "2138512963", "ArXiv": "1310.2431", "DBLP": "journals/ase/DennisFLLV16", "DOI": "10.1007/s10515-014-0168-9", "CorpusId": 3414751 }, "url": "https://www.semanticscholar.org/paper/6b67e8e7218cbaadef06303e50b2dbcc2f30b691", "referenceCount": 99, "citationCount": 102, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DIARC: A Testbed for Natural Human-Robot Interaction", "abstract": "DIARC, a distributed integrated affect, reflection, cognition architecture for robots, provides many features that are critical to successful natural human-robot interaction. As such, DIARC is an ideal platform for experimentation in HRI. In this paper we describe the architecture and and its implementation in ADE, paying particular attention to its interaction capabilities and features that allow robust operation. These features are evaluated in the context of the 2006 AAAI Robot Competition.", "year": 2006, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "P. Schermerhorn", "J. Kramer", "Christopher Middendorff", "Matthias Scheutz" ], "externalIds": { "DBLP": "conf/aaai/SchermerhornKMS06", "MAG": "1207053982", "CorpusId": 5931840 }, "url": "https://www.semanticscholar.org/paper/7f79f8b6f4c1e0d7e692d8ad0069d1c26db32dfb", "referenceCount": 13, "citationCount": 58, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "content-centric cognitive models", "abstract": null, "year": 2020, "venue": "Proceedings of the Annual Conference on Advances in Cognitive Systems", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Systematic Integration of Cognitive and Robotic Architectures", "abstract": "Originally, progress towards the AI goal of building artificial agents with human-like intelligence was best seen in cognitive architecture research that focused on developing complete agents in a systematic, theory-driven way. Later, research in embodied AI and robotics turned away from this focus on higher-level cognition in favor of making robots robustly achieve simple tasks in the real world. 
The ensuing hiatus between cognition-focused and action-focused research perspectives is still reflected in cognitive and robotic architectures today. In this paper, we attempt to reunite the two views by introducing a theoretically motivated, generic interface between cognitive and robotic architectures. From this integration the advances in both cognitive and robotic architectures can be leveraged to produce more complex adaptive robotic behavior. We start by reviewing the differences between cognitive and robotic architecture, followed by a comparison of two alternative methods for integrating such architectures. As a result of this comparison, we propose a threepart interface framework for architecture integration. We then report two specific instances of the interface for integrating the ICARUS and ACT-R cognitive architectures into the robotic DIARC architecture, along with proof-of-concept implementations with two sets of knowledge structures for executing a simple office environment exploration task using a Pioneer P3-AT robot. We describe qualitative evaluations of both integrated architectures and discuss directions for future research with the proposed framework.", "year": 2012, "venue": "", "authors": [ "P. Schermerhorn", "J. Harris", "Matthias Scheutz" ], "externalIds": { "MAG": "2187711819", "CorpusId": 2189967 }, "url": "https://www.semanticscholar.org/paper/d2c2ec0f5deeef0896a85eb51d266bf7cd6be41a", "referenceCount": 28, "citationCount": 25, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A cognitive architecture for simulating bodies and minds.", "abstract": "This paper presents an overview of a cognitive architecture, OntoAgent, that supports the creation and deployment of intelligent agents capable of simulating human-like abilities. The agents, which have a simulated mind and, if applicable, a simulated body, are intended to operate as members of multi-agent teams featuring both artificial and human agents. The agent architecture and its underlying knowledge resources and processors are being developed in a sufficiently generic way to support a variety of applications. In this paper we briefly describe the architecture and two applications being configured within it: the Maryland Virtual Patient (MVP) system for training medical personnel and the CLinician's ADvisor (CLAD). We organize the discussion around four aspects of agent modeling and how they are utilized in the two applications: physiological simulation, modeling an agent's knowledge and learning, decision-making and language processing.", "year": 2011, "venue": "AMIA ... Annual Symposium proceedings. AMIA Symposium", "authors": [ "S. Nirenburg", "Marjorie J. McShane", "S. Beale", "R. Catizone" ], "externalIds": { "MAG": "50748042", "CorpusId": 38566027, "PubMed": "22195149" }, "url": "https://www.semanticscholar.org/paper/9bba64c52fa55c5c95a09d8a6391b6bbb1dcc51a", "referenceCount": 16, "citationCount": 9, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "DEKADE : An Environment Supporting Development of NLP Systems", "abstract": "This paper describes ongoing work on the DEKADE (Development, Evaluation, Knowledge Acquisition, and Demonstration Environment) system and its components, the DekadeAPI, the DekadeServer, and the DekadeClient. 
DEKADE supports the development and operation of the natural language processing (NLP) system OntoSem, including its processors and static knowledge resources as well as applications that rely on OntoSem for their natural language processing needs", "year": 2007, "venue": "", "authors": [ "Jesse English", "S. Nirenburg" ], "externalIds": { "CorpusId": 198990689 }, "url": "https://www.semanticscholar.org/paper/672ad38b89da516fc0e735355ed605ad1f77cf70", "referenceCount": 17, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Linguistics for the Age of AI", "abstract": null, "year": null, "venue": "", "authors": [ "Marjorie J. McShane", "S. Nirenburg" ], "externalIds": { "DOI": "10.7551/mitpress/13618.001.0001", "CorpusId": 243257518 }, "url": "https://www.semanticscholar.org/paper/8c983dbea7a40a8ef735735a105d96f758cdda8b", "referenceCount": 0, "citationCount": 21, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "“Mutual trust in human-ai teams relies on metacognition,”", "abstract": null, "year": null, "venue": "Metacognitive Artificial Intelligence", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "HydraViT: Stacking Heads for a Scalable ViT": { "paper_title": "HydraViT: Stacking Heads for a Scalable ViT", "arxiv_id": "2409.17978v1", "keyword": "transformer", "authors": [ "Janek Haberer", "Ali Hojjat", "Olaf Landsiedel" ], "references": [ { "title": "SHARCS: Efficient Transformers through Routing with Dynamic Width Sub-networks", "abstract": "We introduce SHARCS for adaptive inference that takes into account the hardness of input samples. SHARCS can train a router on any transformer network, enabling the model to direct different samples to sub-networks with varying widths. Our experiments demonstrate that: (1) SHARCS outperforms or complements existing per-sample adaptive inference methods across various classification tasks in terms of accuracy vs. FLOPs; (2) SHARCS generalizes across different architectures and can be even applied to compressed and efficient transformer encoders to further improve their efficiency; (3) SHARCS can provide a 2 times inference speed up at an insignificant drop in accuracy.", "year": 2023, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Mohammadreza Salehi", "Sachin Mehta", "Aditya Kusupati", "Ali Farhadi", "Hannaneh Hajishirzi" ], "externalIds": { "DBLP": "journals/corr/abs-2310-12126", "ArXiv": "2310.12126", "DOI": "10.48550/arXiv.2310.12126", "CorpusId": 264289348 }, "url": "https://www.semanticscholar.org/paper/b065adeeded4d0c85b7624044058f7a3af1d55f2", "referenceCount": 48, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MatFormer: Nested Transformer for Elastic Inference", "abstract": "Transformer models are deployed in a wide range of settings, from multi-accelerator clusters to standalone mobile phones. The diverse inference constraints in these scenarios necessitate practitioners to train foundation models such as PaLM 2, Llama,&ViTs as a series of models of varying sizes. Due to significant training costs, only a select few model sizes are trained and supported, limiting more fine-grained control over relevant tradeoffs, including latency, cost, and accuracy. 
This work introduces MatFormer, a nested Transformer architecture designed to offer elasticity in a variety of deployment constraints. Each Feed Forward Network (FFN) block of a MatFormer model is jointly optimized with a few nested smaller FFN blocks. This training procedure allows for the Mix'n'Match of model granularities across layers -- i.e., a trained universal MatFormer model enables extraction of hundreds of accurate smaller models, which were never explicitly optimized. We empirically demonstrate MatFormer's effectiveness across different model classes (decoders&encoders), modalities (language&vision), and scales (up to 2.6B parameters). We find that a 2.6B decoder-only MatFormer language model (MatLM) allows us to extract smaller models spanning from 1.5B to 2.6B, each exhibiting comparable validation loss and one-shot downstream evaluations to their independently trained counterparts. Furthermore, we observe that smaller encoders extracted from a universal MatFormer-based ViT (MatViT) encoder preserve the metric-space structure for adaptive large-scale retrieval. Finally, we showcase that speculative decoding with the accurate and consistent submodels extracted from MatFormer can further reduce inference latency.", "year": 2023, "venue": "arXiv.org", "authors": [ "Devvrit", "Sneha Kudugunta", "Aditya Kusupati", "Tim Dettmers", "Kaifeng Chen", "Inderjit S. Dhillon", "Yulia Tsvetkov", "Hannaneh Hajishirzi", "S. Kakade", "Ali Farhadi", "Prateek Jain" ], "externalIds": { "ArXiv": "2310.07707", "DBLP": "journals/corr/abs-2310-07707", "DOI": "10.48550/arXiv.2310.07707", "CorpusId": 263834773 }, "url": "https://www.semanticscholar.org/paper/eac59779da7262968a9043985e7cd933c00247a5", "referenceCount": 72, "citationCount": 11, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FlexiViT: One Model for All Patch Sizes", "abstract": "Vision Transformers convert images to sequences by slicing them into patches. The size of these patches controls a speed/accuracy tradeoff, with smaller patches leading to higher accuracy at greater computational cost, but changing the patch size typically requires retraining the model. In this paper, we demonstrate that simply randomizing the patch size at training time leads to a single set of weights that performs well across a wide range of patch sizes, making it possible to tailor the model to different compute budgets at deployment time. We extensively evaluate the resulting model, which we call FlexiViT, on a wide range of tasks, including classification, image-text retrieval, open-world detection, panoptic segmentation, and semantic segmentation, concluding that it usually matches, and sometimes outperforms, standard ViT models trained at a single patch size in an otherwise identical setup. Hence, FlexiViT training is a simple drop-in improvement for ViT that makes it easy to add compute-adaptive capabilities to most models relying on a ViT backbone architecture. Code and pre-trained models are available at github.com/google-research/big_vision.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Lucas Beyer", "Pavel Izmailov", "Alexander Kolesnikov", "Mathilde Caron", "Simon Kornblith", "Xiaohua Zhai", "Matthias Minderer", "M. Tschannen", "Ibrahim M. 
Alabdulmohsin", "Filip Pavetic" ], "externalIds": { "DBLP": "journals/corr/abs-2212-08013", "ArXiv": "2212.08013", "DOI": "10.1109/CVPR52729.2023.01393", "CorpusId": 254685937 }, "url": "https://www.semanticscholar.org/paper/925fe4b2225e534888a2c78c9f6539a8e4e58d59", "referenceCount": 83, "citationCount": 65, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DeiT III: Revenge of the ViT", "abstract": "A Vision Transformer (ViT) is a simple neural architecture amenable to serve several computer vision tasks. It has limited built-in architectural priors, in contrast to more recent architectures that incorporate priors either about the input data or of specific tasks. Recent works show that ViTs benefit from self-supervised pre-training, in particular BerT-like pre-training like BeiT. In this paper, we revisit the supervised training of ViTs. Our procedure builds upon and simplifies a recipe introduced for training ResNet-50. It includes a new simple data-augmentation procedure with only 3 augmentations, closer to the practice in self-supervised learning. Our evaluations on Image classification (ImageNet-1k with and without pre-training on ImageNet-21k), transfer learning and semantic segmentation show that our procedure outperforms by a large margin previous fully supervised training recipes for ViT. It also reveals that the performance of our ViT trained with supervision is comparable to that of more recent architectures. Our results could serve as better baselines for recent self-supervised approaches demonstrated on ViT.", "year": 2022, "venue": "European Conference on Computer Vision", "authors": [ "Hugo Touvron", "M. Cord", "Herv'e J'egou" ], "externalIds": { "ArXiv": "2204.07118", "DBLP": "journals/corr/abs-2204-07118", "DOI": "10.48550/arXiv.2204.07118", "CorpusId": 248178188 }, "url": "https://www.semanticscholar.org/paper/d2f63b56fc6bc373f5c023454c2b253326962865", "referenceCount": 68, "citationCount": 281, "influentialCitationCount": 43, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Vision Transformer Slimming: Multi-Dimension Searching in Continuous Optimization Space", "abstract": "This paper explores the feasibility of finding an optimal sub-model from a vision transformer and introduces a pure vision transformer slimming (ViT-Slim) framework. It can search a sub-structure from the original model end-to-end across multiple dimensions, including the input tokens, MHSA and MLP modules with state-of-the-art performance. Our method is based on a learnable and unified ℓ1sparsity constraint with pre-defined factors to reflect the global importance in the continuous searching space of different dimensions. The searching process is highly efficient through a single-shot training scheme. For instance, on DeiT-S, ViT-Slim only takes ~43 GPU hours for the searching process, and the searched structure is flexible with diverse dimensionalities in different modules. Then, a budget threshold is employed according to the requirements of accuracy-FLOPs trade-off on running devices, and a retraining process is performed to obtain the final model. The extensive experiments show that our ViT-Slim can compress up to 40% of parameters and 40% FLOPs on various vision transformers while increasing the accuracy by ~0.6% on ImageNet. We also demonstrate the advantage of our searched models on several downstream datasets. 
Our code is available at https://github.com/Arnav0400/ViT-Slim.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Arnav Chavan", "Zhiqiang Shen", "Zhuang Liu", "Zechun Liu", "Kwang-Ting Cheng", "Eric P. Xing" ], "externalIds": { "DBLP": "conf/cvpr/ChavanS0LCX22", "ArXiv": "2201.00814", "DOI": "10.1109/CVPR52688.2022.00488", "CorpusId": 245650313 }, "url": "https://www.semanticscholar.org/paper/5ab70d95ca49702a3dd49b39d9396d8136b52311", "referenceCount": 68, "citationCount": 61, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A-ViT: Adaptive Tokens for Efficient Vision Transformer", "abstract": "We introduce A - ViT, a method that adaptively adjusts the inference cost of vision transformer (ViT) for images of different complexity. A - ViT achieves this by automatically reducing the number of tokens in vision transformers that are processed in the network as inference proceeds. We refor-mulate Adaptive Computation Time (ACT [17]) for this task, extending halting to discard redundant spatial tokens. The appealing architectural properties of vision transformers enables our adaptive token reduction mechanism to speed up inference without modifying the network architecture or inference hardware. We demonstrate that A - ViT requires no extra parameters or sub-network for halting, as we base the learning of adaptive halting on the original network parameters. We further introduce distributional prior regularization that stabilizes training compared to prior ACT approaches. On the image classification task (ImageNet1K), we show that our proposed A - ViT yields high efficacy in filtering informative spatial features and cutting down on the overall compute. The proposed method improves the throughput of DeiT-Tiny by 62% and DeiT-Small by 38% with only 0.3% accuracy drop, outperforming prior art by a large margin.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Hongxu Yin", "Arash Vahdat", "J. Álvarez", "Arun Mallya", "J. Kautz", "Pavlo Molchanov" ], "externalIds": { "ArXiv": "2112.07658", "DBLP": "conf/cvpr/YinVAMKM22", "DOI": "10.1109/CVPR52688.2022.01054", "CorpusId": 245131572 }, "url": "https://www.semanticscholar.org/paper/c2a0c18e810535db52e5ebaf180c64ce70356748", "referenceCount": 56, "citationCount": 230, "influentialCitationCount": 25, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MetaFormer is Actually What You Need for Vision", "abstract": "Transformers have shown great potential in computer vision tasks. A common belief is their attention-based token mixer module contributes most to their competence. However, recent works show the attention-based module in transformers can be replaced by spatial MLPs and the resulted models still perform quite well. Based on this observation, we hypothesize that the general architecture of the transformers, instead of the specific token mixer module, is more essential to the model's performance. To verify this, we deliberately replace the attention module in transformers with an embarrassingly simple spatial pooling operator to conduct only basic token mixing. Surprisingly, we observe that the derived model, termed as PoolFormer, achieves competitive performance on multiple computer vision tasks. 
For example, on ImageNet-1K, PoolFormer achieves 82.1% top-1 accuracy, surpassing well-tuned vision transformer/MLP-like baselines DeiT-B/ResMLP-B24 by 0.3%/1.1% accuracy with 35%/52% fewer parameters and 49%/61% fewer MACs. The effectiveness of PoolFormer verifies our hypothesis and urges us to initiate the concept of “MetaFormer”, a general architecture abstracted from transformers without specifying the token mixer. Based on the extensive experiments, we argue that MetaFormer is the key player in achieving superior results for recent transformer and MLP-like models on vision tasks. This work calls for more future research dedicated to improving MetaFormer instead of focusing on the token mixer modules. Additionally, our proposed PoolFormer could serve as a starting baseline for future MetaFormer architecture design.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Weihao Yu", "Mi Luo", "Pan Zhou", "Chenyang Si", "Yichen Zhou", "Xinchao Wang", "Jiashi Feng", "Shuicheng Yan" ], "externalIds": { "DBLP": "conf/cvpr/YuLZSZWFY22", "ArXiv": "2111.11418", "DOI": "10.1109/CVPR52688.2022.01055", "CorpusId": 244478080 }, "url": "https://www.semanticscholar.org/paper/57150ca7d793d6f784cf82da1c349edf7beb6bc2", "referenceCount": 70, "citationCount": 669, "influentialCitationCount": 92, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Swin Transformer V2: Scaling Up Capacity and Resolution", "abstract": "We present techniques for scaling Swin Transformer [35] up to 3 billion parameters and making it capable of training with images of up to 1,536x1,536 resolution. By scaling up capacity and resolution, Swin Transformer sets new records on four representative vision benchmarks: 84.0% top-1 accuracy on ImageNet-V2 image classification, 63.1 / 54.4 box / mask mAP on COCO object detection, 59.9 mIoU on ADE20K semantic segmentation, and 86.8% top-1 accuracy on Kinetics-400 video action classification. We tackle issues of training instability, and study how to effectively transfer models pre-trained at low resolutions to higher resolution ones. To this aim, several novel technologies are proposed: 1) a residual post normalization technique and a scaled cosine attention approach to improve the stability of large vision models; 2) a log-spaced continuous position bias technique to effectively transfer models pre-trained at low-resolution images and windows to their higher-resolution counterparts. In addition, we share our crucial implementation details that lead to significant savings of GPU memory consumption and thus make it feasible to train large vision models with regular GPUs. Using these techniques and self-supervised pre-training, we successfully train a strong 3 billion Swin Transformer model and effectively transfer it to various vision tasks involving high-resolution images or windows, achieving the state-of-the-art accuracy on a variety of benchmarks. Code is available at https://github.com/microsoft/Swin-Transformer.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Ze Liu", "Han Hu", "Yutong Lin", "Zhuliang Yao", "Zhenda Xie", "Yixuan Wei", "Jia Ning", "Yue Cao", "Zheng Zhang", "Li Dong", "Furu Wei", "B.
Guo" ], "externalIds": { "ArXiv": "2111.09883", "DBLP": "journals/corr/abs-2111-09883", "DOI": "10.1109/CVPR52688.2022.01170", "CorpusId": 244346076 }, "url": "https://www.semanticscholar.org/paper/be0fbb810583930c071d0b9b2c5187fe260783f5", "referenceCount": 86, "citationCount": 1290, "influentialCitationCount": 128, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Patch Slimming for Efficient Vision Transformers", "abstract": "This paper studies the efficiency problem for visual transformers by excavating redundant calculation in given networks. The recent transformer architecture has demonstrated its effectiveness for achieving excellent performance on a series of computer vision tasks. However, similar to that of convolutional neural networks, the huge computational cost of vision transformers is still a severe issue. Considering that the attention mechanism aggregates different patches layer-by-layer, we present a novel patch slimming approach that discards useless patches in a topdown paradigm. We first identify the effective patches in the last layer and then use them to guide the patch selection process of previous layers. For each layer, the impact of a patch on the final output feature is approximated and patches with less impacts will be removed. Experimental results on benchmark datasets demonstrate that the proposed method can significantly reduce the computational costs of vision transformers without affecting their performances. For example, over 45% FLOPs of the ViT-Ti model can be reduced with only 0.2% top-1 accuracy drop on the ImageNet dataset.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Yehui Tang", "Kai Han", "Yunhe Wang", "Chang Xu", "Jianyuan Guo", "Chao Xu", "D. Tao" ], "externalIds": { "ArXiv": "2106.02852", "DBLP": "journals/corr/abs-2106-02852", "DOI": "10.1109/CVPR52688.2022.01185", "CorpusId": 235358476 }, "url": "https://www.semanticscholar.org/paper/33fd56e5067a1e8a9713378af3e1c1c08d5ce93b", "referenceCount": 49, "citationCount": 134, "influentialCitationCount": 17, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DynamicViT: Efficient Vision Transformers with Dynamic Token Sparsification", "abstract": "Attention is sparse in vision transformers. We observe the final prediction in vision transformers is only based on a subset of most informative tokens, which is sufficient for accurate image recognition. Based on this observation, we propose a dynamic token sparsification framework to prune redundant tokens progressively and dynamically based on the input. Specifically, we devise a lightweight prediction module to estimate the importance score of each token given the current features. The module is added to different layers to prune redundant tokens hierarchically. To optimize the prediction module in an end-to-end manner, we propose an attention masking strategy to differentiably prune a token by blocking its interactions with other tokens. Benefiting from the nature of self-attention, the unstructured sparse tokens are still hardware friendly, which makes our framework easy to achieve actual speed-up. By hierarchically pruning 66% of the input tokens, our method greatly reduces 31%~37% FLOPs and improves the throughput by over 40% while the drop of accuracy is within 0.5% for various vision transformers. 
Equipped with the dynamic token sparsification framework, DynamicViT models can achieve very competitive complexity/accuracy trade-offs compared to state-of-the-art CNNs and vision transformers on ImageNet. Code is available at https://github.com/raoyongming/DynamicViT", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Yongming Rao", "Wenliang Zhao", "Benlin Liu", "Jiwen Lu", "Jie Zhou", "Cho-Jui Hsieh" ], "externalIds": { "DBLP": "conf/nips/RaoZLLZH21", "ArXiv": "2106.02034", "CorpusId": 235313562 }, "url": "https://www.semanticscholar.org/paper/dbdcabd0444ad50b68ee09e30f39b66e9068f5d2", "referenceCount": 39, "citationCount": 508, "influentialCitationCount": 101, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Not All Images are Worth 16x16 Words: Dynamic Transformers for Efficient Image Recognition", "abstract": "Vision Transformers (ViT) have achieved remarkable success in large-scale image recognition. They split every 2D image into a fixed number of patches, each of which is treated as a token. Generally, representing an image with more tokens would lead to higher prediction accuracy, while it also results in drastically increased computational cost. To achieve a decent trade-off between accuracy and speed, the number of tokens is empirically set to 16x16 or 14x14. In this paper, we argue that every image has its own characteristics, and ideally the token number should be conditioned on each individual input. In fact, we have observed that there exist a considerable number of\"easy\"images which can be accurately predicted with a mere number of 4x4 tokens, while only a small fraction of\"hard\"ones need a finer representation. Inspired by this phenomenon, we propose a Dynamic Transformer to automatically configure a proper number of tokens for each input image. This is achieved by cascading multiple Transformers with increasing numbers of tokens, which are sequentially activated in an adaptive fashion at test time, i.e., the inference is terminated once a sufficiently confident prediction is produced. We further design efficient feature reuse and relationship reuse mechanisms across different components of the Dynamic Transformer to reduce redundant computations. Extensive empirical results on ImageNet, CIFAR-10, and CIFAR-100 demonstrate that our method significantly outperforms the competitive baselines in terms of both theoretical computational efficiency and practical inference speed. Code and pre-trained models (based on PyTorch and MindSpore) are available at https://github.com/blackfeather-wang/Dynamic-Vision-Transformer and https://github.com/blackfeather-wang/Dynamic-Vision-Transformer-MindSpore.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Yulin Wang", "Rui Huang", "S. Song", "Zeyi Huang", "Gao Huang" ], "externalIds": { "DBLP": "conf/nips/WangHSHH21", "ArXiv": "2105.15075", "CorpusId": 239885997 }, "url": "https://www.semanticscholar.org/paper/14b97585f136671742f6ce4151081e487b1fc1fe", "referenceCount": 62, "citationCount": 154, "influentialCitationCount": 9, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MLP-Mixer: An all-MLP Architecture for Vision", "abstract": "Convolutional Neural Networks (CNNs) are the go-to model for computer vision. Recently, attention-based networks, such as the Vision Transformer, have also become popular. 
In this paper we show that while convolutions and attention are both sufficient for good performance, neither of them are necessary. We present MLP-Mixer, an architecture based exclusively on multi-layer perceptrons (MLPs). MLP-Mixer contains two types of layers: one with MLPs applied independently to image patches (i.e. \"mixing\" the per-location features), and one with MLPs applied across patches (i.e. \"mixing\" spatial information). When trained on large datasets, or with modern regularization schemes, MLP-Mixer attains competitive scores on image classification benchmarks, with pre-training and inference cost comparable to state-of-the-art models. We hope that these results spark further research beyond the realms of well established CNNs and Transformers.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "I. Tolstikhin", "N. Houlsby", "Alexander Kolesnikov", "Lucas Beyer", "Xiaohua Zhai", "Thomas Unterthiner", "Jessica Yung", "Daniel Keysers", "Jakob Uszkoreit", "Mario Lucic", "Alexey Dosovitskiy" ], "externalIds": { "DBLP": "conf/nips/TolstikhinHKBZU21", "ArXiv": "2105.01601", "CorpusId": 233714958 }, "url": "https://www.semanticscholar.org/paper/67571d29190faea9fbd104acd16274f8c4edf254", "referenceCount": 65, "citationCount": 2146, "influentialCitationCount": 321, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions", "abstract": "Although convolutional neural networks (CNNs) have achieved great success in computer vision, this work investigates a simpler, convolution-free backbone network useful for many dense prediction tasks. Unlike the recently-proposed Vision Transformer (ViT) that was designed for image classification specifically, we introduce the Pyramid Vision Transformer (PVT), which overcomes the difficulties of porting Transformer to various dense prediction tasks. PVT has several merits compared to the current state of the art. (1) Different from ViT that typically yields low-resolution outputs and incurs high computational and memory costs, PVT not only can be trained on dense partitions of an image to achieve high output resolution, which is important for dense prediction, but also uses a progressive shrinking pyramid to reduce the computations of large feature maps. (2) PVT inherits the advantages of both CNN and Transformer, making it a unified backbone for various vision tasks without convolutions, where it can be used as a direct replacement for CNN backbones. (3) We validate PVT through extensive experiments, showing that it boosts the performance of many downstream tasks, including object detection, instance and semantic segmentation. For example, with a comparable number of parameters, PVT+RetinaNet achieves 40.4 AP on the COCO dataset, surpassing ResNet50+RetinaNet (36.3 AP) by 4.1 absolute AP (see Figure 2). We hope that PVT could serve as an alternative and useful backbone for pixel-level predictions and facilitate future research.", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Wenhai Wang", "Enze Xie", "Xiang Li", "Deng-Ping Fan", "Kaitao Song", "Ding Liang", "Tong Lu", "P.
Luo", "Ling Shao" ], "externalIds": { "DBLP": "conf/iccv/WangX0FSLL0021", "ArXiv": "2102.12122", "DOI": "10.1109/ICCV48922.2021.00061", "CorpusId": 232035922 }, "url": "https://www.semanticscholar.org/paper/3e398bad2d8636491a1034cc938a5e024c7aa881", "referenceCount": 87, "citationCount": 2983, "influentialCitationCount": 430, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Training data-efficient image transformers & distillation through attention", "abstract": "Recently, neural networks purely based on attention were shown to address image understanding tasks such as image classification. However, these visual transformers are pre-trained with hundreds of millions of images using an expensive infrastructure, thereby limiting their adoption. In this work, we produce a competitive convolution-free transformer by training on Imagenet only. We train them on a single computer in less than 3 days. Our reference vision transformer (86M parameters) achieves top-1 accuracy of 83.1% (single-crop evaluation) on ImageNet with no external data. More importantly, we introduce a teacher-student strategy specific to transformers. It relies on a distillation token ensuring that the student learns from the teacher through attention. We show the interest of this token-based distillation, especially when using a convnet as a teacher. This leads us to report results competitive with convnets for both Imagenet (where we obtain up to 85.2% accuracy) and when transferring to other tasks. We share our code and models.", "year": 2020, "venue": "International Conference on Machine Learning", "authors": [ "Hugo Touvron", "M. Cord", "Matthijs Douze", "Francisco Massa", "Alexandre Sablayrolles", "Herv'e J'egou" ], "externalIds": { "ArXiv": "2012.12877", "DBLP": "journals/corr/abs-2012-12877", "CorpusId": 229363322 }, "url": "https://www.semanticscholar.org/paper/ad7ddcc14984caae308c397f1a589aae75d4ab71", "referenceCount": 66, "citationCount": 5424, "influentialCitationCount": 892, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Transformer Interpretability Beyond Attention Visualization", "abstract": "Self-attention techniques, and specifically Transformers, are dominating the field of text processing and are becoming increasingly popular in computer vision classification tasks. In order to visualize the parts of the image that led to a certain classification, existing methods either rely on the obtained attention maps or employ heuristic propagation along the attention graph. In this work, we propose a novel way to compute relevancy for Transformer networks. The method assigns local relevance based on the Deep Taylor Decomposition principle and then propagates these relevancy scores through the layers. This propagation involves attention layers and skip connections, which challenge existing methods. Our solution is based on a specific formulation that is shown to maintain the total relevancy across layers. We benchmark our method on very recent visual Transformer networks, as well as on a text classification problem, and demonstrate a clear advantage over the existing explainability methods. 
Our code is available at: https://github.com/hila-chefer/Transformer-Explainability.", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Hila Chefer", "Shir Gur", "Lior Wolf" ], "externalIds": { "DBLP": "conf/cvpr/CheferGW21", "ArXiv": "2012.09838", "MAG": "3112516115", "DOI": "10.1109/CVPR46437.2021.00084", "CorpusId": 229297908 }, "url": "https://www.semanticscholar.org/paper/0acd7ff5817d29839b40197f7a4b600b7fba24e4", "referenceCount": 50, "citationCount": 507, "influentialCitationCount": 72, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale", "abstract": "While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Alexey Dosovitskiy", "Lucas Beyer", "Alexander Kolesnikov", "Dirk Weissenborn", "Xiaohua Zhai", "Thomas Unterthiner", "Mostafa Dehghani", "Matthias Minderer", "G. Heigold", "S. Gelly", "Jakob Uszkoreit", "N. Houlsby" ], "externalIds": { "MAG": "3094502228", "ArXiv": "2010.11929", "DBLP": "conf/iclr/DosovitskiyB0WZ21", "CorpusId": 225039882 }, "url": "https://www.semanticscholar.org/paper/268d347e8a55b5eb82fb5e7d2f800e33c75ab18a", "referenceCount": 65, "citationCount": 28233, "influentialCitationCount": 4121, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DynaBERT: Dynamic BERT with Adaptive Width and Depth", "abstract": "The pre-trained language models like BERT and RoBERTa, though powerful in many natural language processing tasks, are both computational and memory expensive. To alleviate this problem, one approach is to compress them for specific tasks before deployment. However, recent works on BERT compression usually reduce the large BERT model to a fixed smaller size, and can not fully satisfy the requirements of different edge devices with various hardware performances. In this paper, we propose a novel dynamic BERT model (abbreviated as DynaBERT), which can run at adaptive width and depth. The training process of DynaBERT includes first training a width-adaptive BERT and then allows both adaptive width and depth, by distilling knowledge from the full-sized model to small sub-networks. Network rewiring is also used to keep the more important attention heads and neurons shared by more sub-networks. 
Comprehensive experiments under various efficiency constraints demonstrate that our proposed dynamic BERT (or RoBERTa) at its largest size has comparable performance as BERT (or RoBERTa), while at smaller widths and depths consistently outperforms existing BERT compression methods.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Lu Hou", "Zhiqi Huang", "Lifeng Shang", "Xin Jiang", "Qun Liu" ], "externalIds": { "DBLP": "journals/corr/abs-2004-04037", "MAG": "3101731278", "ArXiv": "2004.04037", "CorpusId": 215415863 }, "url": "https://www.semanticscholar.org/paper/1c332cfa211400fc6f56983fb01a6692046116dd", "referenceCount": 45, "citationCount": 285, "influentialCitationCount": 43, "isOpenAccess": false, "fieldsOfStudy": [ "Geology", "Computer Science" ] }, { "title": "Slimmable Neural Networks", "abstract": "We present a simple and general method to train a single neural network executable at different widths (number of channels in a layer), permitting instant and adaptive accuracy-efficiency trade-offs at runtime. Instead of training individual networks with different width configurations, we train a shared network with switchable batch normalization. At runtime, the network can adjust its width on the fly according to on-device benchmarks and resource constraints, rather than downloading and offloading different models. Our trained networks, named slimmable neural networks, achieve similar (and in many cases better) ImageNet classification accuracy than individually trained models of MobileNet v1, MobileNet v2, ShuffleNet and ResNet-50 at different widths respectively. We also demonstrate better performance of slimmable models compared with individual ones across a wide range of applications including COCO bounding-box object detection, instance segmentation and person keypoint detection without tuning hyper-parameters. Lastly we visualize and discuss the learned features of slimmable networks. Code and models are available at: this https URL", "year": 2018, "venue": "International Conference on Learning Representations", "authors": [ "Jiahui Yu", "L. Yang", "N. Xu", "Jianchao Yang", "Thomas S. Huang" ], "externalIds": { "MAG": "2952416103", "DBLP": "conf/iclr/YuYXYH19", "ArXiv": "1812.08928", "CorpusId": 56657799 }, "url": "https://www.semanticscholar.org/paper/120ffccea4787b88f78b55b9302891ff96cb4228", "referenceCount": 54, "citationCount": 511, "influentialCitationCount": 93, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "NestDNN: Resource-Aware Multi-Tenant On-Device Deep Learning for Continuous Mobile Vision", "abstract": "Mobile vision systems such as smartphones, drones, and augmented-reality headsets are revolutionizing our lives. These systems usually run multiple applications concurrently and their available resources at runtime are dynamic due to events such as starting new applications, closing existing applications, and application priority changes. In this paper, we present NestDNN, a framework that takes the dynamics of runtime resources into account to enable resource-aware multi-tenant on-device deep learning for mobile vision systems. NestDNN enables each deep learning model to offer flexible resource-accuracy trade-offs. At runtime, it dynamically selects the optimal resource-accuracy trade-off for each deep learning model to fit the model's resource demand to the system's available runtime resources. 
In doing so, NestDNN efficiently utilizes the limited resources in mobile vision systems to jointly maximize the performance of all the concurrently running applications. Our experiments show that compared to the resource-agnostic status quo approach, NestDNN achieves as much as 4.2% increase in inference accuracy, 2.0× increase in video frame processing rate and 1.7× reduction on energy consumption.", "year": 2018, "venue": "ACM/IEEE International Conference on Mobile Computing and Networking", "authors": [ "Biyi Fang", "Xiao Zeng", "Mi Zhang" ], "externalIds": { "MAG": "3103203826", "DBLP": "conf/mobicom/FangZ018", "ArXiv": "1810.10090", "DOI": "10.1145/3241539.3241559", "CorpusId": 52978791 }, "url": "https://www.semanticscholar.org/paper/cb6a82115777edee8d72feeb77427265b2a48e27", "referenceCount": 42, "citationCount": 244, "influentialCitationCount": 16, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Attention is All you Need", "abstract": "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Ashish Vaswani", "Noam M. Shazeer", "Niki Parmar", "Jakob Uszkoreit", "Llion Jones", "Aidan N. Gomez", "Lukasz Kaiser", "Illia Polosukhin" ], "externalIds": { "MAG": "2963403868", "DBLP": "conf/nips/VaswaniSPUJGKP17", "ArXiv": "1706.03762", "CorpusId": 13756489 }, "url": "https://www.semanticscholar.org/paper/204e3073870fae3d05bcbc2f6a8e263d9b72e776", "referenceCount": 41, "citationCount": 104988, "influentialCitationCount": 15363, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Ordered Representations with Nested Dropout", "abstract": "In this paper, we present results on ordered representations of data in which different dimensions have different degrees of importance. To learn these representations we introduce nested dropout, a procedure for stochastically removing coherent nested sets of hidden units in a neural network. We first present a sequence of theoretical results for the special case of a semilinear autoencoder. We rigorously show that the application of nested dropout enforces identifiability of the units, which leads to an exact equivalence with PCA. We then extend the algorithm to deep models and demonstrate the relevance of ordered representations to a number of applications. 
Specifically, we use the ordered property of the learned codes to construct hash-based data structures that permit very fast retrieval, achieving retrieval in time logarithmic in the database size and independent of the dimensionality of the representation. This allows codes that are hundreds of times longer than currently feasible for retrieval. We therefore avoid the diminished quality associated with short codes, while still performing retrieval that is competitive in speed with existing methods. We also show that ordered representations are a promising way to learn adaptive compression for efficient online data reconstruction.", "year": 2014, "venue": "International Conference on Machine Learning", "authors": [ "Oren Rippel", "M. Gelbart", "Ryan P. Adams" ], "externalIds": { "ArXiv": "1402.0915", "MAG": "2950305991", "DBLP": "journals/corr/RippelGA14", "CorpusId": 10333238 }, "url": "https://www.semanticscholar.org/paper/7a6fd5573d2679506765d461ec4892fd4017b745", "referenceCount": 24, "citationCount": 80, "influentialCitationCount": 19, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "ImageNet: A large-scale hierarchical image database", "abstract": "The explosion of image data on the Internet has the potential to foster more sophisticated and robust models and algorithms to index, retrieve, organize and interact with images and multimedia data. But exactly how such data can be harnessed and organized remains a critical problem. We introduce here a new database called “ImageNet”, a large-scale ontology of images built upon the backbone of the WordNet structure. ImageNet aims to populate the majority of the 80,000 synsets of WordNet with an average of 500-1000 clean and full resolution images. This will result in tens of millions of annotated images organized by the semantic hierarchy of WordNet. This paper offers a detailed analysis of ImageNet in its current state: 12 subtrees with 5247 synsets and 3.2 million images in total. We show that ImageNet is much larger in scale and diversity and much more accurate than the current image datasets. Constructing such a large-scale database is a challenging task. We describe the data collection scheme with Amazon Mechanical Turk. Lastly, we illustrate the usefulness of ImageNet through three simple applications in object recognition, image classification and automatic object clustering. We hope that the scale, accuracy, diversity and hierarchical structure of ImageNet can offer unparalleled opportunities to researchers in the computer vision community and beyond.", "year": 2009, "venue": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "authors": [ "Jia Deng", "Wei Dong", "R. Socher", "Li-Jia Li", "K. Li", "Li Fei-Fei" ], "externalIds": { "DBLP": "conf/cvpr/DengDSLL009", "MAG": "2108598243", "DOI": "10.1109/CVPR.2009.5206848", "CorpusId": 57246310 }, "url": "https://www.semanticscholar.org/paper/d2c733e34d48784a37d717fe43d9e93277a8c53e", "referenceCount": 27, "citationCount": 56678, "influentialCitationCount": 8947, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Swin Transformer: Hierarchical Vision Transformer using Shifted Windows", "abstract": "This paper presents a new vision Transformer, called Swin Transformer, that capably serves as a general-purpose backbone for computer vision. 
Challenges in adapting Transformer from language to vision arise from differences between the two domains, such as large variations in the scale of visual entities and the high resolution of pixels in images compared to words in text. To address these differences, we propose a hierarchical Transformer whose representation is computed with Shifted windows. The shifted windowing scheme brings greater efficiency by limiting self-attention computation to non-overlapping local windows while also allowing for cross-window connection. This hierarchical architecture has the flexibility to model at various scales and has linear computational complexity with respect to image size. These qualities of Swin Transformer make it compatible with a broad range of vision tasks, including image classification (87.3 top-1 accuracy on ImageNet-1K) and dense prediction tasks such as object detection (58.7 box AP and 51.1 mask AP on COCO test-dev) and semantic segmentation (53.5 mIoU on ADE20K val). Its performance surpasses the previous state-of-the-art by a large margin of +2.7 box AP and +2.6 mask AP on COCO, and +3.2 mIoU on ADE20K, demonstrating the potential of Transformer-based models as vision backbones. The hierarchical design and the shifted window approach also prove beneficial for all-MLP architectures. The code and models are publicly available at https://github.com/microsoft/Swin-Transformer.", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Ze Liu", "Yutong Lin", "Yue Cao", "Han Hu", "Yixuan Wei", "Zheng Zhang", "Stephen Lin", "B. Guo" ], "externalIds": { "ArXiv": "2103.14030", "DBLP": "conf/iccv/LiuL00W0LG21", "DOI": "10.1109/ICCV48922.2021.00986", "CorpusId": 232352874 }, "url": "https://www.semanticscholar.org/paper/c8b25fab5608c3e033d34b4483ec47e68ba109b7", "referenceCount": 86, "citationCount": 15783, "influentialCitationCount": 2591, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Multiple Layers of Features from Tiny Images", "abstract": "Groups at MIT and NYU have collected a dataset of millions of tiny colour images from the web. It is, in principle, an excellent dataset for unsupervised training of deep generative models, but previous researchers who have tried this have found it difficult to learn a good set of filters from the images. We show how to train a multi-layer generative model that learns to extract meaningful features which resemble those found in the human visual cortex. Using a novel parallelization algorithm to distribute the work among multiple machines connected on a network, we show how training such a model can be done in reasonable time. A second problematic aspect of the tiny images dataset is that there are no reliable class labels which makes it hard to use for object recognition experiments. We created two sets of reliable labels. The CIFAR-10 set has 6000 examples of each of 10 classes and the CIFAR-100 set has 600 examples of each of 100 non-overlapping classes. Using these labels, we show that object recognition is significantly improved by pre-training a layer of features on a large set of unlabeled tiny images.", "year": 2009, "venue": "", "authors": [ "A. Krizhevsky" ], "externalIds": { "MAG": "2945315962", "CorpusId": 18268744 }, "url": "https://www.semanticscholar.org/paper/5d90f06bb70a0a3dced62413346235c02b1aa086", "referenceCount": 15, "citationCount": 31347, "influentialCitationCount": 7621, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "2023. 
Sortednet, a place for every network and every network in its place: Towards a generalized solution for training many-in-one neural networks", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2022. Matryoshka representation learning", "abstract": null, "year": null, "venue": "Advances in Neural Information Processing Systems", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. Vision Transformers Need Registers", "abstract": null, "year": null, "venue": "The Twelfth International Conference on Learning Representations", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Confidence intervals uncovered: Are we ready for real-world medical imaging AI?": { "paper_title": "Confidence intervals uncovered: Are we ready for real-world medical imaging AI?", "arxiv_id": "2409.17763v2", "keyword": "transformer", "authors": [ "Evangelia Christodoulou", "Annika Reinke", "Rola Houhou", "Piotr Kalinowski", "Selen Erkan", "Carole H. Sudre", "Ninon Burgos", "Sofiène Boutaj", "Sophie Loizillon", "Maëlys Solal", "Nicola Rieke", "Veronika Cheplygina", "Michela Antonelli", "Leon D. Mayer", "Minu D. Tizabi", "M. Jorge Cardoso", "Amber Simpson", "Paul F. Jäger", "Annette Kopp-Schneider", "Gaël Varoquaux", "Olivier Colliot", "Lena Maier-Hein" ], "references": [ { "title": "TRIPOD+AI statement: updated guidance for reporting clinical prediction models that use regression or machine learning methods", "abstract": "The TRIPOD (Transparent Reporting of a multivariable prediction model for Individual Prognosis Or Diagnosis) statement was published in 2015 to provide the minimum reporting recommendations for studies developing or evaluating the performance of a prediction model. Methodological advances in the field of prediction have since included the widespread use of artificial intelligence (AI) powered by machine learning methods to develop prediction models. An update to the TRIPOD statement is thus needed. TRIPOD+AI provides harmonised guidance for reporting prediction model studies, irrespective of whether regression modelling or machine learning methods have been used. The new checklist supersedes the TRIPOD 2015 checklist, which should no longer be used. This article describes the development of TRIPOD+AI and presents the expanded 27 item checklist with more detailed explanation of each reporting recommendation, and the TRIPOD+AI for Abstracts checklist. TRIPOD+AI aims to promote the complete, accurate, and transparent reporting of studies that develop a prediction model or evaluate its performance. Complete reporting will facilitate study appraisal, model evaluation, and model implementation.", "year": 2024, "venue": "British medical journal", "authors": [ "Gary S. Collins", "K. Moons", "P. Dhiman", "R. Riley", "A. L. Beam", "Ben Van Calster", "Marzyeh Ghassemi", "Xiaoxuan Liu", "Johannes B Reitsma", "M. van Smeden", "A. Boulesteix", "J. Camaradou", "L. Celi", "S. Denaxas", "A. Denniston", "Ben Glocker", "Robert M Golub", "Hugh Harvey", "G. Heinze", "Michael M. Hoffman", "A. 
Kengne", "Emily Lam", "Naomi Lee", "Elizabeth W Loder", "Lena Maier-Hein", "B. Mateen", "M. Mccradden", "Lauren Oakden-Rayner", "Johan Ordish", "Richard Parnell", "Sherri Rose", "Karandeep Singh", "L. Wynants", "P. Logullo" ], "externalIds": { "PubMedCentral": "11019967", "DOI": "10.1136/bmj-2023-078378", "CorpusId": 269184445, "PubMed": "38626948" }, "url": "https://www.semanticscholar.org/paper/6b0570e66cdf79704461e1f8a07dc761ac10d6b6", "referenceCount": 101, "citationCount": 78, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Confidence intervals for performance estimates in 3D medical image segmentation", "abstract": "Medical segmentation models are evaluated empirically. As such an evaluation is based on a limited set of example images, it is unavoidably noisy. Beyond a mean performance measure, reporting confidence intervals is thus crucial. However, this is rarely done in medical image segmentation. The width of the confidence interval depends on the test set size and on the spread of the performance measure (its standard-deviation across of the test set). For classification, many test images are needed to avoid wide confidence intervals. Segmentation, however, has not been studied, and it differs by the amount of information brought by a given test image. In this paper, we study the typical confidence intervals in medical image segmentation. We carry experiments on 3D image segmentation using the standard nnU-net framework, two datasets from the Medical Decathlon challenge and two performance measures: the Dice accuracy and the Hausdorff distance. We show that the parametric confidence intervals are reasonable approximations of the bootstrap estimates for varying test set sizes and spread of the performance metric. Importantly, we show that the test size needed to achieve a given precision is often much lower than for classification tasks. Typically, a 1% wide confidence interval requires about 100-200 test samples when the spread is low (standard-deviation around 3%). More difficult segmentation tasks may lead to higher spreads and require over 1000 samples.", "year": 2023, "venue": "arXiv.org", "authors": [ "Rosana El Jurdi", "G. Varoquaux", "O. Colliot" ], "externalIds": { "DBLP": "journals/corr/abs-2307-10926", "ArXiv": "2307.10926", "DOI": "10.48550/arXiv.2307.10926", "CorpusId": 259991430 }, "url": "https://www.semanticscholar.org/paper/92b3f772af965d94198db5ba874f46e71c8f8610", "referenceCount": 19, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Machine learning for medical imaging: methodological failures and recommendations for the future", "abstract": null, "year": 2022, "venue": "npj Digital Medicine", "authors": [ "G. Varoquaux", "V. Cheplygina" ], "externalIds": { "PubMedCentral": "9005663", "DBLP": "journals/npjdm/VaroquauxC22", "DOI": "10.1038/s41746-022-00592-y", "CorpusId": 232269760, "PubMed": "35413988" }, "url": "https://www.semanticscholar.org/paper/53e1484bba5237351f708e10e9e38b71a169f7c7", "referenceCount": 110, "citationCount": 290, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "The Medical Segmentation Decathlon", "abstract": null, "year": 2021, "venue": "Nature Communications", "authors": [ "M. Antonelli", "Annika Reinke", "S. Bakas", "K. Farahani", "AnnetteKopp-Schneider", "B. Landman", "G. Litjens", "Bjoern H Menze", "O. 
Ronneberger", "Ronald M.Summers", "B. Ginneken", "M. Bilello", "Patrick Bilic", "P. Christ", "R. Do", "M. Gollub", "S. Heckers", "H. Huisman", "W. Jarnagin", "M. McHugo", "S. Napel", "Jennifer S. Goli Pernicka", "K. Rhode", "C. Tobon-Gomez", "Eugene Vorontsov", "J. Meakin", "S. Ourselin", "M. Wiesenfarth", "P. Arbeláez", "Byeonguk Bae", "Sihong Chen", "L. Daza", "Jian-Jun Feng", "Baochun He", "Fabian Isensee", "Yuanfeng Ji", "F. Jia", "Namkug Kim", "Ildoo Kim", "D. Merhof", "A. Pai", "Beomhee Park", "Mathias Perslev", "R. Rezaiifar", "Oliver Rippel", "Ignacio Sarasua", "Wei Shen", "Jaemin Son", "C. Wachinger", "Liansheng Wang", "Yan Wang", "Yingda Xia", "Daguang Xu", "Zhanwei Xu", "Yefeng Zheng", "Amber L. Simpson", "L. Maier-Hein", "M. Cardoso" ], "externalIds": { "ArXiv": "2106.05735", "DBLP": "journals/corr/abs-2106-05735", "PubMedCentral": "9287542", "DOI": "10.1038/s41467-022-30695-9", "CorpusId": 235390655, "PubMed": "35840566" }, "url": "https://www.semanticscholar.org/paper/979a9f247700d00ff2c3f0612d5eb001379f93c8", "referenceCount": 51, "citationCount": 678, "influentialCitationCount": 53, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science", "Engineering" ] }, { "title": "Accounting for Variance in Machine Learning Benchmarks", "abstract": "Strong empirical evidence that one machine-learning algorithm A outperforms another one B ideally calls for multiple trials optimizing the learning pipeline over sources of variation such as data sampling, data augmentation, parameter initialization, and hyperparameters choices. This is prohibitively expensive, and corners are cut to reach conclusions. We model the whole benchmarking process, revealing that variance due to data sampling, parameter initialization and hyperparameter choice impact markedly the results. We analyze the predominant comparison methods used today in the light of this variance. We show a counter-intuitive result that adding more sources of variation to an imperfect estimator approaches better the ideal estimator at a 51 times reduction in compute cost. Building on these results, we study the error rate of detecting improvements, on five different deep-learning tasks/architectures. This study leads us to propose recommendations for performance comparisons.", "year": 2021, "venue": "Conference on Machine Learning and Systems", "authors": [ "Xavier Bouthillier", "Pierre Delaunay", "Mirko Bronzi", "Assya Trofimov", "B. Nichyporuk", "Justin Szeto", "Naz Sepah", "Edward Raff", "Kanika Madan", "Vikram S. Voleti", "Samira Ebrahimi Kahou", "Vincent Michalski", "Dmitriy Serdyuk", "T. Arbel", "C. Pal", "G. Varoquaux", "Pascal Vincent" ], "externalIds": { "DBLP": "journals/corr/abs-2103-03098", "ArXiv": "2103.03098", "CorpusId": 232110457 }, "url": "https://www.semanticscholar.org/paper/9ceae85a0bd4231cd2efe14884c40b7bc04d3dac", "referenceCount": 76, "citationCount": 130, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Improving Reproducibility in Machine Learning Research (A Report from the NeurIPS 2019 Reproducibility Program)", "abstract": "One of the challenges in machine learning research is to ensure that presented and published results are sound and reliable. Reproducibility, that is obtaining similar results as presented in a paper or talk, using the same code and data (when available), is a necessary step to verify the reliability of research findings. 
Reproducibility is also an important step to promote open and accessible research, thereby allowing the scientific community to quickly integrate new findings and convert ideas to practice. Reproducibility also promotes the use of robust experimental workflows, which potentially reduce unintentional errors. In 2019, the Neural Information Processing Systems (NeurIPS) conference, the premier international conference for research in machine learning, introduced a reproducibility program, designed to improve the standards across the community for how we conduct, communicate, and evaluate machine learning research. The program contained three components: a code submission policy, a community-wide reproducibility challenge, and the inclusion of the Machine Learning Reproducibility checklist as part of the paper submission process. In this paper, we describe each of these components, how it was deployed, as well as what we were able to learn from this initiative.", "year": 2020, "venue": "Journal of machine learning research", "authors": [ "Joelle Pineau", "Philippe Vincent-Lamarre", "Koustuv Sinha", "V. Larivière", "A. Beygelzimer", "Florence d'Alché-Buc", "E. Fox", "H. Larochelle" ], "externalIds": { "ArXiv": "2003.12206", "MAG": "3013688454", "DBLP": "journals/corr/abs-2003-12206", "CorpusId": 214693121 }, "url": "https://www.semanticscholar.org/paper/5e331bf7887e2e634bf5b12788849d2d2b74bc7f", "referenceCount": 44, "citationCount": 307, "influentialCitationCount": 19, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Methods and open-source toolkit for analyzing and visualizing challenge results", "abstract": null, "year": 2019, "venue": "Scientific Reports", "authors": [ "M. Wiesenfarth", "Annika Reinke", "B. Landman", "M. Cardoso", "L. Maier-Hein", "A. Kopp-Schneider" ], "externalIds": { "ArXiv": "1910.05121", "MAG": "2979330839", "PubMedCentral": "7841186", "DBLP": "journals/corr/abs-1910-05121", "DOI": "10.1038/s41598-021-82017-6", "CorpusId": 204402481, "PubMed": "33504883" }, "url": "https://www.semanticscholar.org/paper/959d3158e9e28c066134f7c333a6972f62ae0c73", "referenceCount": 40, "citationCount": 81, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics", "Medicine" ] }, { "title": "A large annotated medical image dataset for the development and evaluation of segmentation algorithms", "abstract": "Semantic segmentation of medical images aims to associate a pixel with a label in a medical image without human initialization. The success of semantic segmentation algorithms is contingent on the availability of high-quality imaging data with corresponding labels provided by experts. We sought to create a large collection of annotated medical image datasets of various clinically relevant anatomies available under open source license to facilitate the development of semantic segmentation algorithms. Such a resource would allow: 1) objective assessment of general-purpose segmentation methods through comprehensive benchmarking and 2) open and free access to medical image data for any researcher interested in the problem domain. Through a multi-institutional effort, we generated a large, curated dataset representative of several highly variable segmentation tasks that was used in a crowd-sourced challenge - the Medical Segmentation Decathlon held during the 2018 Medical Image Computing and Computer Aided Interventions Conference in Granada, Spain. 
Here, we describe these ten labeled image datasets so that these data may be effectively reused by the research community.", "year": 2019, "venue": "arXiv.org", "authors": [ "Amber L. Simpson", "M. Antonelli", "S. Bakas", "M. Bilello", "K. Farahani", "B. Ginneken", "A. Kopp-Schneider", "B. Landman", "G. Litjens", "Bjoern H Menze", "O. Ronneberger", "R. Summers", "Patrick Bilic", "P. Christ", "R. Do", "M. Gollub", "Jennifer Golia-Pernicka", "S. Heckers", "W. Jarnagin", "M. McHugo", "S. Napel", "Eugene Vorontsov", "L. Maier-Hein", "M. Jorge Cardoso" ], "externalIds": { "MAG": "2915126261", "ArXiv": "1902.09063", "DBLP": "journals/corr/abs-1902-09063", "CorpusId": 67855790 }, "url": "https://www.semanticscholar.org/paper/4654aa505e5bcdb089d0df202cd7ceabc9d2d41f", "referenceCount": 35, "citationCount": 741, "influentialCitationCount": 87, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Why rankings of biomedical image analysis competitions should be interpreted with care", "abstract": null, "year": 2018, "venue": "Nature Communications", "authors": [ "L. Maier-Hein", "Matthias Eisenmann", "Annika Reinke", "Sinan Onogur", "Marko Stankovic", "Patrick Godau", "T. Arbel", "H. Bogunović", "A. Bradley", "A. Carass", "Carolin Feldmann", "Alejandro F Frangi", "Peter M. Full", "B. van Ginneken", "A. Hanbury", "Katrin Honauer", "M. Kozubek", "B. Landman", "K. März", "Oskar Maier", "Klaus Maier-Hein", "Bjoern H Menze", "H. Müller", "P. Neher", "W. Niessen", "N. Rajpoot", "G. Sharp", "K. Sirinukunwattana", "S. Speidel", "C. Stock", "D. Stoyanov", "A. Taha", "F. van der Sommen", "Ching-Wei Wang", "M. Weber", "G. Zheng", "P. Jannin", "A. Kopp-Schneider" ], "externalIds": { "PubMedCentral": "6284017", "MAG": "2973627110", "DBLP": "journals/corr/abs-1806-02051", "ArXiv": "1806.02051", "DOI": "10.1038/s41467-018-07619-7", "CorpusId": 54448301, "PubMed": "30523263" }, "url": "https://www.semanticscholar.org/paper/91d23b702b9a59bf75c5162c3250017b526c0e69", "referenceCount": 142, "citationCount": 266, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Time for a change: a tutorial for comparing multiple classifiers through Bayesian analysis", "abstract": "The machine learning community adopted the use of null hypothesis significance testing (NHST) in order to ensure the statistical validity of results. Many scientific fields however realized the shortcomings of frequentist reasoning and in the most radical cases even banned its use in publications. We should do the same: just as we have embraced the Bayesian paradigm in the development of new machine learning methods, so we should also use it in the analysis of our own results. We argue for abandonment of NHST by exposing its fallacies and, more importantly, offer better - more sound and useful - alternatives for it.", "year": 2016, "venue": "Journal of machine learning research", "authors": [ "A. Benavoli", "Giorgio Corani", "J. 
Demšar", "Marco Zaffalon" ], "externalIds": { "MAG": "2963313239", "DBLP": "journals/corr/BenavoliCDZ16", "ArXiv": "1606.04316", "CorpusId": 9234993 }, "url": "https://www.semanticscholar.org/paper/8ce2c4a374e8b37e3eef080c956f22cfc6ea25d6", "referenceCount": 49, "citationCount": 377, "influentialCitationCount": 40, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Clinical Trials: Superiority-Testing", "abstract": "One of the flaws of modern statistics is that it can produce statistically significant results even if treatment effects are very small. The objective of this work was 1) to provide examples of studies that have been published as unequivocally positive, although the treatment effects were substantially smaller than expected; and 2) to introduce superiority-testing as a novel statistical approach avoiding the risk of statistically significant but clinically irrelevant results. We selected from recent volumes of the Lancet six original articles of controlled clinical trials that were reported as being positive studies, although they did not meet their expected power. The studies produced only 53 to 83% of the statistical power expected, while the new treatments produced only 46 to 86% of the magnitude of response expected. Instead of a p-value of 0.05 as cut-off criterion for demonstrating superiority a stricter criterion seems to be needed. For that purpose, similar to equivalence-testing and non-inferiority-testing, prior boundaries of superiority have to be defined in the protocol. If the 95% interval of the study turns out to be entirely within these boundaries, then superiority is accepted. Nowadays, too many borderline significant studies are being reported as convincingly positive studies. This is misleading, as it produces overestimated expectations from new treatments. Superiority-testing, as introduced in this paper, is a simple method to avoid this problem.", "year": 2008, "venue": "", "authors": [ "G. Cleophas", "M. Cleophas", "T. 
Cleophas" ], "externalIds": { "MAG": "1970236331", "DOI": "10.1080/10601330701885108", "CorpusId": 70784905 }, "url": "https://www.semanticscholar.org/paper/4af1e9dc71353d1fa400e82427204dcf5f762bba", "referenceCount": 11, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "MICCAI reproducibility checklist", "abstract": null, "year": 2021, "venue": "miccai2021.org/files/downloads/MICCAI2021-Reproducibility-Checklist.pdf", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "21 CFR 892.2060 radiological computer-assisted diagnostic software for lesions suspicious of cancer", "abstract": null, "year": 2020, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "CFR: 21 CFR 892.2070", "abstract": null, "year": 2020, "venue": "medical", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Recommended content and format of non-clinical bench performance testing information in premarket submissions: guidance for industry and food and drug administration staff", "abstract": null, "year": 2019, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Statistical Guidance on Reporting Results from Studies Evaluating Diagnostic Tests; Draft Guidance for Industry and FDA Reviewers Draft Guidance-Not for Implementation", "abstract": null, "year": 2003, "venue": "", "authors": [], "externalIds": { "CorpusId": 38146618 }, "url": "https://www.semanticscholar.org/paper/82c3dd1a2603141a423bc961aefbcff2e961fa3a", "referenceCount": 27, "citationCount": 76, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "A regulatory science perspective on performance assessment of machine learning algorithms in imaging", "abstract": null, "year": null, "venue": "Machine Learning for Brain Disorders", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Artificial intelligence and machine learning (AI/ML)-enabled medical devices", "abstract": null, "year": null, "venue": "AI", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Decentralized Federated Learning with Gradient Tracking over Time-Varying Directed Networks": { "paper_title": "Decentralized Federated Learning with Gradient Tracking over Time-Varying Directed Networks", "arxiv_id": "2409.17189v1", "keyword": "federated learning", "authors": [ "Duong Thuy Anh Nguyen", "Su Wang", "Duong Tung Nguyen", "Angelia Nedich", "H. 
Vincent Poor" ], "references": [ { "title": "Decentralized Federated Learning Over Imperfect Communication Channels", "abstract": "This paper analyzes the impact of imperfect communication channels on decentralized federated learning (D-FL) and subsequently determines the optimal number of local aggregations per training round, adapting to the network topology and imperfect channels. We start by deriving the bias of locally aggregated D-FL models under imperfect channels from the ideal global models requiring perfect channels and aggregations. The bias reveals that excessive local aggregations can accumulate communication errors and degrade convergence. Another important aspect is that we analyze a convergence upper bound of D-FL based on the bias. By minimizing the bound, the optimal number of local aggregations is identified to balance a trade-off with accumulation of communication errors in the absence of knowledge of the channels. With this knowledge, the impact of communication errors can be alleviated, allowing the convergence upper bound to decrease throughout aggregations. Experiments validate our convergence analysis and also identify the optimal number of local aggregations on two widely considered image classification tasks. It is seen that D-FL, with an optimal number of local aggregations, can outperform its potential alternatives by over 10% in training accuracy.", "year": 2024, "venue": "IEEE Transactions on Communications", "authors": [ "Weicai Li", "Tiejun Lv", "Wei Ni", "Jingbo Zhao", "Ekram Hossain", "H. V. Poor" ], "externalIds": { "ArXiv": "2405.12894", "DBLP": "journals/corr/abs-2405-12894", "DOI": "10.1109/tcomm.2024.3407208", "CorpusId": 269930440 }, "url": "https://www.semanticscholar.org/paper/ced606ecdb0de1ed706603368610aca63a974e51", "referenceCount": 33, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Scientific discovery in the age of artificial intelligence", "abstract": null, "year": 2023, "venue": "Nature", "authors": [ "Hanchen Wang", "Tianfan Fu", "Yuanqi Du", "Wenhao Gao", "Kexin Huang", "Ziming Liu", "P. Chandak", "Shengchao Liu", "Peter Van Katwyk", "Andreea Deac", "Anima Anandkumar", "K. Bergen", "Carla P. Gomes", "Shirley Ho", "Pushmeet Kohli", "Joan Lasenby", "J. Leskovec", "Tie-Yan Liu", "A. Manrai", "Debora S. Marks", "Bharath Ramsundar", "Le Song", "Jimeng Sun", "Jian Tang", "Petar Velickovic", "Max Welling", "Linfeng Zhang", "Connor W. Coley", "Y. Bengio", "M. Zitnik" ], "externalIds": { "DBLP": "journals/nature/WangFD0HLCLKDAB23", "DOI": "10.1038/s41586-023-06221-2", "CorpusId": 260384616, "PubMed": "37532811" }, "url": "https://www.semanticscholar.org/paper/f08060425aa8a212d74185ee23a08329b89abcd2", "referenceCount": 269, "citationCount": 426, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Decentralized Federated Learning: A Survey and Perspective", "abstract": "Federated learning (FL) has been gaining attention for its ability to share knowledge while maintaining user data, protecting privacy, increasing learning efficiency, and reducing communication overhead. Decentralized FL (DFL) is a decentralized network architecture that eliminates the need for a central server in contrast to centralized FL (CFL). DFL enables direct communication between clients, resulting in significant savings in communication resources. 
In this paper, a comprehensive survey and profound perspective are provided for DFL. First, a review of the methodology, challenges, and variants of CFL is conducted, laying the background of DFL. Then, a systematic and detailed perspective on DFL is introduced, including iteration order, communication protocols, network topologies, paradigm proposals, and temporal variability. Next, based on the definition of DFL, several extended variants and categorizations are proposed with state-of-the-art (SOTA) technologies. Lastly, in addition to summarizing the current challenges in the DFL, some possible solutions and future research directions are also discussed.", "year": 2023, "venue": "IEEE Internet of Things Journal", "authors": [ "Liangqi Yuan", "Lichao Sun", "P. Yu", "Ziran Wang" ], "externalIds": { "ArXiv": "2306.01603", "DBLP": "journals/corr/abs-2306-01603", "DOI": "10.48550/arXiv.2306.01603", "CorpusId": 259064130 }, "url": "https://www.semanticscholar.org/paper/f48d40617405e6fdd9b720d91a0bc557f9c51900", "referenceCount": 241, "citationCount": 33, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Distributed Stochastic Optimization with Gradient Tracking over Time-Varying Directed Networks", "abstract": "We study a distributed method called SAB-TV, which employs gradient tracking to collaboratively minimize the strongly-convex sum of smooth local cost functions for networked agents communicating over a time-varying directed graph. Each agent, assumed to have access to a stochastic first-order oracle for obtaining an unbiased estimate of the gradient of its local cost function, maintains an auxiliary variable to asymptotically track the stochastic gradient of the global cost. The optimal decision and gradient tracking are updated over time through limited information exchange with local neighbors using row- and column-stochastic weights, guaranteeing both consensus and optimality. With a sufficiently small constant step-size, we demonstrate that, in expectation, SAB-TV converges linearly to a neighborhood of the optimal solution. Numerical simulations illustrate the effectiveness of the proposed algorithm.", "year": 2023, "venue": "Asilomar Conference on Signals, Systems and Computers", "authors": [ "Duong Thuy Anh Nguyen", "D. Nguyen", "A. Nedich" ], "externalIds": { "DBLP": "conf/acssc/NguyenNN23", "ArXiv": "2305.00629", "DOI": "10.1109/IEEECONF59524.2023.10477004", "CorpusId": 258426755 }, "url": "https://www.semanticscholar.org/paper/40e9214666328a55873cc5d8afdd0338d540ceb5", "referenceCount": 41, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "CrowdCache: A Decentralized Game-Theoretic Framework for Mobile Edge Content Sharing", "abstract": "Mobile edge computing (MEC) is a promising solution for enhancing the user experience, minimizing content delivery expenses, and reducing backhaul traffic. In this paper, we propose a novel privacy-preserving decentralized game-theoretic framework for resource crowdsourcing in MEC. Our framework models the interactions between a content provider (CP) and multiple mobile edge device users (MEDs) as a non-cooperative game, in which MEDs offer idle storage resources for content caching in exchange for rewards. 
We introduce efficient decentralized gradient play algorithms for Nash equilibrium (NE) computation by exchanging local information among neighboring MEDs only, thus preventing attackers from learning users' private information. The key challenge in designing such algorithms is that communication among MEDs is not fixed and is facilitated by a sequence of undirected time-varying graphs. Our approach achieves linear convergence to the NE without imposing any assumptions on the values of parameters in the local objective functions, such as requiring strong monotonicity to be stronger than its dependence on other MEDs' actions, which is commonly required in existing literature when the graph is directed time-varying. Extensive simulations demonstrate the effectiveness of our approach in achieving efficient resource outsourcing decisions while preserving the privacy of the edge devices.", "year": 2023, "venue": "International Symposium on Modeling and Optimization in Mobile, Ad-Hoc and Wireless Networks", "authors": [ "Duong Thuy Anh Nguyen", "Jiaming Cheng", "D. Nguyen", "A. Nedich" ], "externalIds": { "ArXiv": "2304.13246", "DBLP": "conf/wiopt/NguyenCNN23", "DOI": "10.23919/WiOpt58741.2023.10349816", "CorpusId": 258331525 }, "url": "https://www.semanticscholar.org/paper/ccd73c71cf34a92276a4029cdc4153489c79a52b", "referenceCount": 25, "citationCount": 9, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multi-Source to Multi-Target Decentralized Federated Domain Adaptation", "abstract": "Heterogeneity across devices in federated learning (FL) typically refers to statistical (e.g., non-i.i.d. data distributions) and resource (e.g., communication bandwidth) dimensions. In this paper, we focus on another important dimension that has received less attention: varying quantities/distributions of labeled and unlabeled data across devices. In order to leverage all data, we develop a decentralized federated domain adaptation methodology which considers the transfer of ML models from devices with high quality labeled data (called sources) to devices with low quality or unlabeled data (called targets). Our methodology, Source-Target Determination and Link Formation (ST-LF), optimizes both (i) classification of devices into sources and targets and (ii) source-target link formation, in a manner that considers the trade-off between ML model accuracy and communication energy efficiency. To obtain a concrete objective function, we derive a measurable generalization error bound that accounts for estimates of source-target hypothesis deviations and divergences between data distributions. The resulting optimization problem is a mixed-integer signomial program, a class of NP-hard problems, for which we develop an algorithm based on successive convex approximations to solve it tractably. Subsequent numerical evaluations of ST-LF demonstrate that it improves classification accuracy and energy efficiency over state-of-the-art baselines.", "year": 2023, "venue": "IEEE Transactions on Cognitive Communications and Networking", "authors": [ "Su Wang", "Seyyedali Hosseinalipour", "Christopher G. 
Brinton" ], "externalIds": { "DBLP": "journals/corr/abs-2304-12422", "ArXiv": "2304.12422", "DOI": "10.1109/TCCN.2024.3352976", "CorpusId": 258309364 }, "url": "https://www.semanticscholar.org/paper/78266e02b7620d9bef8b3efbb89fa134cc84aaee", "referenceCount": 60, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "On the Convergence of Decentralized Federated Learning Under Imperfect Information Sharing", "abstract": "Most of the current literature focused on centralized learning is centered around the celebrated average-consensus paradigm and less attention is devoted to scenarios where the communication between the agents may be imperfect. This letter presents three different algorithms of Decentralized Federated Learning (DFL) in the presence of imperfect information sharing modeled as noisy communication channels. The first algorithm, Federated Noisy Decentralized Learning (FedNDL1) comes from the literature, where the noise is added to the algorithm parameters to simulate the scenario of the presence of noisy communication channels. This algorithm shares parameters to form a consensus with the clients based on a communication graph topology through a noisy communication channel. The proposed second algorithm (FedNDL2) is similar to the first algorithm but with added noise to the parameters and it performs the gossip averaging before the gradient optimization. The proposed third algorithm (FedNDL3), on the other hand, shares the gradients through noisy communication channels instead of the parameters. Theoretical and experimental results show that under imperfect information sharing, the third scheme that mixes gradients is more robust in the presence of a noisy channel compared with the algorithms from the literature that mix the parameters.", "year": 2023, "venue": "IEEE Control Systems Letters", "authors": [ "Vishnu Pandi Chellapandi", "Antesh Upadhyay", "Abolfazl Hashemi", "S. Żak" ], "externalIds": { "DBLP": "journals/csysl/ChellapandiUHZ23", "ArXiv": "2303.10695", "DOI": "10.1109/LCSYS.2023.3290470", "CorpusId": 257631923 }, "url": "https://www.semanticscholar.org/paper/09ac1d8aa11dc30d976a86f553a9e2bf0332736a", "referenceCount": 42, "citationCount": 23, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Accelerated $AB$/Push–Pull Methods for Distributed Optimization Over Time-Varying Directed Networks", "abstract": "This article investigates a novel approach for solving the distributed optimization problem, in which multiple agents collaborate to find the global decision that minimizes the sum of their individual cost functions. First, the $AB$/push–pull (ABPP) gradient-based algorithm is considered, which employs row- and column-stochastic weights simultaneously to track the optimal decision and the gradient of the global cost function, ensuring consensus on the optimal decision. Building on this algorithm, we then develop a general algorithm that incorporates acceleration techniques, such as heavy-ball momentum and Nesterov momentum, as well as their combination with nonidentical momentum parameters. Previous literature has established the effectiveness of acceleration methods for various gradient-based distributed algorithms and demonstrated linear convergence for static directed communication networks. 
In contrast, we focus on time-varying directed communication networks and establish the linear convergence of the methods to the optimal solution, when the agents' cost functions are smooth and strongly convex. In addition, we provide explicit bounds for the step-size value and momentum parameters, based on the properties of the cost functions, the mixing matrices, and the graph connectivity structures. Our numerical results illustrate the benefits of the proposed acceleration techniques on the ABPP algorithm.", "year": 2023, "venue": "IEEE Transactions on Control of Network Systems", "authors": [ "Duong Thuy Anh Nguyen", "D. Nguyen", "A. Nedich" ], "externalIds": { "ArXiv": "2302.01214", "DOI": "10.1109/TCNS.2023.3338236", "CorpusId": 256503624 }, "url": "https://www.semanticscholar.org/paper/4d1d5cceef5f78cd2a81b2eff282ea6e5ab80481", "referenceCount": 50, "citationCount": 14, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Decentralized Federated Learning for Over-Parameterized Models", "abstract": "Modern machine learning, especially deep learning, features models that are often highly expressive and over-parameterized. They can interpolate the data by driving the empirical loss close to zero. We analyze the convergence rate of decentralized stochastic gradient descent (SGD), which is at the core of decentralized federated learning (DFL), for these over-parameterized models. Our analysis covers the setting of decentralized SGD with time-varying networks, local updates and heterogeneous data. We establish strong convergence guarantees with or without the assumption of convex objectives that either improves upon the existing literature or is the first for the regime.", "year": 2022, "venue": "IEEE Conference on Decision and Control", "authors": [ "Tiancheng Qin", "S. Etesami", "César A. Uribe" ], "externalIds": { "DBLP": "conf/cdc/Qin0U22", "DOI": "10.1109/CDC51059.2022.9992924", "CorpusId": 255599124 }, "url": "https://www.semanticscholar.org/paper/0e9ae84396144a0e91126e2336b9bd58d5049833", "referenceCount": 24, "citationCount": 8, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Privacy-preserving Decentralized Federated Learning over Time-varying Communication Graph", "abstract": "Establishing how a set of learners can provide privacy-preserving federated learning in a fully decentralized (peer-to-peer, no coordinator) manner is an open problem. We propose the first privacy-preserving consensus-based algorithm for the distributed learners to achieve decentralized global model aggregation in an environment of high mobility, where participating learners and the communication graph between them may vary during the learning process. In particular, whenever the communication graph changes, the Metropolis-Hastings method [69] is applied to update the weighted adjacency matrix based on the current communication topology. In addition, the Shamir’s secret sharing (SSS) scheme [61] is integrated to facilitate privacy in reaching consensus of the global model. The article establishes the correctness and privacy properties of the proposed algorithm. The computational efficiency is evaluated by a simulation built on a federated learning framework with a real-world dataset.", "year": 2022, "venue": "ACM Transactions on Privacy and Security", "authors": [ "Yang Lu", "Zhengxin Yu", "N. 
Suri" ], "externalIds": { "DBLP": "journals/corr/abs-2210-00325", "ArXiv": "2210.00325", "DOI": "10.1145/3591354", "CorpusId": 252683591 }, "url": "https://www.semanticscholar.org/paper/97a3b292bb40f8eff4db39994c54c842486f8427", "referenceCount": 79, "citationCount": 10, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "AB\n /Push-Pull method for distributed optimization in time-varying directed networks", "abstract": "In this paper, we study the distributed optimization problem for a system of agents embedded in time-varying directed communication networks. Each agent has its own cost function and agents cooperate to determine the global decision that minimizes the summation of all individual cost functions. We consider the so-called push-pull gradient-based algorithm (termed as AB/Push-Pull) which employs both row- and column-stochastic weights simultaneously to track the optimal decision and the gradient of the global cost while ensuring consensus and optimality. We show that the algorithm converges linearly to the optimal solution over a time-varying directed network for a constant stepsize when the agent's cost function is smooth and strongly convex. The linear convergence of the method has been shown in Saadatniaki et al. (2020), where the multi-step consensus contraction parameters for row- and column-stochastic mixing matrices are not directly related to the underlying graph structure, and the explicit range for the stepsize value is not provided. With respect to Saadatniaki et al. (2020), the novelty of this work is twofold: (1) we establish the one-step consensus contraction for both row- and column-stochastic mixing matrices with the contraction parameters given explicitly in terms of the graph diameter and other graph properties; and (2) we provide explicit upper bounds for the stepsize value in terms of the properties of the cost functions, the mixing matrices, and the graph connectivity structure.", "year": 2022, "venue": "Optimization Methods and Software", "authors": [ "A. Nedich", "Duong Thuy Anh Nguyen", "D. Nguyen" ], "externalIds": { "ArXiv": "2209.06974", "DOI": "10.1080/10556788.2023.2261602", "CorpusId": 252280352 }, "url": "https://www.semanticscholar.org/paper/a5a67ba2d73dcfe2fd2ed63c42642603e42a0c8c", "referenceCount": 40, "citationCount": 12, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "An Improved Analysis of Gradient Tracking for Decentralized Machine Learning", "abstract": "We consider decentralized machine learning over a network where the training data is distributed across $n$ agents, each of which can compute stochastic model updates on their local data. The agent's common goal is to find a model that minimizes the average of all local loss functions. While gradient tracking (GT) algorithms can overcome a key challenge, namely accounting for differences between workers' local data distributions, the known convergence rates for GT algorithms are not optimal with respect to their dependence on the mixing parameter $p$ (related to the spectral gap of the connectivity matrix). We provide a tighter analysis of the GT method in the stochastic strongly convex, convex and non-convex settings. 
We improve the dependency on $p$ from $\\mathcal{O}(p^{-2})$ to $\\mathcal{O}(p^{-1}c^{-1})$ in the noiseless case and from $\\mathcal{O}(p^{-3/2})$ to $\\mathcal{O}(p^{-1/2}c^{-1})$ in the general stochastic case, where $c \\geq p$ is related to the negative eigenvalues of the connectivity matrix (and is a constant in most practical applications). This improvement was possible due to a new proof technique which could be of independent interest.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Anastasia Koloskova", "Tao Lin", "Sebastian U. Stich" ], "externalIds": { "ArXiv": "2202.03836", "DBLP": "conf/nips/KoloskovaLS21", "CorpusId": 245019876 }, "url": "https://www.semanticscholar.org/paper/964cc7f1fe6f240e2aa0b659c4f2e46cb6bebaf9", "referenceCount": 72, "citationCount": 90, "influentialCitationCount": 16, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Distributed Nash Equilibrium Seeking over Time-Varying Directed Communication Networks", "abstract": "We study distributed algorithms for finding a Nash equilibrium (NE) in a class of non-cooperative convex games under partial information. Specifically, each agent has access only to its own smooth local cost function and can receive information from its neighbors in a time-varying directed communication network. To this end, we propose a distributed gradient play algorithm to compute a NE by utilizing local information exchange among the players. In this algorithm, every agent performs a gradient step to minimize its own cost function while sharing and retrieving information locally among its neighbors. The existing methods impose strong assumptions such as balancedness of the mixing matrices and global knowledge of the network communication structure, including Perron-Frobenius eigenvector of the adjacency matrix and other graph connectivity constants. In contrast, our approach relies only on a reasonable and widely-used assumption of row-stochasticity of the mixing matrices. We analyze the algorithm for time-varying directed graphs and prove its convergence to the NE, when the agents' cost functions are strongly convex and have Lipschitz continuous gradients. Numerical simulations are performed for a Nash-Cournot game to illustrate the efficacy of the proposed algorithm.", "year": 2022, "venue": "arXiv.org", "authors": [ "Duong Thuy Anh Nguyen", "D. Nguyen", "Angelia Nedi'c" ], "externalIds": { "ArXiv": "2201.02323", "DBLP": "journals/corr/abs-2201-02323", "CorpusId": 245827899 }, "url": "https://www.semanticscholar.org/paper/1368a4e73170fc37dec78d420a4dd19397f8e6e5", "referenceCount": 32, "citationCount": 18, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Federated Learning for Autonomous Driving", "abstract": "Autonomous driving is an active research topic in both academia and industry. However, most of the existing solutions focus on improving the accuracy by training learnable models with centralized large-scale data. Therefore, these methods do not take into account the user’s privacy. In this paper, we present a new approach to learn autonomous driving policy while respecting privacy concerns. We propose a peer-to-peer Deep Federated Learning (DFL) approach to train deep architectures in a fully decentralized manner and remove the need for central orchestration. 
We design a new Federated Autonomous Driving network (FADNet) that can improve the model stability, ensure convergence, and handle imbalanced data distribution problems while is being trained with federated learning methods. Intensively experimental results on three datasets show that our approach with FADNet and DFL achieves superior accuracy compared with other recent methods. Furthermore, our approach can maintain privacy by not collecting user data to a central server. Our source code can be found at: https://github.com/aioz-ai/FADNet", "year": 2021, "venue": "2022 IEEE Intelligent Vehicles Symposium (IV)", "authors": [ "A. Nguyen", "Tuong Khanh Long Do", "Minh-Ngoc Tran", "Binh X. Nguyen", "C. Duong", "T. Phan", "Erman Tjiputra", "Quang D. Tran" ], "externalIds": { "DBLP": "journals/corr/abs-2110-05754", "ArXiv": "2110.05754", "DOI": "10.1109/iv51971.2022.9827020", "CorpusId": 238634353 }, "url": "https://www.semanticscholar.org/paper/fbf1ad3e430aa5b98bb0efede68b66d94014834c", "referenceCount": 63, "citationCount": 81, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fully Decentralized Federated Learning-Based On-Board Mission for UAV Swarm System", "abstract": "To handle the data explosion in the era of Internet-of-things, it is of interest to investigate the decentralized network, with the aim at relaxing the burden at the central server along with preserving data privacy. In this work, we develop a fully decentralized federated learning (FL) framework with an inexact stochastic parallel random walk alternating direction method of multipliers (ISPW-ADMM). Performing more efficient communication and enhanced privacy preservation compared with the current state-of-the-art, the proposed ISPW-ADMM can be partially immune to the effect of time-varying dynamic network and stochastic data collection, while still in fast convergence. Benefiting from the stochastic gradients and biased first-order moment estimation, the proposed framework can be applied to any decentralized FL tasks over time-varying graphs. Thus, to demonstrate the practicability of such a framework in providing fast convergence, high communication efficiency, noise robustness for a specific on-board mission to some extent, we study the extreme learning machine-based FL model beamforming design in unmanned aerial vehicle communications, as verified by the numerical simulations.", "year": 2021, "venue": "IEEE Communications Letters", "authors": [ "Yue Xiao", "Yu Ye", "Shaocheng Huang", "L. Hao", "Zheng Ma", "Ming Xiao", "S. Mumtaz", "O. Dobre" ], "externalIds": { "DBLP": "journals/icl/XiaoYHHMXMD21", "MAG": "3178974671", "DOI": "10.1109/lcomm.2021.3095362", "CorpusId": 237768937 }, "url": "https://www.semanticscholar.org/paper/2236af1c46f10ca32a9f6d89b7c645f71d9491d9", "referenceCount": 0, "citationCount": 23, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An accelerated distributed optimization algorithm over time-varying digraphs with column-stochastic matrices", "abstract": "In this paper, the unconstrained distributed convex optimization problem over time-varying unbalanced directed graphs with column matrices is considered. To accelerate the existing distributed algorithm, a heavy-ball based convex optimization algorithm is proposed and its convergence proof is provided by the small gain theorem. 
Moreover, a nesterov acceleration method is added in the previous algorithm for further accelerating the convergence rate. Finally, some simulations are presented for illustrating the effectiveness.", "year": 2021, "venue": "Chinese Control and Decision Conference", "authors": [ "Xiasheng Shi", "Hanlin Liu", "Jiahao Chen", "Xuesong Wang" ], "externalIds": { "DOI": "10.1109/CCDC52312.2021.9602039", "CorpusId": 244777181 }, "url": "https://www.semanticscholar.org/paper/2b85e7df0801c83a9e76d642ddb5f817330f036c", "referenceCount": 0, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Decentralized stochastic optimization algorithms using uncoordinated step-sizes over unbalanced directed networks", "abstract": null, "year": 2021, "venue": "Signal Processing", "authors": [ "Jinhui Hu", "Liang Ran", "Zhenyuan Du", "Huaqing Li" ], "externalIds": { "DBLP": "journals/sigpro/HuRDL21", "MAG": "3107688171", "DOI": "10.1016/j.sigpro.2020.107894", "CorpusId": 229414098 }, "url": "https://www.semanticscholar.org/paper/5b49090070f4f08c852d8dc8eac876b15474e870", "referenceCount": 18, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Device Sampling for Heterogeneous Federated Learning: Theory, Algorithms, and Implementation", "abstract": "The conventional federated learning (FedL) architecture distributes machine learning (ML) across worker devices by having them train local models that are periodically aggregated by a server. FedL ignores two important characteristics of contemporary wireless networks, however: (i) the network may contain heterogeneous communication/computation resources, while (ii) there may be significant overlaps in devices’ local data distributions. In this work, we develop a novel optimization methodology that jointly accounts for these factors via intelligent device sampling complemented by device-to-device (D2D) offloading. Our optimization aims to select the best combination of sampled nodes and data offloading configuration to maximize FedL training accuracy subject to realistic constraints on the network topology and device capabilities. Theoretical analysis of the D2D offloading subproblem leads to new FedL convergence bounds and an efficient sequential convex optimizer. Using this result, we develop a sampling methodology based on graph convolutional networks (GCNs) which learns the relationship between network attributes, sampled nodes, and resulting offloading that maximizes FedL accuracy. Through evaluation on real-world datasets and network measurements from our IoT testbed, we find that our methodology while sampling less than 5% of all devices outperforms conventional FedL substantially both in terms of trained model accuracy and required resource utilization.", "year": 2021, "venue": "IEEE Conference on Computer Communications", "authors": [ "Su Wang", "Mengyuan Lee", "Seyyedali Hosseinalipour", "Roberto Morabito", "M. Chiang", "Christopher G. 
Brinton" ], "externalIds": { "DBLP": "conf/infocom/0007LHMCB21", "ArXiv": "2101.00787", "DOI": "10.1109/INFOCOM42981.2021.9488906", "CorpusId": 230437685 }, "url": "https://www.semanticscholar.org/paper/ceaba0f963b2319e067dfe51c76231f7b52cab2c", "referenceCount": 49, "citationCount": 101, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Privacy and Robustness in Federated Learning: Attacks and Defenses", "abstract": "As data are increasingly being stored in different silos and societies becoming more aware of data privacy issues, the traditional centralized training of artificial intelligence (AI) models is facing efficiency and privacy challenges. Recently, federated learning (FL) has emerged as an alternative solution and continues to thrive in this new reality. Existing FL protocol designs have been shown to be vulnerable to adversaries within or outside of the system, compromising data privacy and system robustness. Besides training powerful global models, it is of paramount importance to design FL systems that have privacy guarantees and are resistant to different types of adversaries. In this article, we conduct a comprehensive survey on privacy and robustness in FL over the past five years. Through a concise introduction to the concept of FL and a unique taxonomy covering: 1) threat models; 2) privacy attacks and defenses; and 3) poisoning attacks and defenses, we provide an accessible review of this important topic. We highlight the intuitions, key techniques, and fundamental assumptions adopted by various attacks and defenses. Finally, we discuss promising future research directions toward robust and privacy-preserving FL, and their interplays with the multidisciplinary goals of FL.", "year": 2020, "venue": "IEEE Transactions on Neural Networks and Learning Systems", "authors": [ "L. Lyu", "Han Yu", "Xingjun Ma", "Lichao Sun", "Jun Zhao", "Qiang Yang", "Philip S. Yu" ], "externalIds": { "MAG": "3111919937", "DBLP": "journals/tnn/LyuYMCSZYY24", "ArXiv": "2012.06337", "DOI": "10.1109/TNNLS.2022.3216981", "CorpusId": 228373690, "PubMed": "36355741" }, "url": "https://www.semanticscholar.org/paper/8448010d9adad18bf36070c012770a10ecb21c76", "referenceCount": 223, "citationCount": 263, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Distributed Nesterov Gradient and Heavy-Ball Double Accelerated Asynchronous Optimization", "abstract": "In this article, we come up with a novel Nesterov gradient and heavy-ball double accelerated distributed synchronous optimization algorithm, called NHDA, and adopt a general asynchronous model to further propose an effective asynchronous algorithm, called ASY-NHDA, for distributed optimization problem over directed graphs, where each agent has access to a local objective function and computes the optimal solution via communicating only with its immediate neighbors. Our goal is to minimize a sum of all local objective functions satisfying strong convexity and Lipschitz continuity. Consider a general asynchronous model, where agents communicate with their immediate neighbors and start a new computation independently, that is, agents can communicate with their neighbors at any time without any coordination and use delayed information from their in-neighbors to compute a new update. Delays are arbitrary, unpredictable, and time-varying but bounded. 
The theoretical analysis of NHDA is based on analyzing the interaction among the consensus, the gradient tracking, and the optimization processes. As for the analysis of ASY-NHDA, we equivalently transform the asynchronous system into an augmented synchronous system without delays and prove its convergence through using the generalized small gain theorem. The results show that NHDA and ASY-NHDA converge to the optimal solution at a linear convergence as long as the largest step size is positive and less than an explicitly estimated upper bound, and the largest momentum parameter is nonnegative and less than an upper bound. Finally, we demonstrate the advantages of ASY-NHDA through simulations.", "year": 2020, "venue": "IEEE Transactions on Neural Networks and Learning Systems", "authors": [ "Huaqing Li", "Huqiang Cheng", "Z. Wang", "Guo-cheng Wu" ], "externalIds": { "MAG": "3092460168", "DBLP": "journals/tnn/LiCWW21", "DOI": "10.1109/TNNLS.2020.3027381", "CorpusId": 222351207, "PubMed": "33048761" }, "url": "https://www.semanticscholar.org/paper/be3a280419ada55ecabed954f706d8ea5259e886", "referenceCount": 0, "citationCount": 23, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "On Hyperparameter Optimization of Machine Learning Algorithms: Theory and Practice", "abstract": null, "year": 2020, "venue": "Neurocomputing", "authors": [ "Li Yang", "A. Shami" ], "externalIds": { "ArXiv": "2007.15745", "MAG": "3045004532", "DBLP": "journals/corr/abs-2007-15745", "DOI": "10.1016/j.neucom.2020.07.061", "CorpusId": 220919678 }, "url": "https://www.semanticscholar.org/paper/2e5d2f2dc01b150dffc163a9f457848e9b5b5c38", "referenceCount": 131, "citationCount": 1454, "influentialCitationCount": 47, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "An Efficient Framework for Clustered Federated Learning", "abstract": "We address the problem of federated learning (FL) where users are distributed and partitioned into clusters. This setup captures settings where different groups of users have their own objectives (learning tasks) but by aggregating their data with others in the same cluster (same learning task), they can leverage the strength in numbers in order to perform more efficient federated learning. For this new framework of clustered federated learning, we propose the Iterative Federated Clustering Algorithm (IFCA), which alternately estimates the cluster identities of the users and optimizes model parameters for the user clusters via gradient descent. We analyze the convergence rate of this algorithm first in a linear model with squared loss and then for generic strongly convex and smooth loss functions. We show that in both settings, with good initialization, IFCA is guaranteed to converge, and discuss the optimality of the statistical error rate. In particular, for the linear model with two clusters, we can guarantee that our algorithm converges as long as the initialization is slightly better than random. When the clustering structure is ambiguous, we propose to train the models by combining IFCA with the weight sharing technique in multi-task learning. In the experiments, we show that our algorithm can succeed even if we relax the requirements on initialization with random initialization and multiple restarts. We also present experimental results showing that our algorithm is efficient in non-convex problems such as neural networks. 
We demonstrate the benefits of IFCA over the baselines on several clustered FL benchmarks.", "year": 2020, "venue": "IEEE Transactions on Information Theory", "authors": [ "Avishek Ghosh", "Jichan Chung", "Dong Yin", "K. Ramchandran" ], "externalIds": { "MAG": "3034163621", "DBLP": "journals/tit/GhoshCYR22", "ArXiv": "2006.04088", "DOI": "10.1109/TIT.2022.3192506", "CorpusId": 219530697 }, "url": "https://www.semanticscholar.org/paper/06bc35120d2775bb4c26d6fd1ba68d1befcd84cb", "referenceCount": 59, "citationCount": 665, "influentialCitationCount": 115, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "S-ADDOPT: Decentralized Stochastic First-Order Optimization Over Directed Graphs", "abstract": "In this letter, we study decentralized stochastic optimization to minimize a sum of smooth and strongly convex cost functions when the functions are distributed over a directed network of nodes. In contrast to the existing work, we use gradient tracking to improve certain aspects of the resulting algorithm. In particular, we propose the S-ADDOPT algorithm that assumes a stochastic first-order oracle at each node and show that for a constant step-size $\\alpha $ , each node converges linearly inside an error ball around the optimal solution, the size of which is controlled by $\\alpha $ . For decaying step-sizes $\\mathcal {O}$ (1/ ${k}$ ), we show that S-ADDOPT reaches the exact solution sublinearly at $\\mathcal {O}$ (1/ ${k}$ ) and its convergence is asymptotically network-independent. Thus the asymptotic behavior of S-ADDOPT is comparable to the centralized stochastic gradient descent. Numerical experiments over both strongly convex and non-convex problems illustrate the convergence behavior and the performance comparison of the proposed algorithm.", "year": 2020, "venue": "IEEE Control Systems Letters", "authors": [ "Muhammad I. Qureshi", "Ran Xin", "S. Kar", "U. Khan" ], "externalIds": { "MAG": "3047043330", "DBLP": "journals/corr/abs-2005-07785", "ArXiv": "2005.07785", "DOI": "10.1109/LCSYS.2020.3006420", "CorpusId": 218674586 }, "url": "https://www.semanticscholar.org/paper/03f565ddff60631000c45ce4158172be7fab0b4b", "referenceCount": 36, "citationCount": 24, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science", "Engineering" ] }, { "title": "Decentralized Optimization Over Time-Varying Directed Graphs With Row and Column-Stochastic Matrices", "abstract": "In this article, we provide a distributed optimization algorithm, termed as TV-$\\mathcal {AB}$, that minimizes a sum of convex functions over time-varying, random directed graphs. Contrary to the existing work, the algorithm we propose does not require eigenvector estimation to estimate the (non-$\\mathbf {1}$) Perron eigenvector of a stochastic matrix. Instead, the proposed approach relies on a novel information mixing approach that exploits both row- and column-stochastic weights to achieve agreement toward the optimal solution when the underlying graph is directed. We show that TV-$\\mathcal {AB}$ converges linearly to the optimal solution when the global objective is smooth and strongly convex, and the underlying time-varying graphs exhibit bounded connectivity, i.e., a union of every $C$ consecutive graphs is strongly connected. We derive the convergence results based on the stability analysis of a linear system of inequalities along with a matrix perturbation argument. 
Simulations confirm the findings in this article.", "year": 2020, "venue": "IEEE Transactions on Automatic Control", "authors": [ "Fakhteh Saadatniaki", "Ran Xin", "Usman A. Khan" ], "externalIds": { "MAG": "3003613721", "DBLP": "journals/tac/SaadatniakiXK20", "DOI": "10.1109/TAC.2020.2969721", "CorpusId": 263883809 }, "url": "https://www.semanticscholar.org/paper/61903864136865bfda49b671a8ec4ebf89052a64", "referenceCount": 60, "citationCount": 63, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "On the Influence of Bias-Correction on Distributed Stochastic Optimization", "abstract": "Various bias-correction methods such as EXTRA, gradient tracking methods, and exact diffusion have been proposed recently to solve distributed deterministic optimization problems. These methods employ constant step-sizes and converge linearly to the exact solution under proper conditions. However, their performance under stochastic and adaptive settings is less explored. It is still unknown whether, when and why these bias-correction methods can outperform their traditional counterparts with noisy gradient and constant step-sizes. This work studies the performance of exact diffusion under the stochastic and adaptive setting, and provides conditions under which exact diffusion has superior steady-state mean-square deviation (MSD) performance than traditional algorithms without bias-correction. In particular, it is proven that this superiority is more evident over sparsely-connected network topologies such as lines, cycles, or grids. Conditions are also provided under which exact diffusion method can or degrade the performance of traditional methods. Simulations are provided to validate the theoretical findings.", "year": 2019, "venue": "IEEE Transactions on Signal Processing", "authors": [ "K. Yuan", "Sulaiman A. Alghunaim", "Bicheng Ying", "A. H. Sayed" ], "externalIds": { "DBLP": "journals/tsp/YuanAYS20", "MAG": "2960456725", "ArXiv": "1903.10956", "DOI": "10.1109/TSP.2020.3008605", "CorpusId": 195886355 }, "url": "https://www.semanticscholar.org/paper/c48d5ba8473aa01fc12a89f7ffe4da8e408afa56", "referenceCount": 44, "citationCount": 56, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Distributed stochastic optimization with gradient tracking over strongly-connected networks", "abstract": "In this paper, we study distributed stochastic optimization to minimize a sum of smooth and strongly-convex local cost functions over a network of agents, communicating over a strongly-connected graph. Assuming that each agent has access to a stochastic first-order oracle $\\left( {\\mathcal{S}\\mathcal{F}\\mathcal{O}} \\right)$, we propose a novel distributed method, called $\\mathcal{S} - \\mathcal{A}\\mathcal{B}$, where each agent uses an auxiliary variable to asymptotically track the gradient of the global cost in expectation. The $\\mathcal{S} - \\mathcal{A}\\mathcal{B}$ algorithm employs row- and column-stochastic weights simultaneously to ensure both consensus and optimality. Since doubly-stochastic weights are not used, $\\mathcal{S} - \\mathcal{A}\\mathcal{B}$ is applicable to arbitrary strongly-connected graphs. We show that under a sufficiently small constant step-size, $\\mathcal{S} - \\mathcal{A}\\mathcal{B}$ converges linearly (in expected mean-square sense) to a neighborhood of the global minimizer. 
We present numerical simulations based on real-world data sets to illustrate the theoretical results.", "year": 2019, "venue": "IEEE Conference on Decision and Control", "authors": [ "Ran Xin", "Anit Kumar Sahu", "U. Khan", "S. Kar" ], "externalIds": { "DBLP": "conf/cdc/XinSKK19", "MAG": "3011020921", "ArXiv": "1903.07266", "DOI": "10.1109/CDC40024.2019.9029217", "CorpusId": 81981049 }, "url": "https://www.semanticscholar.org/paper/60b5bdebcd1214b8a1ee1e1b1fd38c26631a6c50", "referenceCount": 38, "citationCount": 106, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Beyond Inferring Class Representatives: User-Level Privacy Leakage From Federated Learning", "abstract": "Federated learning, i.e., a mobile edge computing framework for deep learning, is a recent advance in privacy-preserving machine learning, where the model is trained in a decentralized manner by the clients, i.e., data curators, preventing the server from directly accessing those private data from the clients. This learning mechanism significantly challenges the attack from the server side. Although the state-of-the-art attacking techniques that incorporated the advance of Generative adversarial networks (GANs) could construct class representatives of the global data distribution among all clients, it is still challenging to distinguishably attack a specific client (i.e., user-level privacy leakage), which is a stronger privacy threat to precisely recover the private data from a specific client. This paper gives the first attempt to explore user-level privacy leakage against the federated learning by the attack from a malicious server. We propose a framework incorporating GAN with a multi-task discriminator, which simultaneously discriminates category, reality, and client identity of input samples. The novel discrimination on client identity enables the generator to recover user specified private data. Unlike existing works that tend to interfere the training process of the federated learning, the proposed method works “invisibly” on the server side. The experimental results demonstrate the effectiveness of the proposed attacking approach and the superior to the state-of-the-art.", "year": 2018, "venue": "IEEE Conference on Computer Communications", "authors": [ "Zhibo Wang", "Mengkai Song", "Zhifei Zhang", "Yang Song", "Qian Wang", "H. Qi" ], "externalIds": { "MAG": "2902229711", "DBLP": "conf/infocom/WangSZSWQ19", "ArXiv": "1812.00535", "DOI": "10.1109/INFOCOM.2019.8737416", "CorpusId": 54436587 }, "url": "https://www.semanticscholar.org/paper/33c3f816bde8ee63ee9f2e60d4387b9390696371", "referenceCount": 27, "citationCount": 692, "influentialCitationCount": 28, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Push–Pull Gradient Methods for Distributed Optimization in Networks", "abstract": "In this article, we focus on solving a distributed convex optimization problem in a network, where each agent has its own convex cost function and the goal is to minimize the sum of the agents’ cost functions while obeying the network connectivity structure. In order to minimize the sum of the cost functions, we consider new distributed gradient-based methods where each node maintains two estimates, namely an estimate of the optimal decision variable and an estimate of the gradient for the average of the agents’ objective functions. 
From the viewpoint of an agent, the information about the gradients is pushed to the neighbors, whereas the information about the decision variable is pulled from the neighbors, hence giving the name “push–pull gradient methods.” The methods utilize two different graphs for the information exchange among agents and, as such, unify the algorithms with different types of distributed architecture, including decentralized (peer to peer), centralized (master–slave), and semicentralized (leader–follower) architectures. We show that the proposed algorithms and their many variants converge linearly for strongly convex and smooth objective functions over a network (possibly with unidirectional data links) in both synchronous and asynchronous random-gossip settings. In particular, under the random-gossip setting, “push–pull” is the first class of algorithms for distributed optimization over directed graphs. Moreover, we numerically evaluate our proposed algorithms in both scenarios, and show that they outperform other existing linearly convergent schemes, especially for ill-conditioned problems and networks that are not well balanced.", "year": 2018, "venue": "IEEE Transactions on Automatic Control", "authors": [ "Shi Pu", "Wei Shi", "Jinming Xu", "A. Nedić" ], "externalIds": { "DBLP": "journals/tac/PuSXN21", "MAG": "3006548771", "ArXiv": "1810.06653", "DOI": "10.1109/TAC.2020.2972824", "CorpusId": 212903457 }, "url": "https://www.semanticscholar.org/paper/97349817ca8fc96b04ee57cf03de0ab847f145ed", "referenceCount": 43, "citationCount": 257, "influentialCitationCount": 37, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Distributed Heavy-Ball: A Generalization and Acceleration of First-Order Methods With Gradient Tracking", "abstract": "We study distributed optimization to minimize a sum of smooth and strongly-convex functions. Recent work on this problem uses gradient tracking to achieve linear convergence to the exact global minimizer. However, a connection among different approaches has been unclear. In this paper, we first show that many of the existing first-order algorithms are related with a simple state transformation, at the heart of which lies a recently introduced algorithm known as $\\mathcal {AB}$. We then present distributed heavy-ball, denoted as $\\mathcal {AB}m$, that combines $\\mathcal {AB}$ with a momentum term and uses nonidentical local step-sizes. By simultaneously implementing both row- and column-stochastic weights, $\\mathcal {AB}m$ removes the conservatism in the related work due to doubly stochastic weights or eigenvector estimation. $\\mathcal {AB}m$ thus naturally leads to optimization and average consensus over both undirected and directed graphs. We show that $\\mathcal {AB}m$ has a global $R$-linear rate when the largest step-size and momentum parameter are positive and sufficiently small. We numerically show that $\\mathcal {AB}m$ achieves acceleration, particularly when the objective functions are ill-conditioned.", "year": 2018, "venue": "IEEE Transactions on Automatic Control", "authors": [ "Ran Xin", "U. 
Khan" ], "externalIds": { "DBLP": "journals/tac/XinK20", "MAG": "2973303672", "ArXiv": "1808.02942", "DOI": "10.1109/TAC.2019.2942513", "CorpusId": 52431144 }, "url": "https://www.semanticscholar.org/paper/3520d00586a1623d3dcaa945fd19bc0486001990", "referenceCount": 70, "citationCount": 128, "influentialCitationCount": 17, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Distributed stochastic gradient tracking methods", "abstract": null, "year": 2018, "venue": "Mathematical programming", "authors": [ "Shi Pu", "A. Nedić" ], "externalIds": { "DBLP": "journals/mp/PuN21", "MAG": "2806868256", "ArXiv": "1805.11454", "DOI": "10.1007/s10107-020-01487-0", "CorpusId": 44061760 }, "url": "https://www.semanticscholar.org/paper/67f9ed5384c542a0ccd2c4f9e533809edbd406a3", "referenceCount": 57, "citationCount": 253, "influentialCitationCount": 38, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Adaptive Federated Learning in Resource Constrained Edge Computing Systems", "abstract": "Emerging technologies and applications including Internet of Things, social networking, and crowd-sourcing generate large amounts of data at the network edge. Machine learning models are often built from the collected data, to enable the detection, classification, and prediction of future events. Due to bandwidth, storage, and privacy concerns, it is often impractical to send all the data to a centralized location. In this paper, we consider the problem of learning model parameters from data distributed across multiple edge nodes, without sending raw data to a centralized place. Our focus is on a generic class of machine learning models that are trained using gradient-descent-based approaches. We analyze the convergence bound of distributed gradient descent from a theoretical point of view, based on which we propose a control algorithm that determines the best tradeoff between local update and global parameter aggregation to minimize the loss function under a given resource budget. The performance of the proposed algorithm is evaluated via extensive experiments with real datasets, both on a networked prototype system and in a larger-scale simulated environment. The experimentation results show that our proposed approach performs near to the optimum with various machine learning models and different data distributions.", "year": 2018, "venue": "IEEE Journal on Selected Areas in Communications", "authors": [ "Shiqiang Wang", "Tiffany Tuor", "Theodoros Salonidis", "K. Leung", "C. Makaya", "T. He", "K. Chan" ], "externalIds": { "MAG": "2952696465", "DBLP": "journals/jsac/WangTSLMHC19", "ArXiv": "1804.05271", "DOI": "10.1109/JSAC.2019.2904348", "CorpusId": 51921962 }, "url": "https://www.semanticscholar.org/paper/e2e0e226f1f74ff65c0de3e5ad565bcd8b9710da", "referenceCount": 52, "citationCount": 1502, "influentialCitationCount": 125, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "FROST—Fast row-stochastic optimization with uncoordinated step-sizes", "abstract": null, "year": 2018, "venue": "EURASIP Journal on Advances in Signal Processing", "authors": [ "Ran Xin", "Chenguang Xi", "U. 
Khan" ], "externalIds": { "MAG": "2794887289", "ArXiv": "1803.09169", "DBLP": "journals/ejasp/XinXK19", "DOI": "10.1186/s13634-018-0596-y", "CorpusId": 86422815 }, "url": "https://www.semanticscholar.org/paper/14a3568c462a190991c403dfe4a18cba8a992c43", "referenceCount": 76, "citationCount": 82, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Convergence Rates for Distributed Stochastic Optimization Over Random Networks", "abstract": "We establish the $O(\\frac{1}{k})$ convergence rate for distributed stochastic gradient methods that operate over strongly convex costs and random networks. The considered class of methods is standard - each node performs a weighted average of its own and its neighbors' solution estimates (consensus), and takes a negative step with respect to a noisy version of its local function's gradient (innovation). The underlying communication network is modeled through a sequence of temporally independent identically distributed (i.i.d.) Laplacian matrices such that the underlying graphs are connected on average; the local gradient noises are also i.i.d. in time, have finite second moment, and possibly unbounded support. We show that, after a careful setting of the consensus and innovations potentials (weights), the distributed stochastic gradient method achieves a (order-optimal) $O(\\frac{1}{k})$ convergence rate in the mean square distance from the solution. To the best of our knowledge, this is the first order-optimal convergence rate result on distributed strongly convex stochastic optimization when the network is random and the gradient noises have unbounded support. Simulation examples confirm the theoretical findings.", "year": 2018, "venue": "IEEE Conference on Decision and Control", "authors": [ "D. Jakovetić", "D. Bajović", "Anit Kumar Sahu", "S. Kar" ], "externalIds": { "MAG": "2789912105", "DBLP": "conf/cdc/JakoveticBSK18", "ArXiv": "1803.07836", "DOI": "10.1109/CDC.2018.8619228", "CorpusId": 52083147 }, "url": "https://www.semanticscholar.org/paper/aff0c1a0e78ae06cca68cf2580e51c9c707e3b9b", "referenceCount": 21, "citationCount": 63, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Linear Algorithm for Optimization Over Directed Graphs With Geometric Convergence", "abstract": "In this letter, we study distributed optimization, where a network of agents, abstracted as a directed graph, collaborates to minimize the average of locally known convex functions. Most of the existing approaches over directed graphs are based on push-sum (type) techniques, which use an independent algorithm to asymptotically learn either the left or right eigenvector of the underlying weight matrices. This strategy causes additional computation, communication, and nonlinearity in the algorithm. In contrast, we propose a linear algorithm based on an inexact gradient method and a gradient estimation technique. Under the assumptions that each local function is strongly convex with Lipschitz-continuous gradients, we show that the proposed algorithm geometrically converges to the global minimizer with a sufficiently small step-size. We present simulations to illustrate the theoretical findings.", "year": 2018, "venue": "IEEE Control Systems Letters", "authors": [ "Ran Xin", "U. 
Khan" ], "externalIds": { "MAG": "2950066423", "DBLP": "journals/csysl/XinK18", "ArXiv": "1803.02503", "DOI": "10.1109/LCSYS.2018.2834316", "CorpusId": 46897706 }, "url": "https://www.semanticscholar.org/paper/b96ad9307d166ae06ff5c7782258aa9c8547f6e3", "referenceCount": 29, "citationCount": 227, "influentialCitationCount": 27, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Convergence of Asynchronous Distributed Gradient Methods Over Stochastic Networks", "abstract": "We consider distributed optimization problems in which a number of agents are to seek the global optimum of a sum of cost functions through only local information sharing. In this paper, we are particularly interested in scenarios, where agents are operating asynchronously over stochastic networks subject to random failures. Most existing algorithms require coordinated and decaying stepsizes to ensure zero gap between the estimated value of each agent and the exact optimum, restricting it from asynchronous implementation and resulting in slower convergence results. To deal with this issue, we develop a new asynchronous distributed gradient method (AsynDGM) based on consensus theory. The proposed algorithm not only allows for asynchronous implementation in a completely distributed manner but also, most importantly, is able to seek the exact optimum even with constant stepsizes. We will show that the assumption of boundedness of gradients, which is widely used in the literature, can be dropped by instead imposing the standard Lipschitz continuity condition on gradients. Moreover, we derive an upper bound of stepsize within which the proposed AsynDGM can achieve a linear convergence rate for strongly convex functions with Lipschitz gradients. A canonical example of sensor fusion problems is provided to illustrate the effectiveness of the proposed algorithm.", "year": 2018, "venue": "IEEE Transactions on Automatic Control", "authors": [ "Jinming Xu", "Shanying Zhu", "Y. Soh", "Lihua Xie" ], "externalIds": { "DBLP": "journals/tac/XuZSX18", "MAG": "2737743075", "DOI": "10.1109/TAC.2017.2730481", "CorpusId": 46075495 }, "url": "https://www.semanticscholar.org/paper/895490ae9498d620a7b69ea955c64a480ffbb4db", "referenceCount": 45, "citationCount": 176, "influentialCitationCount": 16, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Network Topology and Communication-Computation Tradeoffs in Decentralized Optimization", "abstract": "In decentralized optimization, nodes cooperate to minimize an overall objective function that is the sum (or average) of per-node private objective functions. Algorithms interleave local computations with communication among all or a subset of the nodes. Motivated by a variety of applications..decentralized estimation in sensor networks, fitting models to massive data sets, and decentralized control of multirobot systems, to name a few..significant advances have been made toward the development of robust, practical algorithms with theoretical performance guarantees. This paper presents an overview of recent work in this area. In general, rates of convergence depend not only on the number of nodes involved and the desired level of accuracy, but also on the structure and nature of the network over which nodes communicate (e.g., whether links are directed or undirected, static or time varying). 
We survey the state-of-the-art algorithms and their analyses tailored to these different scenarios, highlighting the role of the network topology.", "year": 2017, "venue": "Proceedings of the IEEE", "authors": [ "A. Nedić", "Alexander Olshevsky", "M. Rabbat" ], "externalIds": { "MAG": "2962771678", "ArXiv": "1709.08765", "DBLP": "journals/pieee/NedicOR18", "DOI": "10.1109/JPROC.2018.2817461", "CorpusId": 9297955 }, "url": "https://www.semanticscholar.org/paper/2025ca4373efd5c8e5c04177bfda224364f3d682", "referenceCount": 122, "citationCount": 467, "influentialCitationCount": 40, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Capacity of Wireless Networks With Directed Energy Links in the Presence of Obstacles", "abstract": "In this paper, we study the capacity of wireless networks with directed energy (DE) links in the presence of obstacles. DE links are highly focused wireless links that can be treated as “pencil beam,” an example of which is the E-band link (71–76 and 81–86 GHz) newly made available by the FCC. The 10-GHz spectrum of E-band is 50-times that of the entire cellular spectrum and provides the much needed spectrum in today’s world of exponential growth in mobile applications. Since the performance of highly focused DE links are highly susceptible to the presence of obstacles and real-world applications typically involve obstacles, it is important to study the capacity of wireless networks with DE links in the presence of obstacles, which is the subject of this paper. In the following, we first provide a study of probability distribution of DE links in the presence of obstacles and investigate how the number, shape, and size of obstacles impact the DE link probability distribution. Then, based on the probability distribution of DE links, we derive the capacity scaling laws for wireless networks in the presence of obstacles. Furthermore, our results can be extended to obstacles with arbitrary shapes quite accurately.", "year": 2017, "venue": "IEEE Transactions on Wireless Communications", "authors": [ "Yousef Jaradat", "Hong Huang", "M. Masoud", "Ismael Janoud" ], "externalIds": { "MAG": "2620467146", "DBLP": "journals/twc/JaradatHMJ17", "DOI": "10.1109/TWC.2017.2707487", "CorpusId": 43530575 }, "url": "https://www.semanticscholar.org/paper/dda65c8821532128ee29b00aa8786bbfda25e4af", "referenceCount": 20, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Can Decentralized Algorithms Outperform Centralized Algorithms? A Case Study for Decentralized Parallel Stochastic Gradient Descent", "abstract": "Most distributed machine learning systems nowadays, including TensorFlow and CNTK, are built in a centralized fashion. One bottleneck of centralized algorithms lies on high communication cost on the central node. Motivated by this, we ask, can decentralized algorithms be faster than its centralized counterpart? \nAlthough decentralized PSGD (D-PSGD) algorithms have been studied by the control community, existing analysis and theory do not show any advantage over centralized PSGD (C-PSGD) algorithms, simply assuming the application scenario where only the decentralized network is available. In this paper, we study a D-PSGD algorithm and provide the first theoretical analysis that indicates a regime in which decentralized algorithms might outperform centralized algorithms for distributed stochastic gradient descent. 
This is because D-PSGD has comparable total computational complexities to C-PSGD but requires much less communication cost on the busiest node. We further conduct an empirical study to validate our theoretical analysis across multiple frameworks (CNTK and Torch), different network configurations, and computation platforms up to 112 GPUs. On network configurations with low bandwidth or high latency, D-PSGD can be up to one order of magnitude faster than its well-optimized centralized counterparts.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Xiangru Lian", "Ce Zhang", "Huan Zhang", "Cho-Jui Hsieh", "Wei Zhang", "Ji Liu" ], "externalIds": { "MAG": "2950830975", "DBLP": "conf/nips/LianZZHZL17", "ArXiv": "1705.09056", "CorpusId": 1467846 }, "url": "https://www.semanticscholar.org/paper/3f1ab8b484f7881a68c8562ff908390742e4ba90", "referenceCount": 53, "citationCount": 1080, "influentialCitationCount": 247, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Harnessing smoothness to accelerate distributed optimization", "abstract": "There has been a growing effort in studying the distributed optimization problem over a network. The objective is to optimize a global function formed by a sum of local functions, using only local computation and communication. Literature has developed consensus-based distributed (sub)gradient descent (DGD) methods and has shown that they have the same convergence rate O(log t/√t) as the centralized (sub)gradient methods (CGD) when the function is convex but possibly nonsmooth. However, when the function is convex and smooth, under the framework of DGD, it is unclear how to harness the smoothness to obtain a faster convergence rate comparable to CGD's convergence rate. In this paper, we propose a distributed algorithm that, despite using the same amount of communication per iteration as DGD, can effectively harnesses the function smoothness and converge to the optimum with a rate of O(1/t). If the objective function is further strongly convex, our algorithm has a linear convergence rate. Both rates match the convergence rate of CGD. The key step in our algorithm is a novel gradient estimation scheme that uses history information to achieve fast and accurate estimation of the average gradient. To motivate the necessity of history information, we also show that it is impossible for a class of distributed algorithms like DGD to achieve a linear convergence rate without using history information even if the objective function is strongly convex and smooth.", "year": 2016, "venue": "IEEE Conference on Decision and Control", "authors": [ "Guannan Qu", "Na Li" ], "externalIds": { "DBLP": "conf/cdc/QuL16", "MAG": "2400034325", "DOI": "10.1109/CDC.2016.7798263", "CorpusId": 14736053 }, "url": "https://www.semanticscholar.org/paper/ef3c50a4d034d866e125d1d8340ba0842a124dc9", "referenceCount": 51, "citationCount": 514, "influentialCitationCount": 75, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Linear Convergence in Optimization Over Directed Graphs With Row-Stochastic Matrices", "abstract": "This paper considers a distributed optimization problem over a multiagent network, in which the objective function is a sum of individual cost functions at the agents. We focus on the case when communication between the agents is described by a directed graph. 
Existing distributed optimization algorithms for directed graphs require at least the knowledge of the neighbors’ out-degree at each agent (due to the requirement of column-stochastic matrices). In contrast, our algorithm requires no such knowledge. Moreover, the proposed algorithm achieves the best known rate of convergence for this class of problems, $O(\\mu ^k)$ for $0<\\mu <1$ , where $k$ is the number of iterations, given that the objective functions are strongly convex and have Lipschitz-continuous gradients. Numerical experiments are also provided to illustrate the theoretical findings.", "year": 2016, "venue": "IEEE Transactions on Automatic Control", "authors": [ "Chenguang Xi", "V. Mai", "Ran Xin", "E. Abed", "U. Khan" ], "externalIds": { "MAG": "2964103300", "DBLP": "journals/tac/XiMXAK18", "ArXiv": "1611.06160", "DOI": "10.1109/TAC.2018.2797164", "CorpusId": 52897430 }, "url": "https://www.semanticscholar.org/paper/74aaa7e68ecc5f3efc0765ea745b9c0da328df27", "referenceCount": 48, "citationCount": 149, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Geometrical Convergence Rate for Distributed Optimization with Time-Varying Directed Graphs and Uncoordinated Step-Sizes", "abstract": "This paper studies a class of distributed optimization algorithms by a set of agents, where each agent has only access to its own local convex objective function, and jointly minimizes the sum of the functions. The communications among agents are described by a sequence of time-varying directed graphs which are assumed to be uniformly strongly connected. A column stochastic mixing matrices is employed in the algorithm, which also exactly steers all the agents to asymptotically converge to a global and consensual optimal solution even under the assumption that the step-sizes are uncoordinated. Two fairly standard conditions for achieving the geometrical convergence rate are established under the assumption that the objective functions are strong convexity and have Lipschitz continuous gradient. The theoretical analysis shows that the distributed algorithm is capable of driving the whole network to geometrically converge to an optimal solution of the convex optimization problem as long as the uncoordinated step-sizes do not exceed some upper bounds. We also give an explicit analysis for the convergence rate of our algorithm through a different approach. Finally, simulation results illustrate the feasibility of the proposed algorithm and the theoretical analysis throughout this paper.", "year": 2016, "venue": "", "authors": [ "Qingguo Lu", "Huaqing Li" ], "externalIds": { "MAG": "2951353918", "ArXiv": "1611.00990", "CorpusId": 22037803 }, "url": "https://www.semanticscholar.org/paper/0da1ad1c5d32011a5acf7a0c7e125c0ad0ae82db", "referenceCount": 28, "citationCount": 40, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Geometrically convergent distributed optimization with uncoordinated step-sizes", "abstract": "A recent algorithmic family for distributed optimization, DIGing's, have been shown to have geometric convergence over time-varying undirected/directed graphs [1]. Nevertheless, an identical step-size for all agents is needed. In this paper, we study the convergence rates of the Adapt-Then-Combine (ATC) variation of the DIGing algorithm under uncoordinated step-sizes. 
We show that the ATC variation of DIGing algorithm converges geometrically fast even if the step-sizes are different among the agents. In addition, our analysis implies that the ATC structure can accelerate convergence compared to the distributed gradient descent (DGD) structure which has been used in the original DIGing algorithm.", "year": 2016, "venue": "American Control Conference", "authors": [ "A. Nedić", "Alexander Olshevsky", "Wei Shi", "César A. Uribe" ], "externalIds": { "DBLP": "conf/amcc/NedicOSU17", "ArXiv": "1609.05877", "MAG": "2523259601", "DOI": "10.23919/ACC.2017.7963560", "CorpusId": 16824807 }, "url": "https://www.semanticscholar.org/paper/2ad5ab506b79cc11d30d0d6af2590c13e0ee5354", "referenceCount": 34, "citationCount": 134, "influentialCitationCount": 22, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "ADD-OPT: Accelerated Distributed Directed Optimization", "abstract": "In this paper, we consider distributed optimization problems where the goal is to minimize a sum of objective functions over a multiagent network. We focus on the case when the interagent communication is described by a strongly connected, directed graph. The proposed algorithm, Accelerated Distributed Directed OPTimization (ADD-OPT), achieves the best known convergence rate for this class of problems,  $O(\\mu ^{k}),0<\\mu <1$, given strongly convex, objective functions with globally Lipschitz-continuous gradients, where $k$ is the number of iterations. Moreover, ADD-OPT supports a wider and more realistic range of step sizes in contrast to existing work. In particular, we show that ADD-OPT converges for arbitrarily small (positive) step sizes. Simulations further illustrate our results.", "year": 2016, "venue": "IEEE Transactions on Automatic Control", "authors": [ "Chenguang Xi", "Ran Xin", "U. Khan" ], "externalIds": { "MAG": "2476071355", "DBLP": "journals/tac/XiXK18", "ArXiv": "1607.04757", "DOI": "10.1109/TAC.2017.2737582", "CorpusId": 13832332 }, "url": "https://www.semanticscholar.org/paper/4b103a7380daf5d2cd7fd40bdac9dac542a196b1", "referenceCount": 53, "citationCount": 181, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Achieving Geometric Convergence for Distributed Optimization Over Time-Varying Graphs", "abstract": "This paper considers the problem of distributed optimization over time-varying graphs. For the case of undirected graphs, we introduce a distributed algorithm, referred to as DIGing, based on a combination of a distributed inexact gradient method and a gradient tracking technique. The DIGing algorithm uses doubly stochastic mixing matrices and employs fixed step-sizes and, yet, drives all the agents' iterates to a global and consensual minimizer. When the graphs are directed, in which case the implementation of doubly stochastic mixing matrices is unrealistic, we construct an algorithm that incorporates the push-sum protocol into the DIGing structure, thus obtaining Push-DIGing algorithm. The Push-DIGing uses column stochastic matrices and fixed step-sizes, but it still converges to a global and consensual minimizer. Under the strong convexity assumption, we prove that the algorithms converge at R-linear (geometric) rates as long as the step-sizes do not exceed some upper bounds. We establish explicit estimates for the convergence rates. When the graph is undirected it shows that DIGing scales polynomially in the number of agents. 
We also provide some numerical experiments to demonstrate the efficacy of the proposed algorithms and to validate our theoretical findings.", "year": 2016, "venue": "SIAM Journal on Optimization", "authors": [ "A. Nedić", "Alexander Olshevsky", "Wei Shi" ], "externalIds": { "MAG": "2465749664", "DBLP": "journals/siamjo/NedicOS17", "ArXiv": "1607.03218", "DOI": "10.1137/16M1084316", "CorpusId": 3876732 }, "url": "https://www.semanticscholar.org/paper/4da06ee53eb97c863db1bc66c4bdb61e89f6c911", "referenceCount": 78, "citationCount": 856, "influentialCitationCount": 156, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Optimization Methods for Large-Scale Machine Learning", "abstract": "This paper provides a review and commentary on the past, present, and future of numerical optimization algorithms in the context of machine learning applications. Through case studies on text classification and the training of deep neural networks, we discuss how optimization problems arise in machine learning and what makes them challenging. A major theme of our study is that large-scale machine learning represents a distinctive setting in which the stochastic gradient (SG) method has traditionally played a central role while conventional gradient-based nonlinear optimization techniques typically falter. Based on this viewpoint, we present a comprehensive theory of a straightforward, yet versatile SG algorithm, discuss its practical behavior, and highlight opportunities for designing algorithms with improved performance. This leads to a discussion about the next generation of optimization methods for large-scale machine learning, including an investigation of two main streams of research on techniques that diminish noise in the stochastic directions and methods that make use of second-order derivative approximations.", "year": 2016, "venue": "SIAM Review", "authors": [ "L. Bottou", "Frank E. Curtis", "J. Nocedal" ], "externalIds": { "ArXiv": "1606.04838", "MAG": "2950363690", "DBLP": "journals/siamrev/BottouCN18", "DOI": "10.1137/16M1080173", "CorpusId": 3119488 }, "url": "https://www.semanticscholar.org/paper/d21703674ae562bae4a849a75847cdd9ead417df", "referenceCount": 183, "citationCount": 2887, "influentialCitationCount": 392, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Communication-Efficient Learning of Deep Networks from Decentralized Data", "abstract": "Modern mobile devices have access to a wealth of data suitable for learning models, which in turn can greatly improve the user experience on the device. For example, language models can improve speech recognition and text entry, and image models can automatically select good photos. However, this rich data is often privacy sensitive, large in quantity, or both, which may preclude logging to the data center and training there using conventional approaches. We advocate an alternative that leaves the training data distributed on the mobile devices, and learns a shared model by aggregating locally-computed updates. We term this decentralized approach Federated Learning. \nWe present a practical method for the federated learning of deep networks based on iterative model averaging, and conduct an extensive empirical evaluation, considering five different model architectures and four datasets. These experiments demonstrate the approach is robust to the unbalanced and non-IID data distributions that are a defining characteristic of this setting. 
Communication costs are the principal constraint, and we show a reduction in required communication rounds by 10-100x as compared to synchronized stochastic gradient descent.", "year": 2016, "venue": "International Conference on Artificial Intelligence and Statistics", "authors": [ "H. B. McMahan", "Eider Moore", "Daniel Ramage", "S. Hampson", "B. A. Y. Arcas" ], "externalIds": { "MAG": "2950745363", "DBLP": "conf/aistats/McMahanMRHA17", "ArXiv": "1602.05629", "CorpusId": 14955348 }, "url": "https://www.semanticscholar.org/paper/d1dbf643447405984eeef098b1b320dee0b3b8a7", "referenceCount": 50, "citationCount": 13806, "influentialCitationCount": 3346, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Machine learning: Trends, perspectives, and prospects", "abstract": "Machine learning addresses the question of how to build computers that improve automatically through experience. It is one of today’s most rapidly growing technical fields, lying at the intersection of computer science and statistics, and at the core of artificial intelligence and data science. Recent progress in machine learning has been driven both by the development of new learning algorithms and theory and by the ongoing explosion in the availability of online data and low-cost computation. The adoption of data-intensive machine-learning methods can be found throughout science, technology and commerce, leading to more evidence-based decision-making across many walks of life, including health care, manufacturing, education, financial modeling, policing, and marketing.", "year": 2015, "venue": "Science", "authors": [ "Michael I. Jordan", "T. Mitchell" ], "externalIds": { "MAG": "1901616594", "DOI": "10.1126/science.aaa8415", "CorpusId": 677218, "PubMed": "26185243" }, "url": "https://www.semanticscholar.org/paper/d422df8bff4e677a3077635db116679d25142bfc", "referenceCount": 71, "citationCount": 5713, "influentialCitationCount": 117, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Hybrid Random/Deterministic Parallel Algorithms for Convex and Nonconvex Big Data Optimization", "abstract": "We propose a decomposition framework for the parallel optimization of the sum of a differentiable (possibly nonconvex) function and a nonsmooth (possibly nonseparable), convex one. The latter term is usually employed to enforce structure in the solution, typically sparsity. The main contribution of this work is a novel parallel, hybrid random/deterministic decomposition scheme wherein, at each iteration, a subset of (block) variables is updated at the same time by minimizing a convex surrogate of the original nonconvex function. To tackle huge-scale problems, the (block) variables to be updated are chosen according to a mixed random and deterministic procedure, which captures the advantages of both pure deterministic and random update-based schemes. Almost sure convergence of the proposed scheme is established. Numerical results show that on huge-scale problems the proposed hybrid random/deterministic algorithm compares favorably to random and deterministic schemes on both convex and nonconvex problems.", "year": 2014, "venue": "IEEE Transactions on Signal Processing", "authors": [ "Amir Daneshmand", "F. Facchinei", "V. Kungurtsev", "G. 
Scutari" ], "externalIds": { "MAG": "1570769529", "DBLP": "journals/corr/DaneshmandFKS14", "ArXiv": "1407.4504", "DOI": "10.1109/TSP.2015.2436357", "CorpusId": 14474509 }, "url": "https://www.semanticscholar.org/paper/57d244563636132100259bbd7547375a1e0ec134", "referenceCount": 66, "citationCount": 61, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "SAGA: A Fast Incremental Gradient Method With Support for Non-Strongly Convex Composite Objectives", "abstract": "In this work we introduce a new optimisation method called SAGA in the spirit of SAG, SDCA, MISO and SVRG, a set of recently proposed incremental gradient algorithms with fast linear convergence rates. SAGA improves on the theory behind SAG and SVRG, with better theoretical convergence rates, and has support for composite objectives where a proximal operator is used on the regulariser. Unlike SDCA, SAGA supports non-strongly convex problems directly, and is adaptive to any inherent strong convexity of the problem. We give experimental results showing the effectiveness of our method.", "year": 2014, "venue": "Neural Information Processing Systems", "authors": [ "Aaron Defazio", "F. Bach", "Simon Lacoste-Julien" ], "externalIds": { "MAG": "2135482703", "DBLP": "journals/corr/DefazioBL14", "ArXiv": "1407.0202", "CorpusId": 218654665 }, "url": "https://www.semanticscholar.org/paper/4daec165c1f4aa1206b0d91c0b26f0287d1ef52d", "referenceCount": 13, "citationCount": 1738, "influentialCitationCount": 303, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Stochastic Gradient-Push for Strongly Convex Functions on Time-Varying Directed Graphs", "abstract": "We investigate the convergence rate of the recently proposed subgradient-push method for distributed optimization over time-varying directed graphs. The subgradient-push method can be implemented in a distributed way without requiring knowledge of either the number of agents or the graph sequence; each node is only required to know its out-degree at each time. Our main result is a convergence rate of O((ln t)/t) for strongly convex functions with Lipschitz gradients even if only stochastic gradient samples are available; this is asymptotically faster than the O((ln t)/√t) rate previously known for (general) convex functions.", "year": 2014, "venue": "IEEE Transactions on Automatic Control", "authors": [ "A. Nedić", "Alexander Olshevsky" ], "externalIds": { "MAG": "1588593629", "DBLP": "journals/corr/NedicO14", "ArXiv": "1406.2075", "DOI": "10.1109/TAC.2016.2529285", "CorpusId": 15987556 }, "url": "https://www.semanticscholar.org/paper/071728b63ef021096703ca9f170af30b0ba2fd52", "referenceCount": 43, "citationCount": 299, "influentialCitationCount": 31, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Introductory Lectures on Convex Optimization - A Basic Course", "abstract": null, "year": 2014, "venue": "Applied Optimization", "authors": [ "Y. 
Nesterov" ], "externalIds": { "MAG": "2124541940", "DBLP": "books/sp/Nesterov04", "DOI": "10.1007/978-1-4419-8853-9", "CorpusId": 62288331 }, "url": "https://www.semanticscholar.org/paper/d0b0c3e5a1e768490bc9b759685930541957508b", "referenceCount": 0, "citationCount": 5843, "influentialCitationCount": 970, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Distributed optimization over time-varying directed graphs", "abstract": "We consider distributed optimization by a collection of nodes, each having access to its own convex function, whose collective goal is to minimize the sum of the functions. The communications between nodes are described by a time-varying sequence of directed graphs, which is uniformly strongly connected. For such communications, assuming that every node knows its out-degree, we develop a broadcast-based algorithm, termed the subgradient-push, which steers every node to an optimal value under a standard assumption of subgradient boundedness. The subgradient-push requires no knowledge of either the number of agents or the graph sequence to implement. Our analysis shows that the subgradient-push algorithm converges at a rate of O (ln t/√t), where the constant depends on the initial values at the nodes, the subgradient norms, and, more interestingly, on both the consensus speed and the imbalances of influence among the nodes.", "year": 2013, "venue": "IEEE Conference on Decision and Control", "authors": [ "A. Nedić", "Alexander Olshevsky" ], "externalIds": { "ArXiv": "1303.2289", "MAG": "2952931788", "DBLP": "conf/cdc/NedicO13", "DOI": "10.1109/CDC.2013.6760975", "CorpusId": 8361755 }, "url": "https://www.semanticscholar.org/paper/8c87fff1f5de9e4ac96b315b4ab434a9c0ac4499", "referenceCount": 33, "citationCount": 941, "influentialCitationCount": 114, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "A Survey on Machine-Learning Techniques in Cognitive Radios", "abstract": "In this survey paper, we characterize the learning problem in cognitive radios (CRs) and state the importance of artificial intelligence in achieving real cognitive communications systems. We review various learning problems that have been studied in the context of CRs classifying them under two main categories: Decision-making and feature classification. Decision-making is responsible for determining policies and decision rules for CRs while feature classification permits identifying and classifying different observation models. The learning algorithms encountered are categorized as either supervised or unsupervised algorithms. We describe in detail several challenging learning issues that arise in cognitive radio networks (CRNs), in particular in non-Markovian environments and decentralized networks, and present possible solution methods to address them. We discuss similarities and differences among the presented algorithms and identify the conditions under which each of the techniques may be applied.", "year": 2013, "venue": "IEEE Communications Surveys and Tutorials", "authors": [ "M. Bkassiny", "Yang Li", "S. 
Jayaweera" ], "externalIds": { "MAG": "2091005538", "DBLP": "journals/comsur/BkassinyLJ13", "DOI": "10.1109/SURV.2012.100412.00017", "CorpusId": 16279144 }, "url": "https://www.semanticscholar.org/paper/73b51b02a061e2eae2eebe2ceae45872ea7d509d", "referenceCount": 200, "citationCount": 501, "influentialCitationCount": 18, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Push-Sum Distributed Dual Averaging for convex optimization", "abstract": "Recently there has been a significant amount of research on developing consensus based algorithms for distributed optimization motivated by applications that vary from large scale machine learning to wireless sensor networks. This work describes and proves convergence of a new algorithm called Push-Sum Distributed Dual Averaging which combines a recent optimization algorithm [1] with a push-sum consensus protocol [2]. As we discuss, the use of push-sum has significant advantages. Restricting to doubly stochastic consensus protocols is not required and convergence to the true average consensus is guaranteed without knowing the stationary distribution of the update matrix in advance. Furthermore, the communication semantics of just summing the incoming information make this algorithm truly asynchronous and allow a clean analysis when varying intercommunication intervals and communication delays are modelled. We include experiments in simulation and on a small cluster to complement the theoretical analysis.", "year": 2012, "venue": "IEEE Conference on Decision and Control", "authors": [ "Konstantinos I. Tsianos", "Sean F. Lawlor", "M. Rabbat" ], "externalIds": { "MAG": "2063403497", "DBLP": "conf/cdc/TsianosLR12", "DOI": "10.1109/CDC.2012.6426375", "CorpusId": 2074953 }, "url": "https://www.semanticscholar.org/paper/853bfa230ecf566ddf8f6d8d68e6f12f9a60e2cc", "referenceCount": 16, "citationCount": 289, "influentialCitationCount": 12, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Asynchronous Broadcast-Based Convex Optimization Over a Network", "abstract": "We consider a distributed multi-agent network system where each agent has its own convex objective function, which can be evaluated with stochastic errors. The problem consists of minimizing the sum of the agent functions over a commonly known constraint set, but without a central coordinator and without agents sharing the explicit form of their objectives. We propose an asynchronous broadcast-based algorithm where the communications over the network are subject to random link failures. We investigate the convergence properties of the algorithm for a diminishing (random) stepsize and a constant stepsize, where each agent chooses its own stepsize independently of the other agents. Under some standard conditions on the gradient errors, we establish almost sure convergence of the method to an optimal point for diminishing stepsize. For constant stepsize, we establish some error bounds on the expected distance from the optimal point and the expected function value. We also provide numerical results.", "year": 2011, "venue": "IEEE Transactions on Automatic Control", "authors": [ "A. 
Nedić" ], "externalIds": { "DBLP": "journals/tac/Nedic11", "MAG": "2130263842", "DOI": "10.1109/TAC.2010.2079650", "CorpusId": 7381190 }, "url": "https://www.semanticscholar.org/paper/176e9b40b6aad3dcb5ca01a479489139d691ad83", "referenceCount": 36, "citationCount": 284, "influentialCitationCount": 31, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Directed diffusion for wireless sensor networking", "abstract": "Advances in processor, memory, and radio technology will enable small and cheap nodes capable of sensing, communication, and computation. Networks of such nodes can coordinate to perform distributed sensing of environmental phenomena. In this paper, we explore the directed-diffusion paradigm for such coordination. Directed diffusion is data-centric in that all communication is for named data. All nodes in a directed-diffusion-based network are application aware. This enables diffusion to achieve energy savings by selecting empirically good paths and by caching and processing data in-network (e.g., data aggregation). We explore and evaluate the use of directed diffusion for a simple remote-surveillance sensor network analytically and experimentally. Our evaluation indicates that directed diffusion can achieve significant energy savings and can outperform idealized traditional schemes (e.g., omniscient multicast) under the investigated scenarios.", "year": 2003, "venue": "TNET", "authors": [ "C. Intanagonwiwat", "R. Govindan", "D. Estrin", "J. Heidemann", "Fabio Silva" ], "externalIds": { "DBLP": "journals/ton/IntanagonwiwatGEHS03", "MAG": "2120417050", "DOI": "10.1109/TNET.2002.808417", "CorpusId": 1044574 }, "url": "https://www.semanticscholar.org/paper/fef2ed5c7c4eca9594ed91283f7ece6d754d3c66", "referenceCount": 42, "citationCount": 2733, "influentialCitationCount": 176, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Matrix analysis", "abstract": "Linear algebra and matrix theory are fundamental tools in mathematical and physical science, as well as fertile fields for research. This new edition of the acclaimed text presents results of both classic and recent matrix analyses using canonical forms as a unifying theme, and demonstrates their importance in a variety of applications. The authors have thoroughly revised, updated, and expanded on the first edition. The book opens with an extended summary of useful concepts and facts and includes numerous new topics and features, such as: - New sections on the singular value and CS decompositions - New applications of the Jordan canonical form - A new section on the Weyr canonical form - Expanded treatments of inverse problems and of block matrices - A central role for the Von Neumann trace theorem - A new appendix with a modern list of canonical forms for a pair of Hermitian matrices and for a symmetric-skew symmetric pair - Expanded index with more than 3,500 entries for easy reference - More than 1,100 problems and exercises, many with hints, to reinforce understanding and develop auxiliary themes such as finite-dimensional quantum systems, the compound and adjugate matrices, and the Loewner ellipsoid - A new appendix provides a collection of problem-solving hints.", "year": 1985, "venue": "", "authors": [ "R. Horn", "Charles R. 
Johnson" ], "externalIds": { "DBLP": "books/daglib/0019187", "MAG": "391578156", "DOI": "10.1017/CBO9780511810817", "CorpusId": 43188859 }, "url": "https://www.semanticscholar.org/paper/721f54f6fa32f5f02c5124a2b73ce5f4280b4eaf", "referenceCount": 0, "citationCount": 26557, "influentialCitationCount": 2115, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Federated Learning under Arbitrary Communication Patterns", "abstract": "Federated Learning is a distributed learning setting where the goal is to train a centralized model with training data distributed over a large number of heterogeneous clients, each with unreliable and relatively slow network connections. A common optimization approach used in federated learning is based on the idea of local SGD : each client runs some number of SGD steps locally and then the updated local models are averaged to form the updated global model on the coordinating server. In this paper, we investigate the performance of an asynchronous version of local SGD wherein the clients can communicate with the server at arbitrary time intervals. Our main result shows that for smooth strongly convex and smooth nonconvex functions we achieve convergence rates that match the synchronous version that requires all clients to communicate simultaneously.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Dmitrii Avdiukhin", "S. Kasiviswanathan" ], "externalIds": { "DBLP": "conf/icml/AvdiukhinK21", "CorpusId": 235825373 }, "url": "https://www.semanticscholar.org/paper/d9e7f1e1544da1bf527f12c114cb4cbd9b4a73e2", "referenceCount": 36, "citationCount": 49, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fully Decentralized Federated Learning", "abstract": "We consider the problem of training a machine learning model over a network of users in a fully decentralized framework. The users take a Bayesian-like approach via the introduction of a belief over the model parameter space. We propose a distributed learning algorithm in which users update their belief by aggregate information from their one-hop neighbors to learn a model that best fits the observations over the entire network. In addition, we also obtain sufficient conditions to ensure that the probability of error is small for every user in the network. 
Finally, we discuss approximations required for applying this algorithm for training Neural Networks.", "year": 2018, "venue": "", "authors": [ "Anusha Lalitha" ], "externalIds": { "CorpusId": 202597056 }, "url": "https://www.semanticscholar.org/paper/2ecc7707aa49b7baa2f4bbc5d8491d0d464bdb9c", "referenceCount": 6, "citationCount": 143, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Some methods of speeding up the convergence of iteration methods", "abstract": null, "year": 1964, "venue": "", "authors": [ "Boris Polyak" ], "externalIds": { "MAG": "1988720110", "DOI": "10.1016/0041-5553(64)90137-5", "CorpusId": 120243018 }, "url": "https://www.semanticscholar.org/paper/4b53e3f719ff983eef867c6d8deac5dbe38aecb4", "referenceCount": 4, "citationCount": 2692, "influentialCitationCount": 251, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] } ] }, "Historical Trajectory Assisted Zeroth-Order Federated Optimization": { "paper_title": "Historical Trajectory Assisted Zeroth-Order Federated Optimization", "arxiv_id": "2409.15955v2", "keyword": "federate learning", "authors": [ "Xiaoyu He", "Chenlin Wu", "Zike Li", "Zibin Zheng" ], "references": [ { "title": "SAM: An Efficient Approach With Selective Aggregation of Models in Federated Learning", "abstract": "Federated learning (FL) is a promising distributed learning mechanism that revolutionizes our interaction with data in the IoT ecosystem. Due to the rapidly growing scale of smart devices and the limited transmission resources of networks, a simple, consistent, and scalable FL framework aiming to address the communication bottleneck is urgently needed. In this work, we propose an efficient approach with selective aggregation of models (SAMs) to mitigate the communication overload in FL systems. The introduction of SAM enables each local client to upload its model with a certain probability, resulting in a significant reduction in costly communication expenses. We design the algorithm for SAM, analyze the convergence bound on nonconvex objectives for heterogeneous data, which illustrates the impact of the selection probability as well as the set size of participating clients on the system performance, and assess the conservation for the network resource utilization by modeling queuing systems. We conduct various experiments to evaluate the performance of SAM, whose outcomes suggest that significant alleviation of the communication bottleneck can be accomplished with marginal cost of performance loss. It will also be shown that SAM is a communication-efficient method that can be freely applied to other frameworks.", "year": 2024, "venue": "IEEE Internet of Things Journal", "authors": [ "Yuchen Shi", "Pingyi Fan", "Zheqi Zhu", "Chenghui Peng", "Fei Wang", "K. Letaief" ], "externalIds": { "DBLP": "journals/iotj/ShiFZPWL24", "DOI": "10.1109/JIOT.2024.3373822", "CorpusId": 268351573 }, "url": "https://www.semanticscholar.org/paper/a14412e6009e1885229a8257fbd36dea200688f6", "referenceCount": 43, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Revisiting Zeroth-Order Optimization for Memory-Efficient LLM Fine-Tuning: A Benchmark", "abstract": "In the evolving landscape of natural language processing (NLP), fine-tuning pre-trained Large Language Models (LLMs) with first-order (FO) optimizers like SGD and Adam has become standard. 
Yet, as LLMs grow {in size}, the substantial memory overhead from back-propagation (BP) for FO gradient computation presents a significant challenge. Addressing this issue is crucial, especially for applications like on-device training where memory efficiency is paramount. This paper proposes a shift towards BP-free, zeroth-order (ZO) optimization as a solution for reducing memory costs during LLM fine-tuning, building on the initial concept introduced by MeZO. Unlike traditional ZO-SGD methods, our work expands the exploration to a wider array of ZO optimization techniques, through a comprehensive, first-of-its-kind benchmarking study across five LLM families (Roberta, OPT, LLaMA, Vicuna, Mistral), three task complexities, and five fine-tuning schemes. Our study unveils previously overlooked optimization principles, highlighting the importance of task alignment, the role of the forward gradient method, and the balance between algorithm complexity and fine-tuning performance. We further introduce novel enhancements to ZO optimization, including block-wise descent, hybrid training, and gradient sparsity. Our study offers a promising direction for achieving further memory-efficient LLM fine-tuning. Codes to reproduce all our experiments are at https://github.com/ZO-Bench/ZO-LLM .", "year": 2024, "venue": "International Conference on Machine Learning", "authors": [ "Yihua Zhang", "Pingzhi Li", "Junyuan Hong", "Jiaxiang Li", "Yimeng Zhang", "Wenqing Zheng", "Pin-Yu Chen", "Jason D. Lee", "Wotao Yin", "Mingyi Hong", "Zhangyang Wang", "Sijia Liu", "Tianlong Chen" ], "externalIds": { "DBLP": "conf/icml/ZhangLHLZZCLY0W24", "ArXiv": "2402.11592", "DOI": "10.48550/arXiv.2402.11592", "CorpusId": 267750841 }, "url": "https://www.semanticscholar.org/paper/e81c91cd71a3310e33e1bffc713aec4de608f40b", "referenceCount": 87, "citationCount": 12, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Learning of Large Language Models with Parameter-Efficient Prompt Tuning and Adaptive Optimization", "abstract": "Federated learning (FL) is a promising paradigm to enable collaborative model training with decentralized data. However, the training process of Large Language Models (LLMs) generally incurs the update of significant parameters, which limits the applicability of FL techniques to tackle the LLMs in real scenarios. Prompt tuning can significantly reduce the number of parameters to update, but it either incurs performance degradation or low training efficiency. The straightforward utilization of prompt tuning in the FL often raises non-trivial communication costs and dramatically degrades performance. In addition, the decentralized data is generally non-Independent and Identically Distributed (non-IID), which brings client drift problems and thus poor performance. This paper proposes a Parameter-efficient prompt Tuning approach with Adaptive Optimization, i.e., FedPepTAO, to enable efficient and effective FL of LLMs. First, an efficient partial prompt tuning approach is proposed to improve performance and efficiency simultaneously. Second, a novel adaptive optimization method is developed to address the client drift problems on both the device and server sides to enhance performance further. Extensive experiments based on 10 datasets demonstrate the superb performance (up to 60.8\\% in terms of accuracy) and efficiency (up to 97.59\\% in terms of training time) of FedPepTAO compared with 9 baseline approaches. 
Our code is available at https://github.com/llm-eff/FedPepTAO.", "year": 2023, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Tianshi Che", "Ji Liu", "Yang Zhou", "Jiaxiang Ren", "Jiwen Zhou", "Victor S. Sheng", "H. Dai", "D. Dou" ], "externalIds": { "DBLP": "journals/corr/abs-2310-15080", "ArXiv": "2310.15080", "DOI": "10.48550/arXiv.2310.15080", "CorpusId": 264436414 }, "url": "https://www.semanticscholar.org/paper/67ffe6037cf058b8c5b39f59693c4c349cc1e456", "referenceCount": 96, "citationCount": 26, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Efficient Federated Prompt Tuning for Black-box Large Pre-trained Models", "abstract": "With the blowout development of pre-trained models (PTMs), the efficient tuning of these models for diverse downstream applications has emerged as a pivotal research concern. Although recent investigations into prompt tuning have provided promising avenues, three salient challenges persist: (1) memory constraint: the continuous growth in the size of open-source PTMs renders fine-tuning, even a fraction of their parameters, challenging for many practitioners. (2) model privacy: existing PTMs often function as public API services, with their parameters inaccessible for effective or tailored fine-tuning. (3) data privacy: the fine-tuning of PTMs necessitates high-quality datasets, which are typically localized and not shared to public. To optimally harness each local dataset while navigating memory constraints and preserving privacy, we propose Federated Black-Box Prompt Tuning (Fed-BBPT). This innovative approach eschews reliance on parameter architectures and private dataset access, instead capitalizing on a central server that aids local users in collaboratively training a prompt generator through regular aggregation. Local users leverage API-driven learning via a zero-order optimizer, obviating the need for PTM deployment. Relative to extensive fine-tuning, Fed-BBPT proficiently sidesteps memory challenges tied to PTM storage and fine-tuning on local machines, tapping into comprehensive, high-quality, yet private training datasets. A thorough evaluation across 40 datasets spanning CV and NLP tasks underscores the robustness of our proposed model.", "year": 2023, "venue": "arXiv.org", "authors": [ "Zihao Lin", "Yan Sun", "Yifan Shi", "Xueqian Wang", "Lifu Huang", "Li Shen", "Dacheng Tao" ], "externalIds": { "ArXiv": "2310.03123", "DBLP": "journals/corr/abs-2310-03123", "DOI": "10.48550/arXiv.2310.03123", "CorpusId": 263671921 }, "url": "https://www.semanticscholar.org/paper/ab90da70bf4671bb95d8e7ff97b2cce19768c579", "referenceCount": 95, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedBPT: Efficient Federated Black-box Prompt Tuning for Large Language Models", "abstract": "Pre-trained language models (PLM) have revolutionized the NLP landscape, achieving stellar performances across diverse tasks. These models, while benefiting from vast training data, often require fine-tuning on specific data to cater to distinct downstream tasks. However, this data adaptation process has inherent security and privacy concerns, primarily when leveraging user-generated, device-residing data. Federated learning (FL) provides a solution, allowing collaborative model fine-tuning without centralized data collection. 
However, applying FL to finetune PLMs is hampered by challenges, including restricted model parameter access, high computational requirements, and communication overheads. This paper introduces Federated Black-box Prompt Tuning (FedBPT), a framework designed to address these challenges. FedBPT does not require the clients to access the model parameters. By focusing on training optimal prompts and utilizing gradient-free optimization methods, FedBPT reduces the number of exchanged variables, boosts communication efficiency, and minimizes computational and storage costs. Experiments highlight the framework's ability to drastically cut communication and memory costs while maintaining competitive performance. Ultimately, FedBPT presents a promising solution for efficient, privacy-preserving fine-tuning of PLM in the age of large language models.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Jingwei Sun", "Ziyue Xu", "Hongxu Yin", "Dong Yang", "Daguang Xu", "Yiran Chen", "Holger Roth" ], "externalIds": { "ArXiv": "2310.01467", "DBLP": "conf/icml/00020Y0XLD0R24", "DOI": "10.48550/arXiv.2310.01467", "CorpusId": 263608989 }, "url": "https://www.semanticscholar.org/paper/187f4521e6080f93fc2a26bf91b4e7d64f94e18a", "referenceCount": 54, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedZeN: Towards superlinear zeroth-order federated learning via incremental Hessian estimation", "abstract": "Federated learning is a distributed learning framework that allows a set of clients to collaboratively train a model under the orchestration of a central server, without sharing raw data samples. Although in many practical scenarios the derivatives of the objective function are not available, only few works have considered the federated zeroth-order setting, in which functions can only be accessed through a budgeted number of point evaluations. In this work we focus on convex optimization and design the first federated zeroth-order algorithm to estimate the curvature of the global objective, with the purpose of achieving superlinear convergence. We take an incremental Hessian estimator whose error norm converges linearly, and we adapt it to the federated zeroth-order setting, sampling the random search directions from the Stiefel manifold for improved performance. In particular, both the gradient and Hessian estimators are built at the central server in a communication-efficient and privacy-preserving way by leveraging synchronized pseudo-random number generators. We provide a theoretical analysis of our algorithm, named FedZeN, proving local quadratic convergence with high probability and global linear convergence up to zeroth-order precision. Numerical simulations confirm the superlinear convergence rate and show that our algorithm outperforms the federated zeroth-order methods available in the literature.", "year": 2023, "venue": "arXiv.org", "authors": [ "A. Maritan", "S. Dey", "L. 
Schenato" ], "externalIds": { "DBLP": "journals/corr/abs-2309-17174", "ArXiv": "2309.17174", "DOI": "10.48550/arXiv.2309.17174", "CorpusId": 263310971 }, "url": "https://www.semanticscholar.org/paper/6a17203d0ebed9b7dcc7c4d367e0929d47eb937e", "referenceCount": 20, "citationCount": 6, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Federated Zeroth-Order Optimization using Trajectory-Informed Surrogate Gradients", "abstract": "Federated optimization, an emerging paradigm which finds wide real-world applications such as federated learning, enables multiple clients (e.g., edge devices) to collaboratively optimize a global function. The clients do not share their local datasets and typically only share their local gradients. However, the gradient information is not available in many applications of federated optimization, which hence gives rise to the paradigm of federated zeroth-order optimization (ZOO). Existing federated ZOO algorithms suffer from the limitations of query and communication inefficiency, which can be attributed to (a) their reliance on a substantial number of function queries for gradient estimation and (b) the significant disparity between their realized local updates and the intended global updates. To this end, we (a) introduce trajectory-informed gradient surrogates which is able to use the history of function queries during optimization for accurate and query-efficient gradient estimation, and (b) develop the technique of adaptive gradient correction using these gradient surrogates to mitigate the aforementioned disparity. Based on these, we propose the federated zeroth-order optimization using trajectory-informed surrogate gradients (FZooS) algorithm for query- and communication-efficient federated ZOO. Our FZooS achieves theoretical improvements over the existing approaches, which is supported by our real-world experiments such as federated black-box adversarial attack and federated non-differentiable metric optimization.", "year": 2023, "venue": "arXiv.org", "authors": [ "Yao Shu", "Xiaoqiang Lin", "Zhongxiang Dai", "B. Low" ], "externalIds": { "DBLP": "journals/corr/abs-2308-04077", "ArXiv": "2308.04077", "DOI": "10.48550/arXiv.2308.04077", "CorpusId": 260704338 }, "url": "https://www.semanticscholar.org/paper/d2c4b5aa659e52d43ad8572f9a413756f1ee288b", "referenceCount": 34, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Communication-Efficient Federated Multitask Learning Over Wireless Networks", "abstract": "This article investigates the scheduling framework of the federated multitask learning (FMTL) problem with a hard-cooperation structure over wireless networks, in which the scheduling becomes more challenging due to the different convergence behaviors of different tasks. Based on the special model structure, we propose a dynamic user and task scheduling scheme with a block-wise incremental gradient aggregation algorithm, in which the neural network model is decomposed into a common feature-extraction module and $M$ task-specific modules. Different block gradients with respect to different modules can be scheduled separately. We further propose a Lyapunov-drift-based scheduling scheme that minimizes the overall communication latency by utilizing both the instantaneous data importance and the channel state information. 
We prove that the proposed scheme can converge almost surely to a KKT solution of the training problem such that the data-distortion issue is resolved. Simulation results illustrate that the proposed scheme significantly reduces the communication latency compared to the state-of-the-art baseline schemes.", "year": 2023, "venue": "IEEE Internet of Things Journal", "authors": [ "Haoyu Ma", "Huayan Guo", "V. Lau" ], "externalIds": { "DBLP": "journals/iotj/MaGL23", "DOI": "10.1109/JIOT.2022.3201310", "CorpusId": 251822650 }, "url": "https://www.semanticscholar.org/paper/621fac3a1ee0997119ee027b742e443bfaf01c66", "referenceCount": 58, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedALA: Adaptive Local Aggregation for Personalized Federated Learning", "abstract": "A key challenge in federated learning (FL) is the statistical heterogeneity that impairs the generalization of the global model on each client. To address this, we propose a method Federated learning with Adaptive Local Aggregation (FedALA) by capturing the desired information in the global model for client models in personalized FL. The key component of FedALA is an Adaptive Local Aggregation (ALA) module, which can adaptively aggregate the downloaded global model and local model towards the local objective on each client to initialize the local model before training in each iteration. To evaluate the effectiveness of FedALA, we conduct extensive experiments with five benchmark datasets in computer vision and natural language processing domains. FedALA outperforms eleven state-of-the-art baselines by up to 3.27% in test accuracy. Furthermore, we also apply ALA module to other federated learning methods and achieve up to 24.19% improvement in test accuracy. Code is available at https://github.com/TsingZ0/FedALA.", "year": 2022, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Jianqing Zhang", "Yang Hua", "Hao Wang", "Tao Song", "Zhengui Xue", "Ruhui Ma", "Haibing Guan" ], "externalIds": { "DBLP": "conf/aaai/ZhangHWSXMG23", "ArXiv": "2212.01197", "DOI": "10.1609/aaai.v37i9.26330", "CorpusId": 254220922 }, "url": "https://www.semanticscholar.org/paper/8022ea93560908bb57b5ff0a668f21079d0ccdaa", "referenceCount": 36, "citationCount": 90, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Generalizing Gaussian Smoothing for Random Search", "abstract": "Gaussian smoothing (GS) is a derivative-free optimization (DFO) algorithm that estimates the gradient of an objective using perturbations of the current parameters sampled from a standard normal distribution. We generalize it to sampling perturbations from a larger family of distributions. Based on an analysis of DFO for non-convex functions, we propose to choose a distribution for perturbations that minimizes the mean squared error (MSE) of the gradient estimate. We derive three such distributions with provably smaller MSE than Gaussian smoothing. We conduct evaluations of the three sampling distributions on linear regression, reinforcement learning, and DFO benchmarks in order to validate our claims. 
Our proposal improves on GS with the same computational complexity, and are usually competitive with and often outperform Guided ES and Orthogonal ES, two computationally more expensive algorithms that adapt the covariance matrix of normally distributed perturbations.", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "Katelyn Gao", "Ozan Sener" ], "externalIds": { "ArXiv": "2211.14721", "DBLP": "conf/icml/GaoS22", "DOI": "10.48550/arXiv.2211.14721", "CorpusId": 250340702 }, "url": "https://www.semanticscholar.org/paper/85fafd42bacd2795b7c74e47ba7815cf2070d2b1", "referenceCount": 36, "citationCount": 10, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Zeroth-Order Negative Curvature Finding: Escaping Saddle Points without Gradients", "abstract": "We consider escaping saddle points of nonconvex problems where only the function evaluations can be accessed. Although a variety of works have been proposed, the majority of them require either second or first-order information, and only a few of them have exploited zeroth-order methods, particularly the technique of negative curvature finding with zeroth-order methods which has been proven to be the most efficient method for escaping saddle points. To fill this gap, in this paper, we propose two zeroth-order negative curvature finding frameworks that can replace Hessian-vector product computations without increasing the iteration complexity. We apply the proposed frameworks to ZO-GD, ZO-SGD, ZO-SCSG, ZO-SPIDER and prove that these ZO algorithms can converge to $(\\epsilon,\\delta)$-approximate second-order stationary points with less query complexity compared with prior zeroth-order works for finding local minima.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Hualin Zhang", "Huan Xiong", "Bin Gu" ], "externalIds": { "DBLP": "journals/corr/abs-2210-01496", "ArXiv": "2210.01496", "DOI": "10.48550/arXiv.2210.01496", "CorpusId": 252692992 }, "url": "https://www.semanticscholar.org/paper/0ebc455d8c5ca3c71658b71b237272c764577395", "referenceCount": 56, "citationCount": 6, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Adaptive Evolution Strategies for Stochastic Zeroth-Order Optimization", "abstract": "We consider solving a class of unconstrained optimization problems in which only stochastic estimates of the objective functions are available. Existing stochastic optimization methods are mainly extended from gradient-based methods, faced with the challenges of noisy function evaluations, hardness in choosing step-sizes, and probably ill-conditioned landscapes. This paper presents a stochastic evolution strategy (SES) framework and several adaptation schemes to avoid these challenges. The SES framework combines the ideas of population sampling and minibatch sampling in exploiting the zeroth-order gradient information, efficiently reducing the noise in both data selection and gradient approximation. In addition, it admits approximating the gradients using a non-isotropic Gaussian distribution to better capture the curvature information of the landscapes. Based on this framework, we implement a step-size adaptation rule and two covariance matrix adaptation rules, where the former can automatically tune the step-sizes and the latter are intended to cope with ill-conditioning. 
For SES with certain fixed step-sizes, we establish a nearly optimal convergence rate over smooth landscapes. We also show that using the adaptive step-sizes allows convergence at a slightly slower rate but without the need to know the smoothness constant. Several numerical experiments on machine learning problems verify the above theoretical results and suggest that the adaptive SES methods show much promise.", "year": 2022, "venue": "IEEE Transactions on Emerging Topics in Computational Intelligence", "authors": [ "Xiaoyu He", "Zibin Zheng", "Zefeng Chen", "Yuren Zhou" ], "externalIds": { "DBLP": "journals/tetci/HeZCZ22", "DOI": "10.1109/TETCI.2022.3146330", "CorpusId": 247044920 }, "url": "https://www.semanticscholar.org/paper/603158382a7577aaf365c8a21a421de4ac351e9f", "referenceCount": 65, "citationCount": 4, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedPrompt: Communication-Efficient and Privacy-Preserving Prompt Tuning in Federated Learning", "abstract": "Federated learning (FL) has enabled global model training on decentralized data in a privacy-preserving way. However, for tasks that utilize pre-trained language models (PLMs) with massive parameters, there are considerable communication costs. Prompt tuning, which tunes soft prompts without modifying PLMs, has achieved excellent performance as a new learning paradigm. In this paper, we want to combine these methods and explore the effect of prompt tuning under FL. We propose \"FedPrompt\" studying prompt tuning in a model split aggregation way using FL, and prove that split aggregation greatly reduces the communication cost, only 0.01% of the PLMs’ parameters, with little decrease on accuracy both on IID and Non-IID data distribution. We further conduct backdoor attacks by data poisoning on FedPrompt. Experiments show that attack achieve a quite low attack success rate and can not inject backdoor effectively, proving the robustness of FedPrompt.", "year": 2022, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Haodong Zhao", "Wei Du", "Fang Li", "Peixuan Li", "Gongshen Liu" ], "externalIds": { "DBLP": "conf/icassp/ZhaoDLLL23", "ArXiv": "2208.12268", "DOI": "10.1109/ICASSP49357.2023.10095356", "CorpusId": 252762685 }, "url": "https://www.semanticscholar.org/paper/15abd9759bc65f560abf74eb5bf14ce40a0c7526", "referenceCount": 50, "citationCount": 40, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Communication-Efficient Stochastic Zeroth-Order Optimization for Federated Learning", "abstract": "Federated learning (FL), as an emerging edge artificial intelligence paradigm, enables many edge devices to collaboratively train a global model without sharing their private data. To enhance the training efficiency of FL, various algorithms have been proposed, ranging from first-order to second-order methods. However, these algorithms cannot be applied in scenarios where the gradient information is not available, e.g., federated black-box attack and federated hyperparameter tuning. To address this issue, in this paper we propose a derivative-free federated zeroth-order optimization (FedZO) algorithm featured by performing multiple local updates based on stochastic gradient estimators in each communication round and enabling partial device participation. 
Under non-convex settings, we derive the convergence performance of the FedZO algorithm on non-independent and identically distributed data and characterize the impact of the numbers of local iterates and participating edge devices on the convergence. To enable communication-efficient FedZO over wireless networks, we further propose an over-the-air computation (AirComp) assisted FedZO algorithm. With an appropriate transceiver design, we show that the convergence of AirComp-assisted FedZO can still be preserved under certain signal-to-noise ratio conditions. Simulation results demonstrate the effectiveness of the FedZO algorithm and validate the theoretical observations.", "year": 2022, "venue": "IEEE Transactions on Signal Processing", "authors": [ "Wenzhi Fang", "Ziyi Yu", "Yuning Jiang", "Yuanming Shi", "C. Jones", "Yong Zhou" ], "externalIds": { "ArXiv": "2201.09531", "DBLP": "journals/corr/abs-2201-09531", "DOI": "10.1109/TSP.2022.3214122", "CorpusId": 246240536 }, "url": "https://www.semanticscholar.org/paper/55b2e2252244c83cb20b3d74ae26200d7cce6f61", "referenceCount": 53, "citationCount": 41, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Tackling System and Statistical Heterogeneity for Federated Learning with Adaptive Client Sampling", "abstract": "Federated learning (FL) algorithms usually sample a fraction of clients in each round (partial participation) when the number of participants is large and the server’s communication bandwidth is limited. Recent works on the convergence analysis of FL have focused on unbiased client sampling, e.g., sampling uniformly at random, which suffers from slow wall-clock time for convergence due to high degrees of system heterogeneity and statistical heterogeneity. This paper aims to design an adaptive client sampling algorithm that tackles both system and statistical heterogeneity to minimize the wall-clock convergence time. We obtain a new tractable convergence bound for FL algorithms with arbitrary client sampling probabilities. Based on the bound, we analytically establish the relationship between the total learning time and sampling probabilities, which results in a non-convex optimization problem for training time minimization. We design an efficient algorithm for learning the unknown parameters in the convergence bound and develop a low-complexity algorithm to approximately solve the non-convex problem. Experimental results from both hardware prototype and simulation demonstrate that our proposed sampling scheme significantly reduces the convergence time compared to several baseline sampling schemes. Notably, our scheme in hardware prototype spends 73% less time than the uniform sampling baseline for reaching the same target loss.", "year": 2021, "venue": "IEEE Conference on Computer Communications", "authors": [ "Bing Luo", "Wenli Xiao", "Shiqiang Wang", "Jianwei Huang", "L. 
Tassiulas" ], "externalIds": { "ArXiv": "2112.11256", "DBLP": "conf/infocom/LuoXWHT22", "DOI": "10.1109/INFOCOM48880.2022.9796935", "CorpusId": 245353679 }, "url": "https://www.semanticscholar.org/paper/16748bfd9e120dd8c6ebab1b591694ba04b3942c", "referenceCount": 47, "citationCount": 123, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Adaptive sampling quasi-Newton methods for zeroth-order stochastic optimization", "abstract": null, "year": 2021, "venue": "Mathematical Programming Computation", "authors": [ "Raghu Bollapragada", "Stefan M. Wild" ], "externalIds": { "DBLP": "journals/mpc/BollapragadaW23", "ArXiv": "2109.12213", "DOI": "10.1007/s12532-023-00233-9", "CorpusId": 237941128 }, "url": "https://www.semanticscholar.org/paper/ebebad5d9d7c4d4aa350b20290705a97814ee1ac", "referenceCount": 67, "citationCount": 9, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Self-Guided Evolution Strategies with Historical Estimated Gradients", "abstract": "Evolution Strategies (ES) are a class of black-box optimization algorithms and have been widely applied to solve problems, e.g., in reinforcement learning (RL), where the true gradient is unavailable. ES estimate the gradient of an objective function with respect to the parameters by randomly sampling search directions and evaluating parameter perturbations in these directions. However, the gradient estimator of ES tends to have a high variance for high-dimensional optimization, thus requiring a large number of samples and making ES inefficient. In this paper, we propose a new ES algorithm SGES, which utilizes historical estimated gradients to construct a low-dimensional subspace for sampling search directions, and adjusts the importance of this subspace adaptively. We prove that the variance of the gradient estimator of SGES can be much smaller than that of Vanilla ES; meanwhile, its bias can be well bounded. Empirical results on benchmark black-box functions and a set of popular RL tasks exhibit the superior performance of SGES over state-of-the-art ES algorithms.", "year": 2020, "venue": "International Joint Conference on Artificial Intelligence", "authors": [ "Fei Liu", "Ziniu Li", "Chao Qian" ], "externalIds": { "MAG": "3034606848", "DBLP": "conf/ijcai/LiuLQ20", "DOI": "10.24963/ijcai.2020/205", "CorpusId": 220480934 }, "url": "https://www.semanticscholar.org/paper/ad154aa2b6442d724d1093752143392334548e34", "referenceCount": 28, "citationCount": 21, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Psychology" ] }, { "title": "Adaptive Federated Optimization", "abstract": "Federated learning is a distributed machine learning paradigm in which a large number of clients coordinate with a central server to learn a model without sharing their own training data. Due to the heterogeneity of the client datasets, standard federated optimization methods such as Federated Averaging (FedAvg) are often difficult to tune and exhibit unfavorable convergence behavior. In non-federated settings, adaptive optimization methods have had notable success in combating such issues. In this work, we propose federated versions of adaptive optimizers, including Adagrad, Adam, and Yogi, and analyze their convergence in the presence of heterogeneous data for general nonconvex settings. Our results highlight the interplay between client heterogeneity and communication efficiency. 
We also perform extensive experiments on these methods and show that the use of adaptive optimizers can significantly improve the performance of federated learning.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Sashank J. Reddi", "Zachary B. Charles", "M. Zaheer", "Zachary Garrett", "Keith Rush", "Jakub Konecný", "Sanjiv Kumar", "H. B. McMahan" ], "externalIds": { "DBLP": "conf/iclr/ReddiCZGRKKM21", "ArXiv": "2003.00295", "MAG": "3008187686", "CorpusId": 211678094 }, "url": "https://www.semanticscholar.org/paper/47c528344fedb6cb67a38e43d095b41c34715330", "referenceCount": 54, "citationCount": 1149, "influentialCitationCount": 234, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Hessian-Aware Zeroth-Order Optimization for Black-Box Adversarial Attack", "abstract": "Zeroth-order optimization is an important research topic in machine learning. In recent years, it has become a key tool in black-box adversarial attack to neural network based image classifiers. However, existing zeroth-order optimization algorithms rarely extract second-order information of the model function. In this paper, we utilize the second-order information of the objective function and propose a novel \\textit{Hessian-aware zeroth-order algorithm} called \\texttt{ZO-HessAware}. Our theoretical result shows that \\texttt{ZO-HessAware} has an improved zeroth-order convergence rate and query complexity under structured Hessian approximation, where we propose a few approximation methods for estimating Hessian. Our empirical studies on the black-box adversarial attack problem validate that our algorithm can achieve improved success rates with a lower query complexity.", "year": 2018, "venue": "arXiv.org", "authors": [ "Haishan Ye", "Zhichao Huang", "Cong Fang", "C. J. Li", "T. Zhang" ], "externalIds": { "MAG": "2907014936", "DBLP": "journals/corr/abs-1812-11377", "ArXiv": "1812.11377", "CorpusId": 57189486 }, "url": "https://www.semanticscholar.org/paper/9f15050231fcdc21deacebb314f39fc133ce2c9e", "referenceCount": 28, "citationCount": 39, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Zeroth-Order Nonconvex Stochastic Optimization: Handling Constraints, High Dimensionality, and Saddle Points", "abstract": null, "year": 2018, "venue": "Foundations of Computational Mathematics", "authors": [ "K. Balasubramanian", "Saeed Ghadimi" ], "externalIds": { "MAG": "3136297071", "DBLP": "journals/focm/Balasubramanian22", "DOI": "10.1007/s10208-021-09499-8", "CorpusId": 58004708 }, "url": "https://www.semanticscholar.org/paper/5dfaeee6b6c0de066c036e4dcc7712ab8b894f83", "referenceCount": 88, "citationCount": 102, "influentialCitationCount": 26, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Guided evolutionary strategies: augmenting random search with surrogate gradients", "abstract": "Many applications in machine learning require optimizing a function whose true gradient is unknown, but where surrogate gradient information (directions that may be correlated with, but not necessarily identical to, the true gradient) is available instead. This arises when an approximate gradient is easier to compute than the full gradient (e.g. in meta-learning or unrolled optimization), or when a true gradient is intractable and is replaced with a surrogate (e.g. 
in certain reinforcement learning applications, or when using synthetic gradients). We propose Guided Evolutionary Strategies, a method for optimally using surrogate gradient directions along with random search. We define a search distribution for evolutionary strategies that is elongated along a guiding subspace spanned by the surrogate gradients. This allows us to estimate a descent direction which can then be passed to a first-order optimizer. We analytically and numerically characterize the tradeoffs that result from tuning how strongly the search distribution is stretched along the guiding subspace, and we use this to derive a setting of the hyperparameters that works well across problems. Finally, we apply our method to example problems, demonstrating an improvement over both standard evolutionary strategies and first-order methods (that directly follow the surrogate gradient). We provide a demo of Guided ES at this https URL", "year": 2018, "venue": "International Conference on Machine Learning", "authors": [ "Niru Maheswaranathan", "Luke Metz", "G. Tucker", "Dami Choi", "Jascha Narain Sohl-Dickstein" ], "externalIds": { "MAG": "2946774369", "DBLP": "conf/icml/Maheswaranathan19", "CorpusId": 174800475 }, "url": "https://www.semanticscholar.org/paper/72cd0216fb7e9889b61e58776ae1a242f98ab067", "referenceCount": 45, "citationCount": 93, "influentialCitationCount": 15, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fashion-MNIST: a Novel Image Dataset for Benchmarking Machine Learning Algorithms", "abstract": "We present Fashion-MNIST, a new dataset comprising of 28x28 grayscale images of 70,000 fashion products from 10 categories, with 7,000 images per category. The training set has 60,000 images and the test set has 10,000 images. Fashion-MNIST is intended to serve as a direct drop-in replacement for the original MNIST dataset for benchmarking machine learning algorithms, as it shares the same image size, data format and the structure of training and testing splits. The dataset is freely available at this https URL", "year": 2017, "venue": "arXiv.org", "authors": [ "Han Xiao", "Kashif Rasul", "Roland Vollgraf" ], "externalIds": { "MAG": "2750384547", "DBLP": "journals/corr/abs-1708-07747", "ArXiv": "1708.07747", "CorpusId": 702279 }, "url": "https://www.semanticscholar.org/paper/f9c602cc436a9ea2f9e7db48c77d924e09ce3c32", "referenceCount": 6, "citationCount": 7771, "influentialCitationCount": 1471, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Communication-Efficient Learning of Deep Networks from Decentralized Data", "abstract": "Modern mobile devices have access to a wealth of data suitable for learning models, which in turn can greatly improve the user experience on the device. For example, language models can improve speech recognition and text entry, and image models can automatically select good photos. However, this rich data is often privacy sensitive, large in quantity, or both, which may preclude logging to the data center and training there using conventional approaches. We advocate an alternative that leaves the training data distributed on the mobile devices, and learns a shared model by aggregating locally-computed updates. We term this decentralized approach Federated Learning. 
\nWe present a practical method for the federated learning of deep networks based on iterative model averaging, and conduct an extensive empirical evaluation, considering five different model architectures and four datasets. These experiments demonstrate the approach is robust to the unbalanced and non-IID data distributions that are a defining characteristic of this setting. Communication costs are the principal constraint, and we show a reduction in required communication rounds by 10-100x as compared to synchronized stochastic gradient descent.", "year": 2016, "venue": "International Conference on Artificial Intelligence and Statistics", "authors": [ "H. B. McMahan", "Eider Moore", "Daniel Ramage", "S. Hampson", "B. A. Y. Arcas" ], "externalIds": { "MAG": "2950745363", "DBLP": "conf/aistats/McMahanMRHA17", "ArXiv": "1602.05629", "CorpusId": 14955348 }, "url": "https://www.semanticscholar.org/paper/d1dbf643447405984eeef098b1b320dee0b3b8a7", "referenceCount": 50, "citationCount": 13806, "influentialCitationCount": 3346, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Random Gradient-Free Minimization of Convex Functions", "abstract": null, "year": 2015, "venue": "Foundations of Computational Mathematics", "authors": [ "Y. Nesterov", "V. Spokoiny" ], "externalIds": { "MAG": "2149479912", "DBLP": "journals/focm/NesterovS17", "DOI": "10.1007/s10208-015-9296-2", "CorpusId": 2147817 }, "url": "https://www.semanticscholar.org/paper/21a0b0fbdde1aee56fe10e69e897decaf21f43a6", "referenceCount": 60, "citationCount": 969, "influentialCitationCount": 229, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Stochastic First- and Zeroth-Order Methods for Nonconvex Stochastic Programming", "abstract": "In this paper, we introduce a new stochastic approximation (SA) type algorithm, namely the randomized stochastic gradient (RSG) method, for solving an important class of nonlinear (possibly nonconvex) stochastic programming (SP) problems. We establish the complexity of this method for computing an approximate stationary point of a nonlinear programming problem. We also show that this method possesses a nearly optimal rate of convergence if the problem is convex. We discuss a variant of the algorithm which consists of applying a post-optimization phase to evaluate a short list of solutions generated by several independent runs of the RSG method, and show that such modification allows to improve significantly the large-deviation properties of the algorithm. 
These methods are then specialized for solving a class of simulation-based optimization problems in which only stochastic zeroth-order information is available.", "year": 2013, "venue": "SIAM Journal on Optimization", "authors": [ "Saeed Ghadimi", "Guanghui Lan" ], "externalIds": { "MAG": "2963470657", "ArXiv": "1309.5549", "DBLP": "journals/corr/GhadimiL13", "DOI": "10.1137/120880811", "CorpusId": 14112046 }, "url": "https://www.semanticscholar.org/paper/8424a9e5a4456a2c45a42e392b9c01cd0c5c9467", "referenceCount": 41, "citationCount": 1384, "influentialCitationCount": 235, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "The MNIST Database of Handwritten Digit Images for Machine Learning Research [Best of the Web]", "abstract": "In this issue, “Best of the Web” presents the modified National Institute of Standards and Technology (MNIST) resources, consisting of a collection of handwritten digit images used extensively in optical character recognition and machine learning research.", "year": 2012, "venue": "IEEE Signal Processing Magazine", "authors": [ "L. Deng" ], "externalIds": { "MAG": "2007339694", "DBLP": "journals/spm/Deng12", "DOI": "10.1109/MSP.2012.2211477", "CorpusId": 5280072 }, "url": "https://www.semanticscholar.org/paper/46f74231b9afeb0c290d6d550043c55045284e5f", "referenceCount": 7, "citationCount": 3704, "influentialCitationCount": 561, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RCV1: A New Benchmark Collection for Text Categorization Research", "abstract": "Reuters Corpus Volume I (RCV1) is an archive of over 800,000 manually categorized newswire stories recently made available by Reuters, Ltd. for research purposes. Use of this data for research on text categorization requires a detailed understanding of the real world constraints under which the data was produced. Drawing on interviews with Reuters personnel and access to Reuters documentation, we describe the coding policy and quality control procedures used in producing the RCV1 data, the intended semantics of the hierarchical category taxonomies, and the corrections necessary to remove errorful data. We refer to the original data as RCV1-v1, and the corrected data as RCV1-v2. We benchmark several widely used supervised learning methods on RCV1-v2, illustrating the collection's properties, suggesting new directions for research, and providing baseline results for future studies. We make available detailed, per-category experimental results, as well as corrected versions of the category assignments and taxonomy structures, via online appendices.", "year": 2004, "venue": "Journal of machine learning research", "authors": [ "D. Lewis", "Yiming Yang", "T. 
Rose", "Fan Li" ], "externalIds": { "MAG": "2150102617", "DBLP": "journals/jmlr/LewisYRL04", "CorpusId": 11027141 }, "url": "https://www.semanticscholar.org/paper/2abe6b9ea1b13653b7384e9c8ef14b0d87e20cfc", "referenceCount": 51, "citationCount": 2989, "influentialCitationCount": 373, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Zeroth-order methods for nondifferentiable, nonconvex, and hierarchical federated optimization", "abstract": null, "year": null, "venue": "Advances in Neural Information Processing Systems", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "FedRepOpt: Gradient Re-parametrized Optimizers in Federated Learning": { "paper_title": "FedRepOpt: Gradient Re-parametrized Optimizers in Federated Learning", "arxiv_id": "2409.15898v3", "keyword": "federate learning", "authors": [ "Kin Wai Lau", "Yasar Abbas Ur Rehman", "Pedro Porto Buarque de Gusmão", "Lai-Man Po", "Lan Ma", "Yuyang Xie" ], "references": [ { "title": "L-DAWA: Layer-wise Divergence Aware Weight Aggregation in Federated Self-Supervised Visual Representation Learning", "abstract": "The ubiquity of camera-enabled devices has led to large amounts of unlabeled image data being produced at the edge. The integration of self-supervised learning (SSL) and federated learning (FL) into one coherent system can potentially offer data privacy guarantees while also advancing the quality and robustness of the learned visual representations without needing to move data around. However, client bias and divergence during FL aggregation caused by data heterogeneity limits the performance of learned visual representations on downstream tasks. In this paper, we propose a new aggregation strategy termed Layer-wise Divergence Aware Weight Aggregation (L-DAWA) to mitigate the influence of client bias and divergence during FL aggregation. The proposed method aggregates weights at the layer-level according to the measure of angular divergence between the clients’ model and the global model. Extensive experiments with cross-silo and cross-device settings on CIFAR-10/100 and Tiny ImageNet datasets demonstrate that our methods are effective and obtain new SOTA performance on both contrastive and non-contrastive SSL approaches.", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Yasar Abbas Ur Rehman", "Yan Gao", "Pedro Gusmão", "Mina Alibeigi", "Jiajun Shen", "Nicholas D. Lane" ], "externalIds": { "DBLP": "conf/iccv/RehmanGGASL23", "ArXiv": "2307.07393", "DOI": "10.1109/ICCV51070.2023.01509", "CorpusId": 259924538 }, "url": "https://www.semanticscholar.org/paper/0b38470d17625bb5342d6c4d3fb43d914ed33a2d", "referenceCount": 55, "citationCount": 9, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Why Batch Normalization Damage Federated Learning on Non-IID Data?", "abstract": "As a promising distributed learning paradigm, federated learning (FL) involves training deep neural network (DNN) models at the network edge while protecting the privacy of the edge clients. To train a large-scale DNN model, batch normalization (BN) has been regarded as a simple and effective means to accelerate the training and improve the generalization capability. However, recent findings indicate that BN can significantly impair the performance of FL in the presence of non-i.i.d. data. 
While several FL algorithms have been proposed to address this issue, their performance still falls significantly when compared to the centralized scheme. Furthermore, none of them have provided a theoretical explanation of how the BN damages the FL convergence. In this article, we present the first convergence analysis to show that under the non-i.i.d. data, the mismatch between the local and global statistical parameters in BN causes the gradient deviation between the local and global models, which, as a result, slows down and biases the FL convergence. In view of this, we develop a new FL algorithm that is tailored to BN, called FedTAN, which is capable of achieving robust FL performance under a variety of data distributions via iterative layer-wise parameter aggregation. Comprehensive experimental results demonstrate the superiority of the proposed FedTAN over existing baselines for training BN-based DNN models.", "year": 2023, "venue": "IEEE Transactions on Neural Networks and Learning Systems", "authors": [ "Yanmeng Wang", "Qingjiang Shi", "Tsung-Hui Chang" ], "externalIds": { "ArXiv": "2301.02982", "DBLP": "journals/corr/abs-2301-02982", "DOI": "10.48550/arXiv.2301.02982", "CorpusId": 255546430, "PubMed": "37910415" }, "url": "https://www.semanticscholar.org/paper/237c7ced7aa9e139f492752df374324c2908ab18", "referenceCount": 43, "citationCount": 21, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "RepGhost: A Hardware-Efficient Ghost Module via Re-parameterization", "abstract": "Feature reuse has been a key technique in light-weight convolutional neural networks (CNNs) architecture design. Current methods usually utilize a concatenation operator to keep large channel numbers cheaply (thus large network capacity) by reusing feature maps from other layers. Although concatenation is parameters- and FLOPs-free, its computational cost on hardware devices is non-negligible. To address this, this paper provides a new perspective to realize feature reuse implicitly and more efficiently instead of concatenation. A novel hardware-efficient RepGhost module is proposed for implicit feature reuse via reparameterization, instead of using concatenation operator. Based on the RepGhost module, we develop our efficient RepGhost bottleneck and RepGhostNet. Experiments on ImageNet and COCO benchmarks demonstrate that our RepGhostNet is much more effective and efficient than GhostNet and MobileNetV3 on mobile devices. Specially, our RepGhostNet surpasses GhostNet 0.5x by 2.5% Top-1 accuracy on ImageNet dataset with less parameters and comparable latency on an ARM-based mobile device. Code and model weights are available at https://github.com/ChengpengChen/RepGhost.", "year": 2022, "venue": "arXiv.org", "authors": [ "Chengpeng Chen", "Zichao Guo", "Haien Zeng", "Pengfei Xiong", "Jian Dong" ], "externalIds": { "DBLP": "journals/corr/abs-2211-06088", "ArXiv": "2211.06088", "DOI": "10.48550/arXiv.2211.06088", "CorpusId": 253499097 }, "url": "https://www.semanticscholar.org/paper/d8d754d93d4a4fcc62838429fd36f795cb8f5d98", "referenceCount": 55, "citationCount": 21, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Re-parameterizing Your Optimizers rather than Architectures", "abstract": "The well-designed structures in neural networks reflect the prior knowledge incorporated into the models. 
However, though different models have various priors, we are used to training them with model-agnostic optimizers such as SGD. In this paper, we propose to incorporate model-specific prior knowledge into optimizers by modifying the gradients according to a set of model-specific hyper-parameters. Such a methodology is referred to as Gradient Re-parameterization, and the optimizers are named RepOptimizers. For the extreme simplicity of model structure, we focus on a VGG-style plain model and showcase that such a simple model trained with a RepOptimizer, which is referred to as RepOpt-VGG, performs on par with or better than the recent well-designed models. From a practical perspective, RepOpt-VGG is a favorable base model because of its simple structure, high inference speed and training efficiency. Compared to Structural Re-parameterization, which adds priors into models via constructing extra training-time structures, RepOptimizers require no extra forward/backward computations and solve the problem of quantization. We hope to spark further research beyond the realms of model structure design. Code and models \\url{https://github.com/DingXiaoH/RepOptimizers}.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Xiaohan Ding", "Honghao Chen", "X. Zhang", "Kaiqiang Huang", "Jungong Han", "Guiguang Ding" ], "externalIds": { "DBLP": "journals/corr/abs-2205-15242", "ArXiv": "2205.15242", "DOI": "10.48550/arXiv.2205.15242", "CorpusId": 249191923 }, "url": "https://www.semanticscholar.org/paper/b65de6b99535e9c3b07fd672b363d4496306eafb", "referenceCount": 61, "citationCount": 43, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Learning Based on Dynamic Regularization", "abstract": "We propose a novel federated learning method for distributively training neural network models, where the server orchestrates cooperation between a subset of randomly chosen devices in each round. We view Federated Learning problem primarily from a communication perspective and allow more device level computations to save transmission costs. We point out a fundamental dilemma, in that the minima of the local-device level empirical loss are inconsistent with those of the global empirical loss. Different from recent prior works, that either attempt inexact minimization or utilize devices for parallelizing gradient computation, we propose a dynamic regularizer for each device at each round, so that in the limit the global and device solutions are aligned. We demonstrate both through empirical results on real and synthetic data as well as analytical results that our scheme leads to efficient training, in both convex and non-convex settings, while being fully agnostic to device heterogeneity and robust to large number of devices, partial participation and unbalanced data.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "D. A. E. Acar", "Yue Zhao", "Ramon Matas Navarro", "Matthew Mattina", "P. 
Whatmough", "Venkatesh Saligrama" ], "externalIds": { "DBLP": "journals/corr/abs-2111-04263", "ArXiv": "2111.04263", "CorpusId": 235614315 }, "url": "https://www.semanticscholar.org/paper/5a3d70689925df014c46d1cd50dfc8a368cb4c86", "referenceCount": 42, "citationCount": 594, "influentialCitationCount": 110, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Learning for Open Banking", "abstract": null, "year": 2021, "venue": "Federated Learning", "authors": [ "Guodong Long", "Yue Tan", "Jing Jiang", "Chengqi Zhang" ], "externalIds": { "DBLP": "journals/corr/abs-2108-10749", "ArXiv": "2108.10749", "MAG": "3107100345", "DOI": "10.1007/978-3-030-63076-8_17", "CorpusId": 227232157 }, "url": "https://www.semanticscholar.org/paper/ec0a52bc9923275f7436b38fbc8c79f10cb83cf2", "referenceCount": 43, "citationCount": 203, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Collaborative Unsupervised Visual Representation Learning from Decentralized Data", "abstract": "Unsupervised representation learning has achieved outstanding performances using centralized data available on the Internet. However, the increasing awareness of privacy protection limits sharing of decentralized unlabeled image data that grows explosively in multiple parties (e.g., mobile phones and cameras). As such, a natural problem is how to leverage these data to learn visual representations for downstream tasks while preserving data privacy. To address this problem, we propose a novel federated unsupervised learning framework, FedU. In this framework, each party trains models from unlabeled data independently using contrastive learning with an online network and a target network. Then, a central server aggregates trained models and updates clients’ models with the aggregated model. It preserves data privacy as each party only has access to its raw data. Decentralized data among multiple parties are normally non-independent and identically distributed (non-IID), leading to performance degradation. To tackle this challenge, we propose two simple but effective methods: 1) We design the communication protocol to upload only the encoders of online networks for server aggregation and update them with the aggregated encoder; 2) We introduce a new module to dynamically decide how to update predictors based on the divergence caused by non-IID. The predictor is the other component of the online network. Extensive experiments and ablations demonstrate the effectiveness and significance of FedU. It outperforms training with only one party by over 5% and other methods by over 14% in linear and semi-supervised evaluation on non-IID data.", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Weiming Zhuang", "Xin Gan", "Yonggang Wen", "Shuai Zhang", "Shuai Yi" ], "externalIds": { "ArXiv": "2108.06492", "DBLP": "conf/iccv/ZhuangG0ZY21", "DOI": "10.1109/ICCV48922.2021.00487", "CorpusId": 237091191 }, "url": "https://www.semanticscholar.org/paper/104ce1a96bf780ba4ffc44faf6935be4ec9e6ba8", "referenceCount": 44, "citationCount": 75, "influentialCitationCount": 21, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Communication-Efficient Federated Learning with Adaptive Parameter Freezing", "abstract": "Federated learning allows edge devices to collaboratively train a global model by synchronizing their local updates without sharing private data. 
Yet, with limited network bandwidth at the edge, communication often becomes a severe bottleneck. In this paper, we find that it is unnecessary to always synchronize the full model in the entire training process, because many parameters gradually stabilize prior to the ultimate model convergence, and can thus be excluded from being synchronized at an early stage. This allows us to reduce the communication overhead without compromising the model accuracy. However, challenges are that the local parameters excluded from global synchronization may diverge on different clients, and meanwhile some parameters may stabilize only temporally. To address these challenges, we propose a novel scheme called Adaptive Parameter Freezing (APF), which fixes (freezes) the non-synchronized stable parameters in intermittent periods. Specifically, the freezing periods are tentatively adjusted in an additively-increase and multiplicatively-decrease manner, depending on if the previously-frozen parameters remain stable in subsequent iterations. We implemented APF as a Python module in PyTorch. Our extensive array of experimental results show that APF can reduce data transfer by over 60%.", "year": 2021, "venue": "IEEE International Conference on Distributed Computing Systems", "authors": [ "Chen Chen", "Hongao Xu", "Wei Wang", "Baochun Li", "Bo Li", "Li Chen", "Gong Zhang" ], "externalIds": { "DBLP": "conf/icdcs/ChenXWLLCZ21", "DOI": "10.1109/ICDCS51616.2021.00010", "CorpusId": 234101214 }, "url": "https://www.semanticscholar.org/paper/d79e8947f481c8ad5703611964a9f6c9bddce551", "referenceCount": 43, "citationCount": 49, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Privacy-Preserved Federated Learning for Autonomous Driving", "abstract": "In recent years, the privacy issue in Vehicular Edge Computing (VEC) has gained a lot of concern. The privacy problem is even more severe in autonomous driving business than the other businesses in VEC such as ordinary navigation. Federated learning (FL), which is a privacy-preserved strategy proposed by Google, has become a hot trend to solve the privacy problem in many fields including VEC. Therefore, we introduce FL into autonomous driving to preserve vehicular privacy by keeping original data in a local vehicle and sharing the training model parameter only with the help of MEC server. Moreover, different from the common assumption of honest MEC server and honest vehicle in former studies, we take the malicious MEC servers and malicious vehicles into account. First, we consider honest-but-curious MEC server and malicious vehicles and propose a traceable identity-based privacy preserving scheme to protect the vehicular message privacy where improved Dijk-Gentry-Halevi-Vaikutanathan (DGHV) algorithm is proposed and a blockchain-based Reputation-based Incentive Autonomous Driving Mechanism (RIADM) is adopted. Further, when the case comes to the non-credibility of both parties where semi-honest MEC server and malicious vehicles are considered, we propose an anonymous identity-based privacy preserving scheme to protect the identity privacy of vehicles with Zero-Knowledge Proof (ZKP). 
Based on the simulation of virtual autonomous driving based on real-world road images, it is verified that our proposes scheme can reduce 73.7 % training loss of autonomous driving, increase the accuracy to around 5.55 % while keeps effective privacy of message and identity under the threat of dishonest MEC server and vehicles.", "year": 2021, "venue": "IEEE transactions on intelligent transportation systems (Print)", "authors": [ "Yijing Li", "Xiaofeng Tao", "Xuefei Zhang", "Junjie Liu", "Jin Xu" ], "externalIds": { "MAG": "3175663678", "DBLP": "journals/tits/LiTZL022", "DOI": "10.1109/tits.2021.3081560", "CorpusId": 237925933 }, "url": "https://www.semanticscholar.org/paper/fecfd8fe8b6ad03ac81467a838d8878ee650de48", "referenceCount": 0, "citationCount": 112, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "End-to-End Speech Recognition from Federated Acoustic Models", "abstract": "Training Automatic Speech Recognition (ASR) models under federated learning (FL) settings has attracted a lot of attention recently. However, the FL scenarios often presented in the literature are artificial and fail to capture the complexity of real FL systems. In this paper, we construct a challenging and realistic ASR federated experimental setup consisting of clients with heterogeneous data distributions using the French and Italian sets of the CommonVoice dataset, a large heterogeneous dataset containing thousands of different speakers, acoustic environments and noises. We present the first empirical study on an attention-based sequence-to-sequence End-to-End (E2E) ASR model with three aggregation weighting strategies – standard FedAvg, loss-based aggregation and a novel word error rate (WER)-based aggregation, compared in two realistic FL scenarios: cross-silo with 10 clients and cross-device with 2K and 4K clients. This 4K cross-device ASR experiment is the largest ever performed. Our first-of-its-kind analysis on E2E ASR from heterogeneous and realistic federated acoustic models provides the foundations for future research and development of realistic FL ASR applications.", "year": 2021, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Yan Gao", "Titouan Parcollet", "Javier Fernández-Marqués", "P. P. B. D. Gusmão", "Daniel J. Beutel", "N. Lane" ], "externalIds": { "DBLP": "conf/icassp/GaoPZFGBL22", "ArXiv": "2104.14297", "DOI": "10.1109/icassp43922.2022.9747161", "CorpusId": 233444129 }, "url": "https://www.semanticscholar.org/paper/389a4e99384ca820dc71741572be425f4b7fb021", "referenceCount": 39, "citationCount": 39, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Federated Learning for Internet of Things: A Comprehensive Survey", "abstract": "The Internet of Things (IoT) is penetrating many facets of our daily life with the proliferation of intelligent services and applications empowered by artificial intelligence (AI). Traditionally, AI techniques require centralized data collection and processing that may not be feasible in realistic application scenarios due to the high scalability of modern IoT networks and growing data privacy concerns. Federated Learning (FL) has emerged as a distributed collaborative AI approach that can enable many intelligent IoT applications, by allowing for AI training at distributed IoT devices without the need for data sharing. 
In this article, we provide a comprehensive survey of the emerging applications of FL in IoT networks, beginning from an introduction to the recent advances in FL and IoT to a discussion of their integration. Particularly, we explore and analyze the potential of FL for enabling a wide range of IoT services, including IoT data sharing, data offloading and caching, attack detection, localization, mobile crowdsensing, and IoT privacy and security. We then provide an extensive survey of the use of FL in various key IoT applications such as smart healthcare, smart transportation, Unmanned Aerial Vehicles (UAVs), smart cities, and smart industry. The important lessons learned from this review of the FL-IoT services and applications are also highlighted. We complete this survey by highlighting the current challenges and possible directions for future research in this booming area.", "year": 2021, "venue": "IEEE Communications Surveys and Tutorials", "authors": [ "Dinh C. Nguyen", "Ming Ding", "P. Pathirana", "A. Seneviratne", "Jun Li", "F. I. H. Vincent Poor" ], "externalIds": { "ArXiv": "2104.07914", "DBLP": "journals/comsur/NguyenDPSLP21", "DOI": "10.1109/COMST.2021.3075439", "CorpusId": 233289549 }, "url": "https://www.semanticscholar.org/paper/b408358a5d2300e1fc6cc1a58a18d45a2b75420d", "referenceCount": 212, "citationCount": 603, "influentialCitationCount": 23, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "Diverse Branch Block: Building a Convolution as an Inception-like Unit", "abstract": "We propose a universal building block of Convolutional Neural Network (ConvNet) to improve the performance without any inference-time costs. The block is named Diverse Branch Block (DBB), which enhances the representational capacity of a single convolution by combining diverse branches of different scales and complexities to enrich the feature space, including sequences of convolutions, multiscale convolutions, and average pooling. After training, a DBB can be equivalently converted into a single conv layer for deployment. Unlike the advancements of novel ConvNet architectures, DBB complicates the training-time microstructure while maintaining the macro architecture, so that it can be used as a drop-in replacement for regular conv layers of any architecture. In this way, the model can be trained to reach a higher level of performance and then transformed into the original inference-time structure for inference. DBB improves ConvNets on image classification (up to 1.9% higher top-1 accuracy on ImageNet), object detection and semantic segmentation. The PyTorch code and models are released at https://github.com/DingXiaoH/DiverseBranchBlock.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Xiaohan Ding", "X. Zhang", "Jungong Han", "Guiguang Ding" ], "externalIds": { "DBLP": "conf/cvpr/Ding0HD21", "ArXiv": "2103.13425", "DOI": "10.1109/CVPR46437.2021.01074", "CorpusId": 232352703 }, "url": "https://www.semanticscholar.org/paper/1f34aee634327c6d4f13f9018c34df4d3239ecb6", "referenceCount": 39, "citationCount": 200, "influentialCitationCount": 22, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedBN: Federated Learning on Non-IID Features via Local Batch Normalization", "abstract": "The emerging paradigm of federated learning (FL) strives to enable collaborative training of deep models on the network edge without centrally aggregating raw data and hence improving data privacy. 
In most cases, the assumption of independent and identically distributed samples across local clients does not hold for federated learning setups. Under this setting, neural network training performance may vary significantly according to the data distribution and even hurt training convergence. Most of the previous work has focused on a difference in the distribution of labels or client shifts. Unlike those settings, we address an important problem of FL, e.g., different scanners/sensors in medical imaging, different scenery distribution in autonomous driving (highway vs. city), where local clients store examples with different distributions compared to other clients, which we denote as feature shift non-iid. In this work, we propose an effective method that uses local batch normalization to alleviate the feature shift before averaging models. The resulting scheme, called FedBN, outperforms both classical FedAvg, as well as the state-of-the-art for non-iid data (FedProx) on our extensive experiments. These empirical results are supported by a convergence analysis that shows in a simplified setting that FedBN has a faster convergence rate than FedAvg. Code is available at https://github.com/med-air/FedBN.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Xiaoxiao Li", "Meirui Jiang", "Xiaofei Zhang", "Michael Kamp", "Q. Dou" ], "externalIds": { "DBLP": "journals/corr/abs-2102-07623", "ArXiv": "2102.07623", "CorpusId": 231924480 }, "url": "https://www.semanticscholar.org/paper/2c0f4711c9c124a8dc056eaee82a2ca5ef276da8", "referenceCount": 51, "citationCount": 636, "influentialCitationCount": 111, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Collaborative Federated Learning for Healthcare: Multi-Modal COVID-19 Diagnosis at the Edge", "abstract": "Despite significant improvements over the last few years, cloud-based healthcare applications continue to suffer from poor adoption due to their limitations in meeting stringent security, privacy, and quality of service requirements (such as low latency). The edge computing trend, along with techniques for distributed machine learning such as federated learning, has gained popularity as a viable solution in such settings. In this paper, we leverage the capabilities of edge computing in medicine by evaluating the potential of intelligent processing of clinical data at the edge. We utilized the emerging concept of clustered federated learning (CFL) for an automatic COVID-19 diagnosis. We evaluate the performance of the proposed framework under different experimental setups on two benchmark datasets. Promising results are obtained on both datasets resulting in comparable results against the central baseline where the specialized models (i.e., each on a specific image modality) are trained with central data, and improvements of 16% and 11% in overall F1-Scores have been achieved over the trained model trained (using multi-modal COVID-19 data) in the CFL setup on X-ray and Ultrasound datasets, respectively. We also discussed the associated challenges, technologies, and techniques available for deploying ML at the edge in such privacy and delay-sensitive applications.", "year": 2021, "venue": "IEEE Open Journal of the Computer Society", "authors": [ "A. 
Qayyum", "Kashif Ahmad", "Muhammad Ahtazaz Ahsan", "Ala Al-Fuqaha", "Junaid Qadir" ], "externalIds": { "ArXiv": "2101.07511", "DBLP": "journals/ojcomps/QayyumAAA022", "DOI": "10.1109/OJCS.2022.3206407", "CorpusId": 231639122 }, "url": "https://www.semanticscholar.org/paper/9f965d15beb475ca28b2987d0260edec5f5ef6a1", "referenceCount": 75, "citationCount": 147, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RepVGG: Making VGG-style ConvNets Great Again", "abstract": "We present a simple but powerful architecture of convolutional neural network, which has a VGG-like inference-time body composed of nothing but a stack of 3 × 3 convolution and ReLU, while the training-time model has a multi-branch topology. Such decoupling of the training-time and inference-time architecture is realized by a structural re-parameterization technique so that the model is named RepVGG. On ImageNet, RepVGG reaches over 80% top-1 accuracy, which is the first time for a plain model, to the best of our knowledge. On NVIDIA 1080Ti GPU, RepVGG models run 83% faster than ResNet-50 or 101% faster than ResNet-101 with higher accuracy and show favorable accuracy-speed trade-off compared to the state-of-the-art models like EfficientNet and RegNet. The code and trained models are available at https://github.com/megvii-model/RepVGG.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Xiaohan Ding", "X. Zhang", "Ningning Ma", "Jungong Han", "Guiguang Ding", "Jian Sun" ], "externalIds": { "ArXiv": "2101.03697", "DBLP": "journals/corr/abs-2101-03697", "MAG": "3120184084", "DOI": "10.1109/CVPR46437.2021.01352", "CorpusId": 231572790 }, "url": "https://www.semanticscholar.org/paper/2b8088253e2378fce001a090fe923b81e8dedf25", "referenceCount": 46, "citationCount": 1189, "influentialCitationCount": 164, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Flower: A Friendly Federated Learning Research Framework", "abstract": "Federated Learning (FL) has emerged as a promising technique for edge devices to collaboratively learn a shared prediction model, while keeping their training data on the device, thereby decoupling the ability to do machine learning from the need to store the data in the cloud. However, FL is difficult to implement realistically, both in terms of scale and systems heterogeneity. Although there are a number of research frameworks available to simulate FL algorithms, they do not support the study of scalable FL workloads on heterogeneous edge devices. In this paper, we present Flower -- a comprehensive FL framework that distinguishes itself from existing platforms by offering new facilities to execute large-scale FL experiments and consider richly heterogeneous FL device scenarios. Our experiments show Flower can perform FL experiments up to 15M in client size using only a pair of high-end GPUs. Researchers can then seamlessly migrate experiments to real devices to examine other parts of the design space. We believe Flower provides the community with a critical new tool for FL study and development.", "year": 2020, "venue": "", "authors": [ "Daniel J. Beutel", "Taner Topal", "Akhil Mathur", "Xinchi Qiu", "Titouan Parcollet", "N. 
Lane" ], "externalIds": { "ArXiv": "2007.14390", "CorpusId": 220831008 }, "url": "https://www.semanticscholar.org/paper/a199a03e11b68c4132be880b5fcabc57251bc477", "referenceCount": 46, "citationCount": 579, "influentialCitationCount": 55, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Federated Learning in the Sky: Aerial-Ground Air Quality Sensing Framework With UAV Swarms", "abstract": "Due to air quality significantly affects human health, it is becoming increasingly important to accurately and timely predict the air quality index (AQI). To this end, this article proposes a new federated learning (FL)-based aerial-ground air quality sensing framework for fine-grained 3-D air quality monitoring and forecasting. Specifically, in the air, this framework leverages a lightweight Dense-MobileNet model to achieve energy-efficient end-to-end learning from haze features of haze images taken by unmanned aerial vehicles (UAVs) for predicting AQI scale distribution. Furthermore, the FL framework not only allows various organizations or institutions to collaboratively learn a well-trained global model to monitor AQI without compromising privacy but also expands the scope of UAV swarms monitoring. For ground sensing systems, we propose a graph convolutional neural network-based long short-term memory (GC-LSTM) model to achieve accurate, real time, and future AQI inference. The GC-LSTM model utilizes the topological structure of the ground monitoring station to capture the spatiotemporal correlation of historical observation data, which helps the aerial-ground sensing system to achieve accurate AQI inference. Through extensive case studies on a real-world data set, numerical results show that the proposed framework can achieve accurate and energy-efficient AQI sensing without compromising the privacy of raw data.", "year": 2020, "venue": "IEEE Internet of Things Journal", "authors": [ "Yi Liu", "Jiangtian Nie", "Xuandi Li", "Syed Hassan Ahmed", "Wei Yang Bryan Lim", "C. Miao" ], "externalIds": { "DBLP": "journals/iotj/LiuNLALM21", "ArXiv": "2007.12004", "MAG": "3044274646", "DOI": "10.1109/JIOT.2020.3021006", "CorpusId": 220714028 }, "url": "https://www.semanticscholar.org/paper/6a2fdc12f45b101c10716c23cdd4fb703091bd2c", "referenceCount": 53, "citationCount": 122, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "Adaptive Federated Optimization", "abstract": "Federated learning is a distributed machine learning paradigm in which a large number of clients coordinate with a central server to learn a model without sharing their own training data. Due to the heterogeneity of the client datasets, standard federated optimization methods such as Federated Averaging (FedAvg) are often difficult to tune and exhibit unfavorable convergence behavior. In non-federated settings, adaptive optimization methods have had notable success in combating such issues. In this work, we propose federated versions of adaptive optimizers, including Adagrad, Adam, and Yogi, and analyze their convergence in the presence of heterogeneous data for general nonconvex settings. Our results highlight the interplay between client heterogeneity and communication efficiency. 
We also perform extensive experiments on these methods and show that the use of adaptive optimizers can significantly improve the performance of federated learning.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Sashank J. Reddi", "Zachary B. Charles", "M. Zaheer", "Zachary Garrett", "Keith Rush", "Jakub Konecný", "Sanjiv Kumar", "H. B. McMahan" ], "externalIds": { "DBLP": "conf/iclr/ReddiCZGRKKM21", "ArXiv": "2003.00295", "MAG": "3008187686", "CorpusId": 211678094 }, "url": "https://www.semanticscholar.org/paper/47c528344fedb6cb67a38e43d095b41c34715330", "referenceCount": 54, "citationCount": 1149, "influentialCitationCount": 234, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Federated Learning with Matched Averaging", "abstract": "Federated learning allows edge devices to collaboratively learn a shared model while keeping the training data on device, decoupling the ability to do model training from the need to store the data in the cloud. We propose Federated matched averaging (FedMA) algorithm designed for federated learning of modern neural network architectures e.g. convolutional neural networks (CNNs) and LSTMs. FedMA constructs the shared global model in a layer-wise manner by matching and averaging hidden elements (i.e. channels for convolution layers; hidden states for LSTM; neurons for fully connected layers) with similar feature extraction signatures. Our experiments indicate that FedMA outperforms popular state-of-the-art federated learning algorithms on deep CNN and LSTM architectures trained on real world datasets, while improving the communication efficiency.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Hongyi Wang", "M. Yurochkin", "Yuekai Sun", "Dimitris Papailiopoulos", "Y. Khazaeni" ], "externalIds": { "DBLP": "journals/corr/abs-2002-06440", "MAG": "3006017224", "ArXiv": "2002.06440", "CorpusId": 211132598 }, "url": "https://www.semanticscholar.org/paper/998620638cfc7c6b5d7b95fa8645f75723d78372", "referenceCount": 29, "citationCount": 933, "influentialCitationCount": 88, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "SCAFFOLD: Stochastic Controlled Averaging for Federated Learning", "abstract": "Federated Averaging (FedAvg) has emerged as the algorithm of choice for federated learning due to its simplicity and low communication cost. However, in spite of recent research efforts, its performance is not fully understood. We obtain tight convergence rates for FedAvg and prove that it suffers from `client-drift' when the data is heterogeneous (non-iid), resulting in unstable and slow convergence. \nAs a solution, we propose a new algorithm (SCAFFOLD) which uses control variates (variance reduction) to correct for the `client-drift' in its local updates. We prove that SCAFFOLD requires significantly fewer communication rounds and is not affected by data heterogeneity or client sampling. Further, we show that (for quadratics) SCAFFOLD can take advantage of similarity in the client's data yielding even faster convergence. The latter is the first result to quantify the usefulness of local-steps in distributed optimization.", "year": 2019, "venue": "International Conference on Machine Learning", "authors": [ "Sai Praneeth Karimireddy", "Satyen Kale", "M. Mohri", "Sashank J. Reddi", "Sebastian U. Stich", "A. 
Suresh" ], "externalIds": { "MAG": "3006555759", "DBLP": "conf/icml/KarimireddyKMRS20", "CorpusId": 214069261 }, "url": "https://www.semanticscholar.org/paper/fc7b1823bd8b59a590d0bc33bd7a145518fd71c5", "referenceCount": 80, "citationCount": 2033, "influentialCitationCount": 374, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Accelerating Federated Learning via Momentum Gradient Descent", "abstract": "Federated learning (FL) provides a communication-efficient approach to solve machine learning problems concerning distributed data, without sending raw data to a central server. However, existing works on FL only utilize first-order gradient descent (GD) and do not consider the preceding iterations to gradient update which can potentially accelerate convergence. In this article, we consider momentum term which relates to the last iteration. The proposed momentum federated learning (MFL) uses momentum gradient descent (MGD) in the local update step of FL system. We establish global convergence properties of MFL and derive an upper bound on MFL convergence rate. Comparing the upper bounds on MFL and FL convergence rates, we provide conditions in which MFL accelerates the convergence. For different machine learning models, the convergence performance of MFL is evaluated based on experiments with MNIST and CIFAR-10 datasets. Simulation results confirm that MFL is globally convergent and further reveal significant convergence improvement over FL.", "year": 2019, "venue": "IEEE Transactions on Parallel and Distributed Systems", "authors": [ "Wei Liu", "Li Chen", "Yunfei Chen", "Wenyi Zhang" ], "externalIds": { "MAG": "3007279825", "DBLP": "journals/corr/abs-1910-03197", "ArXiv": "1910.03197", "DOI": "10.1109/TPDS.2020.2975189", "CorpusId": 203902376 }, "url": "https://www.semanticscholar.org/paper/c55c7e0203f3add64ac3d3f60943ae201907ed68", "referenceCount": 41, "citationCount": 248, "influentialCitationCount": 20, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "CMFL: Mitigating Communication Overhead for Federated Learning", "abstract": "Federated Learning enables mobile users to collaboratively learn a global prediction model by aggregating their individual updates without sharing the privacy-sensitive data. As mobile devices usually have limited data plan and slow network connections to the central server where the global model is maintained, mitigating the communication overhead is of paramount importance. While existing works mainly focus on reducing the total bits transferred in each update via data compression, we study an orthogonal approach that identifies irrelevant updates made by clients and precludes them from being uploaded for reduced network footprint. Following this idea, we propose Communication-Mitigated Federated Learning (CMFL) in this paper. CMFL provides clients with feedback information regarding the global tendency of model updating. Each client checks if its update aligns with this global tendency and is relevant enough to model improvement. By avoiding uploading those irrelevant updates to the server, CMFL can substantially reduce the communication overhead while still guaranteeing the learning convergence. CMFL is shown to achieve general improvement in communication efficiency for almost all of the existing federated learning schemes. We evaluate CMFL through extensive simulations and EC2 emulations. 
Compared with vanilla Federated Learning, CMFL yields 13.97x communication efficiency in terms of the reduction of network footprint. When applied to Federated Multi-Task Learning, CMFL improves the communication efficiency by 5.7x with 4% higher prediction accuracy.", "year": 2019, "venue": "IEEE International Conference on Distributed Computing Systems", "authors": [ "Luping Wang", "Wei Wang", "Bo Li" ], "externalIds": { "MAG": "2982464076", "DBLP": "conf/icdcs/WangWL19", "DOI": "10.1109/ICDCS.2019.00099", "CorpusId": 204781679 }, "url": "https://www.semanticscholar.org/paper/e928c7bbb0089abf591a2b4861d8885b6d762e5b", "referenceCount": 33, "citationCount": 342, "influentialCitationCount": 23, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Learning with Non-IID Data", "abstract": "Federated learning enables resource-constrained edge compute devices, such as mobile phones and IoT devices, to learn a shared model for prediction, while keeping the training data local. This decentralized approach to train models provides privacy, security, regulatory and economic benefits. In this work, we focus on the statistical challenge of federated learning when local data is non-IID. We first show that the accuracy of federated learning reduces significantly, by up to 55% for neural networks trained for highly skewed non-IID data, where each client device trains only on a single class of data. We further show that this accuracy reduction can be explained by the weight divergence, which can be quantified by the earth mover's distance (EMD) between the distribution over classes on each device and the population distribution. As a solution, we propose a strategy to improve training on non-IID data by creating a small subset of data which is globally shared between all the edge devices. Experiments show that accuracy can be increased by 30% for the CIFAR-10 dataset with only 5% globally shared data.", "year": 2018, "venue": "arXiv.org", "authors": [ "Yue Zhao", "Meng Li", "Liangzhen Lai", "Naveen Suda", "Damon Civin", "V. Chandra" ], "externalIds": { "DBLP": "journals/corr/abs-1806-00582", "MAG": "2807006176", "ArXiv": "1806.00582", "DOI": "10.48550/arXiv.1806.00582", "CorpusId": 46936175 }, "url": "https://www.semanticscholar.org/paper/5cfc112c932e38df95a0ba35009688735d1a386b", "referenceCount": 31, "citationCount": 2133, "influentialCitationCount": 226, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Squeeze-and-Excitation Networks", "abstract": "Convolutional neural networks are built upon the convolution operation, which extracts informative features by fusing spatial and channel-wise information together within local receptive fields. In order to boost the representational power of a network, several recent approaches have shown the benefit of enhancing spatial encoding. In this work, we focus on the channel relationship and propose a novel architectural unit, which we term the \"Squeeze-and-Excitation\" (SE) block, that adaptively recalibrates channel-wise feature responses by explicitly modelling interdependencies between channels. We demonstrate that by stacking these blocks together, we can construct SENet architectures that generalise extremely well across challenging datasets. Crucially, we find that SE blocks produce significant performance improvements for existing state-of-the-art deep architectures at minimal additional computational cost. 
SENets formed the foundation of our ILSVRC 2017 classification submission which won first place and significantly reduced the top-5 error to 2.251%, achieving a ~25% relative improvement over the winning entry of 2016. Code and models are available at https://github.com/hujie-frank/SENet.", "year": 2017, "venue": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition", "authors": [ "Jie Hu", "Li Shen", "Samuel Albanie", "Gang Sun", "E. Wu" ], "externalIds": { "MAG": "2963420686", "DBLP": "journals/corr/abs-1709-01507", "ArXiv": "1709.01507", "DOI": "10.1109/CVPR.2018.00745", "CorpusId": 140309863 }, "url": "https://www.semanticscholar.org/paper/fb37561499573109fc2cebb6a7b08f44917267dd", "referenceCount": 85, "citationCount": 22696, "influentialCitationCount": 1786, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fashion-MNIST: a Novel Image Dataset for Benchmarking Machine Learning Algorithms", "abstract": "We present Fashion-MNIST, a new dataset comprising of 28x28 grayscale images of 70,000 fashion products from 10 categories, with 7,000 images per category. The training set has 60,000 images and the test set has 10,000 images. Fashion-MNIST is intended to serve as a direct drop-in replacement for the original MNIST dataset for benchmarking machine learning algorithms, as it shares the same image size, data format and the structure of training and testing splits. The dataset is freely available at this https URL", "year": 2017, "venue": "arXiv.org", "authors": [ "Han Xiao", "Kashif Rasul", "Roland Vollgraf" ], "externalIds": { "MAG": "2750384547", "DBLP": "journals/corr/abs-1708-07747", "ArXiv": "1708.07747", "CorpusId": 702279 }, "url": "https://www.semanticscholar.org/paper/f9c602cc436a9ea2f9e7db48c77d924e09ce3c32", "referenceCount": 6, "citationCount": 7771, "influentialCitationCount": 1471, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "The iNaturalist Species Classification and Detection Dataset", "abstract": "Existing image classification datasets used in computer vision tend to have a uniform distribution of images across object categories. In contrast, the natural world is heavily imbalanced, as some species are more abundant and easier to photograph than others. To encourage further progress in challenging real world conditions we present the iNaturalist species classification and detection dataset, consisting of 859,000 images from over 5,000 different species of plants and animals. It features visually similar species, captured in a wide variety of situations, from all over the world. Images were collected with different camera types, have varying image quality, feature a large class imbalance, and have been verified by multiple citizen scientists. We discuss the collection of the dataset and present extensive baseline experiments using state-of-the-art computer vision classification and detection models. Results show that current non-ensemble based methods achieve only 67% top one classification accuracy, illustrating the difficulty of the dataset. Specifically, we observe poor results for classes with small numbers of training examples suggesting more attention is needed in low-shot learning.", "year": 2017, "venue": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition", "authors": [ "Grant Van Horn", "Oisin Mac Aodha", "Yang Song", "Yin Cui", "Chen Sun", "Alexander Shepard", "Hartwig Adam", "P. Perona", "Serge J. 
Belongie" ], "externalIds": { "DBLP": "conf/cvpr/HornASCSSAPB18", "MAG": "2950181225", "DOI": "10.1109/CVPR.2018.00914", "CorpusId": 29156801 }, "url": "https://www.semanticscholar.org/paper/05eb6eb4ea7d2b332295dfa5aeb64d5f47c1e628", "referenceCount": 53, "citationCount": 1365, "influentialCitationCount": 92, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Gaia: Geo-Distributed Machine Learning Approaching LAN Speeds", "abstract": "Machine learning (ML) is widely used to derive useful information from large-scale data (such as user activities, pictures, and videos) generated at increasingly rapid rates, all over the world. Unfortunately, it is infeasible to move all this globally-generated data to a centralized data center before running an ML algorithm over it--moving large amounts of raw data over wide-area networks (WANs) can be extremely slow, and is also subject to the constraints of privacy and data sovereignty laws. This motivates the need for a geo-distributed ML system spanning multiple data centers. Unfortunately, communicating over WANs can significantly degrade ML system performance (by as much as 53.7× in our study) because the communication overwhelms the limited WAN bandwidth. \n \nOur goal in this work is to develop a geo-distributed ML system that (1) employs an intelligent communication mechanism over WANs to efficiently utilize the scarce WAN bandwidth, while retaining the accuracy and correctness guarantees of an ML algorithm; and (2) is generic and flexible enough to run a wide range of ML algorithms, without requiring any changes to the algorithms. \n \nTo this end, we introduce a new, general geo-distributed ML system, Gaia, that decouples the communication within a data center from the communication between data centers, enabling different communication and consistency models for each. We present a new ML synchronization model, Approximate Synchronous Parallel (ASP), whose key idea is to dynamically eliminate insignificant communication between data centers while still guaranteeing the correctness of ML algorithms. Our experiments on our prototypes of Gaia running across 11 Amazon EC2 global regions and on a cluster that emulates EC2 WAN bandwidth show that Gaia provides 1.8-53.5× speedup over two state-of-the-art distributed ML systems, and is within 0.94-1.40× of the speed of running the same ML algorithm on machines on a local area network (LAN).", "year": 2017, "venue": "Symposium on Networked Systems Design and Implementation", "authors": [ "Kevin Hsieh", "A. Harlap", "Nandita Vijaykumar", "Dimitris Konomis", "G. Ganger", "Phillip B. Gibbons", "O. Mutlu" ], "externalIds": { "DBLP": "conf/nsdi/HsiehHVKGGM17", "MAG": "2604783387", "CorpusId": 11438116 }, "url": "https://www.semanticscholar.org/paper/6cad4a36102b6387259f56cc2f09dd7a994ba8fa", "referenceCount": 84, "citationCount": 389, "influentialCitationCount": 47, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Communication Quantization for Data-Parallel Training of Deep Neural Networks", "abstract": "We study data-parallel training of deep neural networks on high-performance computing infrastructure. The key problem with scaling data-parallel training is avoiding severe communication/computation imbalance. We explore quantizing gradient updates before communication to reduce bandwidth requirements and compare it against a baseline implementation that uses the MPI allreduce routine. 
We port two existing quantization approaches, one-bit and threshold, and develop our own adaptive quantization algorithm. The performance of these algorithms is evaluated and compared with MPI_Allreduce when training models for the MNIST dataset and on a synthetic benchmark. On an HPC system, MPI_Allreduce outperforms the existing quantization approaches. Our adaptive quantization is comparable or superior for large layers without sacrificing accuracy. It is 1.76 times faster than the next best approach for the largest layers in our benchmark and achieves near-linear speedup in data-parallel training.", "year": 2016, "venue": "Workshop on Machine Learning in High Performance Computing Environments", "authors": [ "Nikoli Dryden", "Tim Moon", "S. A. Jacobs", "B. V. Essen" ], "externalIds": { "MAG": "2563343794", "DBLP": "conf/sc/DrydenMJE16", "DOI": "10.1109/MLHPC.2016.4", "CorpusId": 1712156 }, "url": "https://www.semanticscholar.org/paper/cc1f98ec6fd3ae77468ee8adbe38ea0a445196e5", "referenceCount": 21, "citationCount": 192, "influentialCitationCount": 15, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Learning: Strategies for Improving Communication Efficiency", "abstract": "Federated Learning is a machine learning setting where the goal is to train a high-quality centralized model while training data remains distributed over a large number of clients each with unreliable and relatively slow network connections. We consider learning algorithms for this setting where on each round, each client independently computes an update to the current model based on its local data, and communicates this update to a central server, where the client-side updates are aggregated to compute a new global model. The typical clients in this setting are mobile phones, and communication efficiency is of the utmost importance. In this paper, we propose two ways to reduce the uplink communication costs: structured updates, where we directly learn an update from a restricted space parametrized using a smaller number of variables, e.g. either low-rank or a random mask; and sketched updates, where we learn a full model update and then compress it using a combination of quantization, random rotations, and subsampling before sending it to the server. Experiments on both convolutional and recurrent networks show that the proposed methods can reduce the communication cost by two orders of magnitude.", "year": 2016, "venue": "arXiv.org", "authors": [ "Jakub Konecný", "H. B. McMahan", "Felix X. Yu", "Peter Richtárik", "A. Suresh", "D. Bacon" ], "externalIds": { "DBLP": "journals/corr/KonecnyMYRSB16", "ArXiv": "1610.05492", "MAG": "2535838896", "CorpusId": 14999259 }, "url": "https://www.semanticscholar.org/paper/7fcb90f68529cbfab49f471b54719ded7528d0ef", "referenceCount": 26, "citationCount": 4138, "influentialCitationCount": 225, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "QSGD: Communication-Efficient SGD via Gradient Quantization and Encoding", "abstract": "Parallel implementations of stochastic gradient descent (SGD) have received significant research attention, thanks to its excellent scalability properties. A fundamental barrier when parallelizing SGD is the high bandwidth cost of communicating gradient updates between nodes; consequently, several lossy compresion heuristics have been proposed, by which nodes only communicate quantized gradients. 
Although effective in practice, these heuristics do not always guarantee convergence, and it is not clear whether they can be improved. In this paper, we propose Quantized SGD (QSGD), a family of compression schemes for gradient updates which provides convergence guarantees. QSGD allows the user to smoothly trade off \\emph{communication bandwidth} and \\emph{convergence time}: nodes can adjust the number of bits sent per iteration, at the cost of possibly higher variance. We show that this trade-off is inherent, in the sense that improving it past some threshold would violate information-theoretic lower bounds. QSGD guarantees convergence for convex and non-convex objectives, under asynchrony, and can be extended to stochastic variance-reduced techniques. When applied to training deep neural networks for image classification and automated speech recognition, QSGD leads to significant reductions in end-to-end training time. For example, on 16GPUs, we can train the ResNet152 network to full accuracy on ImageNet 1.8x faster than the full-precision variant.", "year": 2016, "venue": "Neural Information Processing Systems", "authors": [ "Dan Alistarh", "Demjan Grubic", "Jerry Li", "Ryota Tomioka", "Milan Vojnovic" ], "externalIds": { "MAG": "2769644379", "DBLP": "conf/nips/AlistarhG0TV17", "CorpusId": 263894534 }, "url": "https://www.semanticscholar.org/paper/c9d64aaa2007b60ef7814acc895dd90f15578a20", "referenceCount": 45, "citationCount": 1077, "influentialCitationCount": 193, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Communication-Efficient Learning of Deep Networks from Decentralized Data", "abstract": "Modern mobile devices have access to a wealth of data suitable for learning models, which in turn can greatly improve the user experience on the device. For example, language models can improve speech recognition and text entry, and image models can automatically select good photos. However, this rich data is often privacy sensitive, large in quantity, or both, which may preclude logging to the data center and training there using conventional approaches. We advocate an alternative that leaves the training data distributed on the mobile devices, and learns a shared model by aggregating locally-computed updates. We term this decentralized approach Federated Learning. \nWe present a practical method for the federated learning of deep networks based on iterative model averaging, and conduct an extensive empirical evaluation, considering five different model architectures and four datasets. These experiments demonstrate the approach is robust to the unbalanced and non-IID data distributions that are a defining characteristic of this setting. Communication costs are the principal constraint, and we show a reduction in required communication rounds by 10-100x as compared to synchronized stochastic gradient descent.", "year": 2016, "venue": "International Conference on Artificial Intelligence and Statistics", "authors": [ "H. B. McMahan", "Eider Moore", "Daniel Ramage", "S. Hampson", "B. A. Y. 
Arcas" ], "externalIds": { "MAG": "2950745363", "DBLP": "conf/aistats/McMahanMRHA17", "ArXiv": "1602.05629", "CorpusId": 14955348 }, "url": "https://www.semanticscholar.org/paper/d1dbf643447405984eeef098b1b320dee0b3b8a7", "referenceCount": 50, "citationCount": 13806, "influentialCitationCount": 3346, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Very Deep Convolutional Networks for Large-Scale Image Recognition", "abstract": "In this work we investigate the effect of the convolutional network depth on its accuracy in the large-scale image recognition setting. Our main contribution is a thorough evaluation of networks of increasing depth using an architecture with very small (3x3) convolution filters, which shows that a significant improvement on the prior-art configurations can be achieved by pushing the depth to 16-19 weight layers. These findings were the basis of our ImageNet Challenge 2014 submission, where our team secured the first and the second places in the localisation and classification tracks respectively. We also show that our representations generalise well to other datasets, where they achieve state-of-the-art results. We have made our two best-performing ConvNet models publicly available to facilitate further research on the use of deep visual representations in computer vision.", "year": 2014, "venue": "International Conference on Learning Representations", "authors": [ "K. Simonyan", "Andrew Zisserman" ], "externalIds": { "MAG": "2949429431", "ArXiv": "1409.1556", "DBLP": "journals/corr/SimonyanZ14a", "CorpusId": 14124313 }, "url": "https://www.semanticscholar.org/paper/eb42cf88027de515750f230b23b1a057dc782108", "referenceCount": 43, "citationCount": 93036, "influentialCitationCount": 13588, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "1-bit stochastic gradient descent and its application to data-parallel distributed training of speech DNNs", "abstract": "We show empirically that in SGD training of deep neural networks, one can, at no or nearly no loss of accuracy, quantize the gradients aggressively—to but one bit per value—if the quantization error is carried forward across minibatches (error feedback). This size reduction makes it feasible to parallelize SGD through data-parallelism with fast processors like recent GPUs. We implement data-parallel deterministically distributed SGD by combining this finding with AdaGrad, automatic minibatch-size selection, double buffering, and model parallelism. Unexpectedly, quantization benefits AdaGrad, giving a small accuracy gain. For a typical Switchboard DNN with 46M parameters, we reach computation speeds of 27k frames per second (kfps) when using 2880 samples per minibatch, and 51kfps with 16k, on a server with 8 K20X GPUs. This corresponds to speed-ups over a single GPU of 3.6 and 6.3, respectively. 7 training passes over 309h of data complete in under 7h. A 160M-parameter model training processes 3300h of data in under 16h on 20 dual-GPU servers—a 10 times speed-up—albeit at a small accuracy loss.", "year": 2014, "venue": "Interspeech", "authors": [ "F. Seide", "Hao Fu", "J. 
Droppo", "Gang Li", "Dong Yu" ], "externalIds": { "DBLP": "conf/interspeech/SeideFDLY14", "MAG": "2407022425", "DOI": "10.21437/Interspeech.2014-274", "CorpusId": 2189412 }, "url": "https://www.semanticscholar.org/paper/3439a127e45fb763881f03ef3ec735a1db0e0ccc", "referenceCount": 30, "citationCount": 938, "influentialCitationCount": 106, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Scaling Up Your Kernels to 31x31: Revisiting Large Kernel Design in CNNs", "abstract": "For training MobileNet V2 models (Sec. 3), we use 8 GPUs, an SGD optimizer with momentum of 0.9, a batch size of 32 per GPU, input resolution of 224×224, weight decay of 4 × 10−5, learning rate schedule with 5-epoch warmup, initial value of 0.1 and cosine annealing for 100 epochs. For the data augmentation, we only use random cropping and left-right flipping, as a common practice. For training RepLKNet models (Sec. 4.2),we use 32 GPUs and a batch size of 64 per GPU to train for 120 epochs. The optimizer is AdamW [10] with momentum of 0.9 and weight decay of 0.05. The learning rate setting includes an initial value of 4 × 10−3, cosine annealing and 10-epoch warm-up. For the data augmentation and regularization, we use RandAugment [4] (“rand-m9-mstd0.5-inc1” as implemented by timm [15]), label smoothing coefficient of 0.1, mixup [18] with α = 0.8, CutMix with α = 1.0, Rand Erasing [19] with probability of 25% and Stochastic Depth with a drop-path rate of 30%, following the recent works [1, 8, 9, 12]. The RepLKNet-31B reported in Sec. 4.3 is trained with the same configurations except the epoch number of 300 and drop-path rate of 50%. For finetuning the 224×224-trained RepLKNet-31B with 384×384, we use 32 GPUs, a batch size of 32 per GPU, initial learning rate of 4 × 10−4, cosine annealing, 1-epoch warm-up, 30 epochs, model EMA (Exponential Moving Average) with momentum of 10−4, the same RandAugment as above but no CutMix nor mixup.", "year": 2022, "venue": "", "authors": [], "externalIds": { "CorpusId": 265039963 }, "url": "https://www.semanticscholar.org/paper/63f1f2dad0a2e84d37a97258008c5609195487f0", "referenceCount": 21, "citationCount": 246, "influentialCitationCount": 26, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Scalable distributed DNN training using commodity GPU cloud computing", "abstract": "We introduce a new method for scaling up distributed Stochastic Gradient Descent (SGD) training of Deep Neural Networks (DNN). The method solves the well-known communication bottleneck problem that arises for data-parallel SGD because compute nodes frequently need to synchronize a replica of the model. We solve it by purposefully controlling the rate of weight-update per individual weight, which is in contrast to the uniform update-rate customarily imposed by the size of a mini-batch. It is shown empirically that the method can reduce the amount of communication by three orders of magnitude while training a typical DNN for acoustic modelling. This reduction in communication bandwidth enables efficient scaling to more parallel GPU nodes than any other method that we are aware of, and it can be achieved with neither loss in convergence rate nor accuracy in the resulting DNN. Furthermore, the training can be performed on commodity cloud infrastructure and networking.", "year": 2015, "venue": "Interspeech", "authors": [ "N. 
Strom" ], "externalIds": { "MAG": "2405578611", "DBLP": "conf/interspeech/Strom15", "DOI": "10.21437/Interspeech.2015-354", "CorpusId": 9338808 }, "url": "https://www.semanticscholar.org/paper/9ee76c41dd161df75cb50ac06d2868afec63b0db", "referenceCount": 24, "citationCount": 517, "influentialCitationCount": 45, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "We propose FedRepOpt and validate its effectiveness under different FL configurations", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "We conduct the first systematic study of training RepOpt-based models in FL settings", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "A Multi-Level Approach for Class Imbalance Problem in Federated Learning for Remote Industry 4.0 Applications": { "paper_title": "A Multi-Level Approach for Class Imbalance Problem in Federated Learning for Remote Industry 4.0 Applications", "arxiv_id": "2409.15802v1", "keyword": "federate learning", "authors": [ "Razin Farhan Hussain", "Mohsen Amini Salehi" ], "references": [ { "title": "Resource Allocation of Industry 4.0 Micro-Service Applications across Serverless Fog Federation", "abstract": null, "year": 2024, "venue": "Future generations computer systems", "authors": [ "R. Hussain", "Mohsen Amini Salehi" ], "externalIds": { "ArXiv": "2401.07194", "DBLP": "journals/fgcs/HussainS24", "DOI": "10.1016/j.future.2024.01.017", "CorpusId": 266999301 }, "url": "https://www.semanticscholar.org/paper/8eb68e3063c0c5b1e9468e8cb89a90f291da445e", "referenceCount": 65, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "UMS: Live Migration of Containerized Services across Autonomous Computing Systems", "abstract": "Containerized services deployed within various computing systems, such as edge and cloud, desire live migration support to enable user mobility, elasticity, and load balancing. To enable such a ubiquitous and efficient service migration, a live migration solution needs to handle circumstances where users have various authority levels (full control, limited control, or no control) over the underlying computing systems. Supporting the live migration at these levels serves as the cornerstone of interoperability, and can unlock several use cases across various forms of distributed systems. As such, in this study, we develop a ubiquitous migration solution (called UMS) that, for a given containerized service, can automatically identify the feasible migration approach, and then seamlessly perform the migration across autonomous computing systems. UMS does not interfere with the way the orchestrator handles containers and can coordinate the migration without the orchestrator involvement. Moreover, UMS is orchestrator-agnostic, i.e., it can be plugged into any underlying orchestrator platform. UMS is equipped with novel methods that can coordinate and perform the live migration at the orchestrator, container, and service levels. 
Experimental results show that for single-process containers, the service-level approach, and for multi-process containers with small $(< 128 \\mathbf{MiB})$ memory footprint, the container-level migration approach lead to the lowest migration overhead and service downtime. To demonstrate the potential of UMS in realizing interoperability and multi-cloud scenarios, we examined it to perform live service migration across heterogeneous orchestrators, and between Microsoft Azure and Google Cloud.", "year": 2023, "venue": "Global Communications Conference", "authors": [ "Thanawat Chanikaphon", "Mohsen Amini Salehi" ], "externalIds": { "DBLP": "conf/globecom/ChanikaphonS23", "ArXiv": "2309.03168", "DOI": "10.1109/GLOBECOM54140.2023.10437519", "CorpusId": 261557544 }, "url": "https://www.semanticscholar.org/paper/cb80a50066fa8444390f2ef689e9d00c29d94051", "referenceCount": 9, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An investigation of various security and privacy issues in Internet of Things", "abstract": null, "year": 2021, "venue": "Materials Today: Proceedings", "authors": [ "Richa Singhai", "R. Sushil" ], "externalIds": { "MAG": "3191505366", "DOI": "10.1016/j.matpr.2021.07.259", "CorpusId": 238799100 }, "url": "https://www.semanticscholar.org/paper/6fdce17d89b92e698e06da39cdcd4a774ca8a4b5", "referenceCount": 32, "citationCount": 19, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An Object Detection based Solver for Google's Image reCAPTCHA v2", "abstract": "Previous work showed that reCAPTCHA v2's image challenges could be solved by automated programs armed with Deep Neural Network (DNN) image classifiers and vision APIs provided by off-the-shelf image recognition services. In response to emerging threats, Google has made significant updates to its image reCAPTCHA v2 challenges that can render the prior approaches ineffective to a great extent. In this paper, we investigate the robustness of the latest version of reCAPTCHA v2 against advanced object detection based solvers. We propose a fully automated object detection based system that breaks the most advanced challenges of reCAPTCHA v2 with an online success rate of 83.25%, the highest success rate to date, and it takes only 19.93 seconds (including network delays) on average to crack a challenge. We also study the updated security features of reCAPTCHA v2, such as anti-recognition mechanisms, improved anti-bot detection techniques, and adjustable security preferences. Our extensive experiments show that while these security features can provide some resistance against automated attacks, adversaries can still bypass most of them. Our experimental findings indicate that the recent advances in object detection technologies pose a severe threat to the security of image captcha designs relying on simple object detection as their underlying AI problem.", "year": 2021, "venue": "International Symposium on Recent Advances in Intrusion Detection", "authors": [ "Imran Hossen", "Yazhou Tu", "Md Fazle Rabby", "Nazmul Islam", "Hui Cao", "X. 
Hei" ], "externalIds": { "DBLP": "journals/corr/abs-2104-03366", "ArXiv": "2104.03366", "MAG": "3092642027", "CorpusId": 222887763 }, "url": "https://www.semanticscholar.org/paper/8a0444afc80e30d74b73d85433531b4c12614b65", "referenceCount": 56, "citationCount": 18, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Self-Balancing Federated Learning With Global Imbalanced Data in Mobile Systems", "abstract": "Federated learning (FL) is a distributed deep learning method that enables multiple participants, such as mobile and IoT devices, to contribute a neural network while their private training data remains in local devices. This distributed approach is promising in the mobile systems where have a large corpus of decentralized data and require high privacy. However, unlike the common datasets, the data distribution of the mobile systems is imbalanced which will increase the bias of model. In this article, we demonstrate that the imbalanced distributed training data will cause an accuracy degradation of FL applications. To counter this problem, we build a self-balancing FL framework named Astraea, which alleviates the imbalances by 1) Z-score-based data augmentation, and 2) Mediator-based multi-client rescheduling. The proposed framework relieves global imbalance by adaptive data augmentation and downsampling, and for averaging the local imbalance, it creates the mediator to reschedule the training of clients based on Kullback–Leibler divergence (KLD) of their data distribution. Compared with FedAvg, the vanilla FL algorithm, Astraea shows +4.39 and +6.51 percent improvement of top-1 accuracy on the imbalanced EMNIST and imbalanced CINIC-10 datasets, respectively. Meanwhile, the communication traffic of Astraea is reduced by 75 percent compared to FedAvg.", "year": 2021, "venue": "IEEE Transactions on Parallel and Distributed Systems", "authors": [ "Moming Duan", "Duo Liu", "Xianzhang Chen", "Renping Liu", "Yujuan Tan", "Liang Liang" ], "externalIds": { "DBLP": "journals/tpds/DuanLCLTL21", "MAG": "3042621011", "DOI": "10.1109/tpds.2020.3009406", "CorpusId": 221086425 }, "url": "https://www.semanticscholar.org/paper/bdef93491b2eec2e71b9ba14fd304ff5eebf5188", "referenceCount": 39, "citationCount": 247, "influentialCitationCount": 17, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Analysis and Optimal Edge Assignment For Hierarchical Federated Learning on Non-IID Data", "abstract": "Learning-based applications have demonstrated practical use cases in ubiquitous environments and amplified interest in exploiting the data stored on users' mobile devices. Distributed learning algorithms aim to leverage such distributed and diverse data to learn a global phenomena by performing training amongst participating devices and repeatedly aggregating their local models' parameters into a global model. Federated learning is a promising paradigm that allows for extending local training among the participant devices before aggregating the parameters, offering better communication efficiency. However, in the cases where the participants' data are strongly skewed (i.e., non-IID), the model accuracy can significantly drop. To face this challenge, we leverage the edge computing paradigm to design a hierarchical learning system that performs Federated Gradient Descent on the user-edge layer and Federated Averaging on the edge-cloud layer. 
In this hierarchical architecture, the users are assigned to different edges, such that edge-level data distributions turn to be close to IID. We formalize and optimize this user-edge assignment problem to minimize classes' distribution distance between edge nodes, which enhances the Federated Averaging performance. Our experiments on multiple real-world datasets show that the proposed optimized assignment is tractable and leads to faster convergence of models towards a better accuracy value.", "year": 2020, "venue": "arXiv.org", "authors": [ "N. Mhaisen", "Alaa Awad", "Amr M. Mohamed", "A. Erbad", "M. Guizani" ], "externalIds": { "ArXiv": "2012.05622", "DBLP": "journals/corr/abs-2012-05622", "CorpusId": 228083746 }, "url": "https://www.semanticscholar.org/paper/ade793e827d513a33f695569457f724d7c6e1512", "referenceCount": 36, "citationCount": 12, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Analyzing the Performance of Smart Industry 4.0 Applications on Cloud Computing Systems", "abstract": "Cloud-based Deep Neural Network (DNN) applications that make latency-sensitive inference are becoming an indispensable part of Industry 4.0. Due to the multi-tenancy and resource heterogeneity, both inherent to the cloud computing environments, the inference time of DNN-based applications are stochastic. Such stochasticity, if not captured, can potentially lead to low Quality of Service (QoS) or even a disaster in critical sectors, such as Oil and Gas industry. To make Industry 4.0 robust, solution architects and researchers need to understand the behavior of DNN-based applications and capture the stochasticity exists in their inference times. Accordingly, in this study, we provide a descriptive analysis of the inference time from two perspectives. First, we perform an application-centric analysis and statistically model the execution time of four categorically different DNN applications on both Amazon and Chameleon clouds. Second, we take a resource-centric approach and analyze a rate-based metric in form of Million Instruction Per Second (MIPS) for heterogeneous machines in the cloud. This non-parametric modeling, achieved via Jackknife and Bootstrap re-sampling methods, provides the confidence interval of MIPS for heterogeneous cloud machines. The findings of this research can be helpful for researchers and cloud solution architects to develop solutions that are robust against the stochastic nature of the inference time of DNN applications in the cloud and can offer a higher QoS to their users and avoid unintended outcomes.", "year": 2020, "venue": "2020 IEEE 22nd International Conference on High Performance Computing and Communications; IEEE 18th International Conference on Smart City; IEEE 6th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)", "authors": [ "R. Hussain", "A.R. Pakravan", "M. 
Salehi" ], "externalIds": { "DBLP": "journals/corr/abs-2012-06054", "MAG": "3113015691", "ArXiv": "2012.06054", "DOI": "10.1109/HPCC-SmartCity-DSS50907.2020.00003", "CorpusId": 228376129 }, "url": "https://www.semanticscholar.org/paper/085f657d684c50eff68fb3067ad006784e2fad21", "referenceCount": 39, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Privacy-Preserving Federated Learning in Fog Computing", "abstract": "Federated learning can combine a large number of scattered user groups and train models collaboratively without uploading data sets, so as to avoid the server collecting user sensitive data. However, the model of federated learning will expose the training set information of users, and the uneven amount of data owned by users in multiple users’ scenarios will lead to the inefficiency of training. In this article, we propose a privacy-preserving federated learning scheme in fog computing. Acting as a participant, each fog node is enabled to collect Internet-of-Things (IoT) device data and complete the learning task in our scheme. Such design effectively improves the low training efficiency and model accuracy caused by the uneven distribution of data and the large gap of computing power. We enable IoT device data to satisfy $\\varepsilon $ -differential privacy to resist data attacks and leverage the combination of blinding and Paillier homomorphic encryption against model attacks, which realize the security aggregation of model parameters. In addition, we formally verified our scheme can not only guarantee both data security and model security but completely resist collusion attacks launched by multiple malicious entities. Our experiments based on the Fashion-MNIST data set prove that our scheme is highly efficient in practice.", "year": 2020, "venue": "IEEE Internet of Things Journal", "authors": [ "Chunyi Zhou", "Anmin Fu", "Shui Yu", "Wei Yang", "Huaqun Wang", "Yuqing Zhang" ], "externalIds": { "MAG": "3016560828", "DBLP": "journals/iotj/ZhouFYYWZ20", "DOI": "10.1109/JIOT.2020.2987958", "CorpusId": 218780991 }, "url": "https://www.semanticscholar.org/paper/c89d4e35658eeea1f954813ea2cdd6302eec670c", "referenceCount": 40, "citationCount": 149, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Machine Learning in Predictive Maintenance towards Sustainable Smart Manufacturing in Industry 4.0", "abstract": "Recently, with the emergence of Industry 4.0 (I4.0), smart systems, machine learning (ML) within artificial intelligence (AI), predictive maintenance (PdM) approaches have been extensively applied in industries for handling the health status of industrial equipment. Due to digital transformation towards I4.0, information techniques, computerized control, and communication networks, it is possible to collect massive amounts of operational and processes conditions data generated form several pieces of equipment and harvest data for making an automated fault detection and diagnosis with the aim to minimize downtime and increase utilization rate of the components and increase their remaining useful lives. PdM is inevitable for sustainable smart manufacturing in I4.0. Machine learning (ML) techniques have emerged as a promising tool in PdM applications for smart manufacturing in I4.0, thus it has increased attraction of authors during recent years. 
This paper aims to provide a comprehensive review of the recent advancements of ML techniques widely applied to PdM for smart manufacturing in I4.0 by classifying the research according to the ML algorithms, ML category, machinery, and equipment used, device used in data acquisition, classification of data, size and type, and highlight the key contributions of the researchers, and thus offers guidelines and foundation for further research.", "year": 2020, "venue": "Sustainability", "authors": [ "Z. Çınar", "Abubakar Abdussalam Nuhu", "Q. Zeeshan", "Orhan Korhan", "M.B.A Asmael", "B. Safaei" ], "externalIds": { "MAG": "3092363736", "DOI": "10.3390/su12198211", "CorpusId": 225160331 }, "url": "https://www.semanticscholar.org/paper/5fb1ddd3c37597138795e7b0f0c3641239cf21f7", "referenceCount": 131, "citationCount": 352, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A novel deep learning instance segmentation model for automated marine oil spill detection", "abstract": null, "year": 2020, "venue": "", "authors": [ "S. Yekeen", "A. Balogun", "K. Yusof" ], "externalIds": { "MAG": "3045982802", "DOI": "10.1016/j.isprsjprs.2020.07.011", "CorpusId": 225196629 }, "url": "https://www.semanticscholar.org/paper/56b3b11163d79bf7bfabd7075959861586267f87", "referenceCount": 61, "citationCount": 108, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Experiments of Federated Learning for COVID-19 Chest X-ray Images", "abstract": null, "year": 2020, "venue": "Advances in Artificial Intelligence and Security", "authors": [ "Boyi Liu", "Bingjie Yan", "Yize Zhou", "Yifan Yang", "Yixian Zhang" ], "externalIds": { "MAG": "3040730690", "ArXiv": "2007.05592", "DBLP": "journals/corr/abs-2007-05592", "DOI": "10.1007/978-3-030-78618-2_4", "CorpusId": 220496037 }, "url": "https://www.semanticscholar.org/paper/d3e2036d7ec2cdb61a33f0ffebbce184ac85aa66", "referenceCount": 22, "citationCount": 130, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Privacy‐preserving clustering of unstructured big data for cloud‐based enterprise search solutions", "abstract": "Cloud‐based enterprise search services (e.g., Amazon Kendra) are enchanting to big data owners by providing them with convenient search solutions over their enterprise big datasets. However, individuals and businesses dealing with confidential big data (e.g., criminal reports) are reluctant to fully embrace such cloud services due to valid data privacy concerns. Solutions based on client‐side encryption have been developed to mitigate these concerns. Nonetheless, such solutions hinder data processing, especially, data clustering, which is pivotal in applications such as real‐time search on large corpora (e.g., big datasets). To cluster encrypted big data, we propose privacy‐preserving clustering schemes, called ClusPr, for three forms of unstructured datasets, namely static, semi‐dynamic, and dynamic. ClusPr functions based on statistical characteristics of the datasets to: (A) determine the suitable number of clusters; (B) populate the clusters with topically relevant tokens; and (C) adapt the cluster set based on the dynamism of the underlying dataset. Experimental results, obtained from evaluating ClusPr against other schemes in the literature, on three different test datasets demonstrate between 30% and 60% improvement on the cluster coherency. 
Moreover, we notice that employing ClusPr within a privacy‐preserving enterprise search system can reduce the search time by up to 78%, while improving the search accuracy by up to 35%.", "year": 2020, "venue": "Concurrency and Computation", "authors": [ "S. Zobaed", "Raju N. Gottumukkala", "M. Salehi" ], "externalIds": { "MAG": "3027910076", "DBLP": "journals/concurrency/ZobaedS22", "ArXiv": "2005.11317", "DOI": "10.1002/cpe.7160", "CorpusId": 218869768 }, "url": "https://www.semanticscholar.org/paper/04219b3907202070c85596e62110bb1a26eec76d", "referenceCount": 89, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Learning in Vehicular Edge Computing: A Selective Model Aggregation Approach", "abstract": "Federated learning is a newly emerged distributed machine learning paradigm, where the clients are allowed to individually train local deep neural network (DNN) models with local data and then jointly aggregate a global DNN model at the central server. Vehicular edge computing (VEC) aims at exploiting the computation and communication resources at the edge of vehicular networks. Federated learning in VEC is promising to meet the ever-increasing demands of artificial intelligence (AI) applications in intelligent connected vehicles (ICV). Considering image classification as a typical AI application in VEC, the diversity of image quality and computation capability in vehicular clients potentially affects the accuracy and efficiency of federated learning. Accordingly, we propose a selective model aggregation approach, where “fine” local DNN models are selected and sent to the central server by evaluating the local image quality and computation capability. Regarding the implementation of model selection, the central server is not aware of the image quality and computation capability in the vehicular clients, whose privacy is protected under such a federated learning framework. To overcome this information asymmetry, we employ two-dimension contract theory as a distributed framework to facilitate the interactions between the central server and vehicular clients. The formulated problem is then transformed into a tractable problem through successively relaxing and simplifying the constraints, and eventually solved by a greedy algorithm. Using two datasets, i.e., MNIST and BelgiumTSC, our selective model aggregation approach is demonstrated to outperform the original federated averaging (FedAvg) approach in terms of accuracy and efficiency. Meanwhile, our approach also achieves higher utility at the central server compared with the baseline approaches.", "year": 2020, "venue": "IEEE Access", "authors": [ "Dongdong Ye", "Rong Yu", "M. Pan", "Zhu Han" ], "externalIds": { "MAG": "3001299093", "DBLP": "journals/access/YeYPH20", "DOI": "10.1109/ACCESS.2020.2968399", "CorpusId": 211119588 }, "url": "https://www.semanticscholar.org/paper/8d5ed7038d9b08ad49559d7ef0abe90949e51391", "referenceCount": 31, "citationCount": 256, "influentialCitationCount": 12, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedVision: An Online Visual Object Detection Platform Powered by Federated Learning", "abstract": "Visual object detection is a computer vision-based artificial intelligence (AI) technique which has many practical applications (e.g., fire hazard monitoring). 
However, due to privacy concerns and the high cost of transmitting video data, it is highly challenging to build object detection models on centrally stored large training datasets following the current approach. Federated learning (FL) is a promising approach to resolve this challenge. Nevertheless, there currently lacks an easy to use tool to enable computer vision application developers who are not experts in federated learning to conveniently leverage this technology and apply it in their systems. In this paper, we report FedVision - a machine learning engineering platform to support the development of federated learning powered computer vision applications. The platform has been deployed through a collaboration between WeBank and Extreme Vision to help customers develop computer vision-based safety monitoring solutions in smart city applications. Over four months of usage, it has achieved significant efficiency improvement and cost reduction while removing the need to transmit sensitive data for three major corporate customers. To the best of our knowledge, this is the first real application of FL in computer vision-based tasks.", "year": 2020, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Yang Liu", "Anbu Huang", "Yu Luo", "He Huang", "Youzhi Liu", "Yuanyuan Chen", "Lican Feng", "Tianjian Chen", "Hang Yu", "Qiang Yang" ], "externalIds": { "MAG": "3000514287", "ArXiv": "2001.06202", "DBLP": "conf/aaai/LiuHLHLCFCYY20", "DOI": "10.1609/aaai.v34i08.7021", "CorpusId": 210714181 }, "url": "https://www.semanticscholar.org/paper/1de7dae4b03fb3ab1eeffc45d2fd761538962d5c", "referenceCount": 39, "citationCount": 258, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Machine learning applied in production planning and control: a state-of-the-art in the era of industry 4.0", "abstract": null, "year": 2020, "venue": "Journal of Intelligent Manufacturing", "authors": [ "Juan Pablo Usuga Cadavid", "S. Lamouri", "B. Grabot", "R. Pellerin", "Arnaud Fortin" ], "externalIds": { "MAG": "3000441679", "DBLP": "journals/jim/CadavidLGPF20", "DOI": "10.1007/s10845-019-01531-7", "CorpusId": 214289181 }, "url": "https://www.semanticscholar.org/paper/83d1795a99f57867772f7e2990b0ce7d351a3f87", "referenceCount": 143, "citationCount": 220, "influentialCitationCount": 12, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Robust Aggregation for Federated Learning", "abstract": "We present a novel approach to federated learning that endows its aggregation process with greater robustness to potential poisoning of local data or model parameters of participating devices. The proposed approach, Robust Federated Aggregation (RFA), relies on the aggregation of updates using the geometric median, which can be computed efficiently using a Weiszfeld-type algorithm. RFA is agnostic to the level of corruption and aggregates model updates without revealing each device’s individual contribution. We establish the convergence of the robust federated learning algorithm for the stochastic learning of additive models with least squares. We also offer two variants of RFA: a faster one with one-step robust aggregation, and another one with on-device personalization. We present experimental results with additive models and deep networks for three tasks in computer vision and natural language processing. 
The experiments show that RFA is competitive with the classical aggregation when the level of corruption is low, while demonstrating greater robustness under high corruption.", "year": 2019, "venue": "IEEE Transactions on Signal Processing", "authors": [ "Krishna Pillutla", "S. Kakade", "Zaïd Harchaoui" ], "externalIds": { "DBLP": "journals/tsp/PillutlaKH22", "ArXiv": "1912.13445", "MAG": "2996769125", "DOI": "10.1109/TSP.2022.3153135", "CorpusId": 197678286 }, "url": "https://www.semanticscholar.org/paper/6629248b79ad3e87d7f9f5694a315613ed201cca", "referenceCount": 95, "citationCount": 488, "influentialCitationCount": 73, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "On Safeguarding Privacy and Security in the Framework of Federated Learning", "abstract": "Motivated by the advancing computational capacity of wireless end-user equipment (UE), as well as the increasing concerns about sharing private data, a new machine learning (ML) paradigm has emerged, namely federated learning (FL). Specifically, FL allows a decoupling of data provision at UEs and ML model aggregation at a central unit. By training model locally, FL is capable of avoiding direct data leakage from the UEs, thereby preserving privacy and security to some extent. However, even if raw data are not disclosed from UEs, an individual's private information can still be extracted by some recently discovered attacks against the FL architecture. In this work, we analyze the privacy and security issues in FL, and discuss several challenges to preserving privacy and security when designing FL systems. In addition, we provide extensive simulation results to showcase the discussed issues and possible solutions.", "year": 2019, "venue": "IEEE Network", "authors": [ "Chuan Ma", "Jun Li", "Ming Ding", "Howard H. Yang", "F. Shu", "Tony Q. S. Quek", "H. Poor" ], "externalIds": { "ArXiv": "1909.06512", "MAG": "2972321170", "DBLP": "journals/network/MaLDYSQP20", "DOI": "10.1109/MNET.001.1900506", "CorpusId": 202577193 }, "url": "https://www.semanticscholar.org/paper/3cb414da9a60276268df5a1af5fd5da02905e4a6", "referenceCount": 20, "citationCount": 201, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Incentive Mechanism for Reliable Federated Learning: A Joint Optimization Approach to Combining Reputation and Contract Theory", "abstract": "Federated learning is an emerging machine learning technique that enables distributed model training using local datasets from large-scale nodes, e.g., mobile devices, but shares only model updates without uploading the raw training data. This technique provides a promising privacy preservation for mobile devices while simultaneously ensuring high learning performance. The majority of existing work has focused on designing advanced learning algorithms with an aim to achieve better learning performance. However, the challenges, such as incentive mechanisms for participating in training and worker (i.e., mobile devices) selection schemes for reliable federated learning, have not been explored yet. These challenges have hindered the widespread adoption of federated learning. To address the above challenges, in this article, we first introduce reputation as the metric to measure the reliability and trustworthiness of the mobile devices. We then design a reputation-based worker selection scheme for reliable federated learning by using a multiweight subjective logic model. 
We also leverage the blockchain to achieve secure reputation management for workers with nonrepudiation and tamper-resistance properties in a decentralized manner. Moreover, we propose an effective incentive mechanism combining reputation with contract theory to motivate high-reputation mobile devices with high-quality data to participate in model learning. Numerical results clearly indicate that the proposed schemes are efficient for reliable federated learning in terms of significantly improving the learning accuracy.", "year": 2019, "venue": "IEEE Internet of Things Journal", "authors": [ "Jiawen Kang", "Zehui Xiong", "D. Niyato", "Shengli Xie", "Junshan Zhang" ], "externalIds": { "MAG": "2972882814", "DBLP": "journals/iotj/KangXNXZ19", "DOI": "10.1109/JIOT.2019.2940820", "CorpusId": 203178529 }, "url": "https://www.semanticscholar.org/paper/9251ad8137f9aea4596aeccc656f351cfeced551", "referenceCount": 33, "citationCount": 576, "influentialCitationCount": 42, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Edge Computing for Disaster Management in Remote Smart Oil Fields", "abstract": "Oil & Gas (O&G) industry is extending the extraction operation to remote offshore sites. Cost-effective, efficient, and nature-friendly oil extraction is a challenging issue in these remote sites, due to the disaster-prone nature of oil extraction process and hurdles in accessing these sites. To overcome these difficulties, smart oil fields use numerous sensors (e.g., pipeline pressure, gas leakage, temperature sensors) and can generate more than a terabyte of data per day. The data are transferred to cloud datacenters via high-latency and unstable satellite communication, which is not suitable for latency-intolerant (urgent) disaster-related tasks. Edge computing can be deployed in oil rigs to process the latency-intolerant tasks, however, processing capacity of an edge system falls short at the time of a disaster—when several coordinated activities must be processed within a short time. To address this shortage, we propose robust smart oil fields operating based on a federation of edge computing systems, provisioned from nearby/mobile micro datacenters. Our solution achieves robustness by capturing uncertainties exist both in communication and computation of the federated environment and allocating urgent tasks so that the likelihood of their on-time completion is maximized. Evaluation results reflect significant performance improvement (up to 27%) of the proposed solution when compared to conventional solutions for smart oil fields.", "year": 2019, "venue": "2019 IEEE 21st International Conference on High Performance Computing and Communications; IEEE 17th International Conference on Smart City; IEEE 5th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)", "authors": [ "R. Hussain", "M. Salehi", "A. Kovalenko", "Yin Feng", "Omid Semiari" ], "externalIds": { "MAG": "2979087735", "DBLP": "conf/hpcc/HussainSKFS19", "DOI": "10.1109/HPCC/SmartCity/DSS.2019.00134", "CorpusId": 203655112 }, "url": "https://www.semanticscholar.org/paper/ec3898e807512f00bf9715611704f0ac5a4cec0e", "referenceCount": 35, "citationCount": 20, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Oil Spill Identification from Satellite Images Using Deep Neural Networks", "abstract": "Oil spill is considered one of the main threats to marine and coastal environments. 
Efficient monitoring and early identification of oil slicks are vital for the corresponding authorities to react expediently, confine the environmental pollution and avoid further damage. Synthetic aperture radar (SAR) sensors are commonly used for this objective due to their capability for operating efficiently regardless of the weather and illumination conditions. Black spots probably related to oil spills can be clearly captured by SAR sensors, yet their discrimination from look-alikes poses a challenging objective. A variety of different methods have been proposed to automatically detect and classify these dark spots. Most of them employ custom-made datasets posing results as non-comparable. Moreover, in most cases, a single label is assigned to the entire SAR image resulting in a difficulties when manipulating complex scenarios or extracting further information from the depicted content. To overcome these limitations, semantic segmentation with deep convolutional neural networks (DCNNs) is proposed as an efficient approach. Moreover, a publicly available SAR image dataset is introduced, aiming to consist a benchmark for future oil spill detection methods. The presented dataset is employed to review the performance of well-known DCNN segmentation models in the specific task. DeepLabv3+ presented the best performance, in terms of test set accuracy and related inference time. Furthermore, the complex nature of the specific problem, especially due to the challenging task of discriminating oil spills and look-alikes is discussed and illustrated, utilizing the introduced dataset. Results imply that DCNN segmentation models, trained and evaluated on the provided dataset, can be utilized to implement efficient oil spill detectors. Current work is expected to contribute significantly to the future research activity regarding oil spill identification and SAR image processing.", "year": 2019, "venue": "Remote Sensing", "authors": [ "Marios Krestenitis", "G. Orfanidis", "K. Ioannidis", "Konstantinos Avgerinakis", "S. Vrochidis", "Y. Kompatsiaris" ], "externalIds": { "MAG": "2965881028", "DBLP": "journals/remotesensing/KrestenitisOIAV19", "DOI": "10.3390/RS11151762", "CorpusId": 201328617 }, "url": "https://www.semanticscholar.org/paper/edce51390acd08050bb259b490baff315f9a528b", "referenceCount": 32, "citationCount": 146, "influentialCitationCount": 19, "isOpenAccess": true, "fieldsOfStudy": [ "Environmental Science", "Computer Science" ] }, { "title": "Astraea: Self-Balancing Federated Learning for Improving Classification Accuracy of Mobile Deep Learning Applications", "abstract": "Federated learning (FL) is a distributed deep learning method which enables multiple participants, such as mobile phones and IoT devices, to contribute a neural network model while their private training data remains in local devices. This distributed approach is promising in the edge computing system where have a large corpus of decentralized data and require high privacy. However, unlike the common training dataset, the data distribution of the edge computing system is imbalanced which will introduce biases in the model training and cause a decrease in accuracy of federated learning applications. In this paper, we demonstrate that the imbalanced distributed training data will cause accuracy degradation in FL. 
To counter this problem, we build a self-balancing federated learning framework call Astraea, which alleviates the imbalances by 1) Global data distribution based data augmentation, and 2) Mediator based multi-client rescheduling. The proposed framework relieves global imbalance by runtime data augmentation, and for averaging the local imbalance, it creates the mediator to reschedule the training of clients based on Kullback-Leibler divergence (KLD) of their data distribution. Compared with FedAvg, the state-of-the-art FL algorithm, Astraea shows +5.59% and +5.89% improvement of top-1 accuracy on the imbalanced EMNIST and imbalanced CINIC-10 datasets, respectively. Meanwhile, the communication traffic of Astraea can be 92% lower than that of FedAvg.", "year": 2019, "venue": "ICCD", "authors": [ "Moming Duan" ], "externalIds": { "ArXiv": "1907.01132", "DBLP": "conf/iccd/DuanLCTRQL19", "MAG": "2954124071", "DOI": "10.1109/ICCD46524.2019.00038", "CorpusId": 195776413 }, "url": "https://www.semanticscholar.org/paper/1aa245db626691d092f263210be486cb6d8023a5", "referenceCount": 34, "citationCount": 175, "influentialCitationCount": 16, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "F-FDN: Federation of Fog Computing Systems for Low Latency Video Streaming", "abstract": "Video streaming is growing in popularity and has become the most bandwidth-consuming Internet service. As such, robust streaming in terms of low latency and uninterrupted streaming experience, particularly for viewers in distant areas, has become a challenge. The common practice to reduce latency is to pre-process multiple versions of each video and use Content Delivery Networks (CDN) to cache videos that are popular in a geographical area. However, with the fast-growing video repository sizes, caching video contents in multiple versions on each CDN is becoming inefficient. Accordingly, in this paper, we propose the architecture for Fog Delivery Networks (FDN) and provide methods to federate them (called F-FDN) to reduce video streaming latency. In addition to caching, FDNs have the ability to process videos in an on-demand manner. F-FDN leverages cached contents on the neighboring FDNs to further reduce latency. In particular, F-FDN is equipped with methods that aim at reducing latency through probabilistically evaluating the cost benefit of fetching video segments either from neighboring FDNs or by processing them. Experimental results against alternative streaming methods show that both on-demand processing and leveraging cached video segments on neighboring FDNs can remarkably reduce streaming latency (on average 52%).", "year": 2019, "venue": "International Conference on Fog and Edge Computing", "authors": [ "Vaughan Veillon", "Chavit Denninnart", "M. Salehi" ], "externalIds": { "MAG": "2944348887", "DBLP": "journals/corr/abs-1905-04459", "ArXiv": "1905.04459", "DOI": "10.1109/CFEC.2019.8733154", "CorpusId": 152282372 }, "url": "https://www.semanticscholar.org/paper/967386a4dfab85c67fd2297824a183d421154d0b", "referenceCount": 27, "citationCount": 24, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Serverless Edge Computing for Green Oil and Gas Industry", "abstract": "Escalating demand of petroleum led the Oil and Gas (O&G) industry to extend oil extraction operation in the remote reservoirs. Oil extraction is a fault intolerant process where the maximum penalty is disaster impacting the environment seriously. 
Therefore, efficient and nature-friendly green oil extraction is a challenging operation, especially with location constrained in accessing the sites. To overcome these challenges and protect the environment from pollution, smart oil fields with numerous sensors (e.g., for pipeline pressure, gas leakage, air pollution) are established to achieve clean O&G extraction. Conventionally, cloud datacenters are utilized to process the generated data. High-latency satellite communication are used for data transfer, which is not suitable for time-sensitive operations/tasks. To process such latency-sensitive tasks, edge computing can be a suitable candidate, however, their computational power goes downhill at disaster time due to surge demand of many coordinated activities. Therefore, we propose green smart oil fields that operate based on edge computing. To overcome shortage of resources and rapid deployment of the edge computing systems, we propose to use lightweight serverless computing on a federation of edge computing resources from nearby oil rigs. Our solution coordinates urgent coordinated operations/tasks to prevent disasters in oil fields and enable the idea of green smart oil fields. Evaluation results demonstrate the efficacy of our proposed solution in compare to conventional solutions for smart oil fields.", "year": 2019, "venue": "Green Technologies Conference", "authors": [ "R. Hussain", "M. Salehi", "Omid Semiari" ], "externalIds": { "MAG": "2944287943", "ArXiv": "1905.04460", "DBLP": "journals/corr/abs-1905-04460", "DOI": "10.1109/GreenTech.2019.8767119", "CorpusId": 152282909 }, "url": "https://www.semanticscholar.org/paper/e4fb13c194372597c7ffb445b368663cb670cb61", "referenceCount": 25, "citationCount": 19, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Early Identification of Oil Spills in Satellite Images Using Deep CNNs", "abstract": null, "year": 2018, "venue": "Conference on Multimedia Modeling", "authors": [ "Marios Krestenitis", "G. Orfanidis", "K. Ioannidis", "Konstantinos Avgerinakis", "S. Vrochidis", "Y. Kompatsiaris" ], "externalIds": { "MAG": "2905354680", "DBLP": "conf/mmm/KrestenitisOIAV19", "DOI": "10.1007/978-3-030-05710-7_35", "CorpusId": 56553175 }, "url": "https://www.semanticscholar.org/paper/6d13fca70a7a56dbbd2589bb2e1364498eb46f7c", "referenceCount": 19, "citationCount": 24, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Hybrid Approach to Privacy-Preserving Federated Learning", "abstract": null, "year": 2018, "venue": "Informatik-Spektrum", "authors": [ "Stacey Truex", "Nathalie Baracaldo", "Ali Anwar", "T. 
Steinke", "Heiko Ludwig", "Rui Zhang" ], "externalIds": { "DBLP": "journals/insk/TruexBASLZZ19", "MAG": "2983431304", "ArXiv": "1812.03224", "DOI": "10.1007/s00287-019-01205-x", "CorpusId": 54460482 }, "url": "https://www.semanticscholar.org/paper/67498fdf77fd036a09a4593c37b012d6cf34f3f6", "referenceCount": 65, "citationCount": 744, "influentialCitationCount": 45, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Beyond Inferring Class Representatives: User-Level Privacy Leakage From Federated Learning", "abstract": "Federated learning, i.e., a mobile edge computing framework for deep learning, is a recent advance in privacy-preserving machine learning, where the model is trained in a decentralized manner by the clients, i.e., data curators, preventing the server from directly accessing those private data from the clients. This learning mechanism significantly challenges the attack from the server side. Although the state-of-the-art attacking techniques that incorporated the advance of Generative adversarial networks (GANs) could construct class representatives of the global data distribution among all clients, it is still challenging to distinguishably attack a specific client (i.e., user-level privacy leakage), which is a stronger privacy threat to precisely recover the private data from a specific client. This paper gives the first attempt to explore user-level privacy leakage against the federated learning by the attack from a malicious server. We propose a framework incorporating GAN with a multi-task discriminator, which simultaneously discriminates category, reality, and client identity of input samples. The novel discrimination on client identity enables the generator to recover user specified private data. Unlike existing works that tend to interfere the training process of the federated learning, the proposed method works “invisibly” on the server side. The experimental results demonstrate the effectiveness of the proposed attacking approach and the superior to the state-of-the-art.", "year": 2018, "venue": "IEEE Conference on Computer Communications", "authors": [ "Zhibo Wang", "Mengkai Song", "Zhifei Zhang", "Yang Song", "Qian Wang", "H. Qi" ], "externalIds": { "MAG": "2902229711", "DBLP": "conf/infocom/WangSZSWQ19", "ArXiv": "1812.00535", "DOI": "10.1109/INFOCOM.2019.8737416", "CorpusId": 54436587 }, "url": "https://www.semanticscholar.org/paper/33c3f816bde8ee63ee9f2e60d4387b9390696371", "referenceCount": 27, "citationCount": 692, "influentialCitationCount": 28, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Novel Focal Tversky Loss Function With Improved Attention U-Net for Lesion Segmentation", "abstract": "We propose a generalized focal loss function based on the Tversky index to address the issue of data imbalance in medical image segmentation. Compared to the commonly used Dice loss, our loss function achieves a better trade off between precision and recall when training on small structures such as lesions. To evaluate our loss function, we improve the attention U-Net model by incorporating an image pyramid to preserve contextual features. We experiment on the BUS 2017 dataset and ISIC 2018 dataset where lesions occupy 4.84% and 21.4% of the images area and improve segmentation accuracy when compared to the standard U-Net by 25.7% and 3.6%, respectively.", "year": 2018, "venue": "IEEE International Symposium on Biomedical Imaging", "authors": [ "Nabila Abraham", "N. 
Khan" ], "externalIds": { "DBLP": "journals/corr/abs-1810-07842", "MAG": "2962107985", "ArXiv": "1810.07842", "DOI": "10.1109/ISBI.2019.8759329", "CorpusId": 53016422 }, "url": "https://www.semanticscholar.org/paper/fa56122bf5e9c66cd4418513d0b8d914024e4e9b", "referenceCount": 18, "citationCount": 627, "influentialCitationCount": 78, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multi-Institutional Deep Learning Modeling Without Sharing Patient Data: A Feasibility Study on Brain Tumor Segmentation", "abstract": null, "year": 2018, "venue": "BrainLes@MICCAI", "authors": [ "Micah J. Sheller", "G. A. Reina", "Brandon Edwards", "Jason Martin", "S. Bakas" ], "externalIds": { "MAG": "2951467122", "ArXiv": "1810.04304", "DBLP": "journals/corr/abs-1810-04304", "DOI": "10.1007/978-3-030-11723-8_9", "CorpusId": 52956484, "PubMed": "31231720" }, "url": "https://www.semanticscholar.org/paper/97943e09b03e3ccd30761835ea5cbbea174de43d", "referenceCount": 23, "citationCount": 414, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine", "Mathematics" ] }, { "title": "Tversky Loss Function for Image Segmentation Using 3D Fully Convolutional Deep Networks", "abstract": null, "year": 2017, "venue": "MLMI@MICCAI", "authors": [ "S. Salehi", "Deniz Erdoğmuş", "A. Gholipour" ], "externalIds": { "DBLP": "journals/corr/SalehiEG17a", "MAG": "2685298434", "ArXiv": "1706.05721", "DOI": "10.1007/978-3-319-67389-9_44", "CorpusId": 732793 }, "url": "https://www.semanticscholar.org/paper/6bf187cf239e66767688ed7dd88f6a408bf465f0", "referenceCount": 22, "citationCount": 728, "influentialCitationCount": 49, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Communication-Efficient Learning of Deep Networks from Decentralized Data", "abstract": "Modern mobile devices have access to a wealth of data suitable for learning models, which in turn can greatly improve the user experience on the device. For example, language models can improve speech recognition and text entry, and image models can automatically select good photos. However, this rich data is often privacy sensitive, large in quantity, or both, which may preclude logging to the data center and training there using conventional approaches. We advocate an alternative that leaves the training data distributed on the mobile devices, and learns a shared model by aggregating locally-computed updates. We term this decentralized approach Federated Learning. \nWe present a practical method for the federated learning of deep networks based on iterative model averaging, and conduct an extensive empirical evaluation, considering five different model architectures and four datasets. These experiments demonstrate the approach is robust to the unbalanced and non-IID data distributions that are a defining characteristic of this setting. Communication costs are the principal constraint, and we show a reduction in required communication rounds by 10-100x as compared to synchronized stochastic gradient descent.", "year": 2016, "venue": "International Conference on Artificial Intelligence and Statistics", "authors": [ "H. B. McMahan", "Eider Moore", "Daniel Ramage", "S. Hampson", "B. A. Y. 
Arcas" ], "externalIds": { "MAG": "2950745363", "DBLP": "conf/aistats/McMahanMRHA17", "ArXiv": "1602.05629", "CorpusId": 14955348 }, "url": "https://www.semanticscholar.org/paper/d1dbf643447405984eeef098b1b320dee0b3b8a7", "referenceCount": 50, "citationCount": 13806, "influentialCitationCount": 3346, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Privacy-preserving deep learning", "abstract": "Deep learning based on artificial neural networks is a very popular approach to modeling, classifying, and recognizing complex data such as images, speech, and text. The unprecedented accuracy of deep learning methods has turned them into the foundation of new AI-based services on the Internet. Commercial companies that collect user data on a large scale have been the main beneficiaries of this trend since the success of deep learning techniques is directly proportional to the amount of data available for training. Massive data collection required for deep learning presents obvious privacy issues. Users' personal, highly sensitive data such as photos and voice recordings is kept indefinitely by the companies that collect it. Users can neither delete it, nor restrict the purposes for which it is used. Furthermore, centrally kept data is subject to legal subpoenas and extrajudicial surveillance. Many data owners-for example, medical institutions that may want to apply deep learning methods to clinical records-are prevented by privacy and confidentiality concerns from sharing the data and thus benefitting from large-scale deep learning. In this paper, we present a practical system that enables multiple parties to jointly learn an accurate neural-network model for a given objective without sharing their input datasets. We exploit the fact that the optimization algorithms used in modern deep learning, namely, those based on stochastic gradient descent, can be parallelized and executed asynchronously. Our system lets participants train independently on their own datasets and selectively share small subsets of their models' key parameters during training. This offers an attractive point in the utility/privacy tradeoff space: participants preserve the privacy of their respective data while still benefitting from other participants' models and thus boosting their learning accuracy beyond what is achievable solely on their own inputs. We demonstrate the accuracy of our privacy-preserving deep learning on benchmark datasets.", "year": 2015, "venue": "Allerton Conference on Communication, Control, and Computing", "authors": [ "R. Shokri", "Vitaly Shmatikov" ], "externalIds": { "MAG": "2053637704", "DBLP": "conf/ccs/ShokriS15", "DOI": "10.1145/2810103.2813687", "CorpusId": 20714 }, "url": "https://www.semanticscholar.org/paper/f2f8f7a2ec1b2ede48cbcd189b376ab9fa0735ef", "referenceCount": 61, "citationCount": 2110, "influentialCitationCount": 169, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "U-Net: Convolutional Networks for Biomedical Image Segmentation", "abstract": null, "year": 2015, "venue": "International Conference on Medical Image Computing and Computer-Assisted Intervention", "authors": [ "O. Ronneberger", "P. Fischer", "T. 
Brox" ], "externalIds": { "ArXiv": "1505.04597", "MAG": "1901129140", "DBLP": "journals/corr/RonnebergerFB15", "DOI": "10.1007/978-3-319-24574-4_28", "CorpusId": 3719281 }, "url": "https://www.semanticscholar.org/paper/6364fdaa0a0eccd823a779fcdd489173f938e91a", "referenceCount": 18, "citationCount": 66494, "influentialCitationCount": 9274, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Public-Key Cryptosystems Based on Composite Degree Residuosity Classes", "abstract": null, "year": 1999, "venue": "International Conference on the Theory and Application of Cryptographic Techniques", "authors": [ "Pascal Paillier" ], "externalIds": { "MAG": "2132172731", "DBLP": "conf/eurocrypt/Paillier99", "DOI": "10.1007/3-540-48910-X_16", "CorpusId": 9483611 }, "url": "https://www.semanticscholar.org/paper/16d9393477302285a5ec57e737e5734f11a4040c", "referenceCount": 34, "citationCount": 7226, "influentialCitationCount": 874, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PySyft: A Library for Easy Federated Learning", "abstract": null, "year": 2021, "venue": "", "authors": [ "Alexander Ziller", "Andrew Trask", "Antonio Lopardo", "Benjamin Szymkow", "Bobby Wagner", "Emma Bluemke", "Jean-Mickael Nounahon", "Jonathan Passerat-Palmbach", "Kritika Prakash", "Nick Rose", "T. Ryffel", "Zarreen Naowal Reza", "Georgios Kaissis" ], "externalIds": { "MAG": "3172018708", "DOI": "10.1007/978-3-030-70604-3_5", "CorpusId": 236690571 }, "url": "https://www.semanticscholar.org/paper/9c530d2e71f9a66319c924cef583897f01807e8b", "referenceCount": 41, "citationCount": 136, "influentialCitationCount": 13, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Google colaboratory", "abstract": null, "year": null, "venue": "Building Machine Learning and Deep Learning Models on Google Cloud Platform", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Federated Large Language Models: Current Progress and Future Directions": { "paper_title": "Federated Large Language Models: Current Progress and Future Directions", "arxiv_id": "2409.15723v1", "keyword": "federate learning", "authors": [ "Yuhang Yao", "Jianyi Zhang", "Junda Wu", "Chengkai Huang", "Yu Xia", "Tong Yu", "Ruiyi Zhang", "Sungchul Kim", "Ryan Rossi", "Ang Li", "Lina Yao", "Julian McAuley", "Yiran Chen", "Carlee Joe-Wong" ], "references": [ { "title": "MLLM-FL: Multimodal Large Language Model Assisted Federated Learning on Heterogeneous and Long-tailed Data", "abstract": "Previous studies on federated learning (FL) often encounter performance degradation due to data heterogeneity among different clients. In light of the recent advances in multimodal large language models (MLLMs), such as GPT-4v and LLaVA, which demonstrate their exceptional proficiency in multimodal tasks, such as image captioning and multimodal question answering. We introduce a novel federated learning framework, named Multimodal Large Language Model Assisted Federated Learning (MLLM-FL), which which employs powerful MLLMs at the server end to address the heterogeneous and long-tailed challenges. Owing to the advanced cross-modality representation capabilities and the extensive open-vocabulary prior knowledge of MLLMs, our framework is adept at harnessing the extensive, yet previously underexploited, open-source data accessible from websites and powerful server-side computational resources. 
Hence, the MLLM-FL not only enhances the performance but also avoids increasing the risk of privacy leakage and the computational burden on local devices, distinguishing it from prior methodologies. Our framework has three key stages. Initially, prior to local training on local datasets of clients, we conduct global visual-text pretraining of the model. This pretraining is facilitated by utilizing the extensive open-source data available online, with the assistance of multimodal large language models. Subsequently, the pretrained model is distributed among various clients for local training. Finally, once the locally trained models are transmitted back to the server, a global alignment is carried out under the supervision of MLLMs to further enhance the performance. Experimental evaluations on established benchmarks, show that our framework delivers promising performance in the typical scenarios with data heterogeneity and long-tail distribution across different clients in FL.", "year": 2024, "venue": "", "authors": [ "Jianyi Zhang", "Hao Frank Yang", "Ang Li", "Xin Guo", "Pu Wang", "Haiming Wang", "Yiran Chen", "Hai Li" ], "externalIds": { "ArXiv": "2409.06067", "CorpusId": 272550385 }, "url": "https://www.semanticscholar.org/paper/7b5747f6b6afab4f88aa2307da88aa8c72a6fb82", "referenceCount": 50, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FederatedScope-LLM: A Comprehensive Package for Fine-tuning Large Language Models in Federated Learning", "abstract": null, "year": 2024, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Weirui Kuang", "Bingchen Qian", "Zitao Li", "Daoyuan Chen", "Dawei Gao", "Xuchen Pan", "Yuexiang Xie", "Yaliang Li", "Bolin Ding", "Jingren Zhou" ], "externalIds": { "DBLP": "conf/kdd/KuangQLCGPXLDZ24", "DOI": "10.1145/3637528.3671573", "CorpusId": 271954603 }, "url": "https://www.semanticscholar.org/paper/a887f03fe0f3b36c8c6590a94604320ba2179d3d", "referenceCount": 6, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Do large language models have a legal duty to tell the truth?", "abstract": "Careless speech is a new type of harm created by large language models (LLM) that poses cumulative, long-term risks to science, education and shared social truth in democratic societies. LLMs produce responses that are plausible, helpful and confident, but that contain factual inaccuracies, misleading references and biased information. These subtle mistruths are poised to cumulatively degrade and homogenize knowledge over time. This article examines the existence and feasibility of a legal duty for LLM providers to create models that ‘tell the truth’. We argue that LLM providers should be required to mitigate careless speech and better align their models with truth through open, democratic processes. We define careless speech against ‘ground truth’ in LLMs and related risks including hallucinations, misinformation and disinformation. We assess the existence of truth-related obligations in EU human rights law and the Artificial Intelligence Act, Digital Services Act, Product Liability Directive and Artificial Intelligence Liability Directive. Current frameworks contain limited, sector-specific truth duties. 
Drawing on duties in science and academia, education, archives and libraries, and a German case in which Google was held liable for defamation caused by autocomplete, we propose a pathway to create a legal truth duty for providers of narrow- and general-purpose LLMs.", "year": 2024, "venue": "Royal Society Open Science", "authors": [ "Sandra Wachter", "Brent Mittelstadt", "Christopher Russell" ], "externalIds": { "PubMedCentral": "11303832", "DOI": "10.1098/rsos.240197", "CorpusId": 269166993, "PubMed": "39113763" }, "url": "https://www.semanticscholar.org/paper/38d6e4c8feefb22a0341876dc7f0e45574af9864", "referenceCount": 128, "citationCount": 6, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Challenges and Considerations in Annotating Legal Data: A Comprehensive Overview", "abstract": "The process of annotating data within the legal sector is filled with distinct challenges that differ from other fields, primarily due to the inherent complexities of legal language and documentation. The initial task usually involves selecting an appropriate raw dataset that captures the intricate aspects of legal texts. Following this, extracting text becomes a complicated task, as legal documents often have complex structures, footnotes, references, and unique terminology. The importance of data cleaning is magnified in this context, ensuring that redundant information is eliminated while maintaining crucial legal details and context. Creating comprehensive yet straightforward annotation guidelines is imperative, as these guidelines serve as the road map for maintaining uniformity and addressing the subtle nuances of legal terminology. Another critical aspect is the involvement of legal professionals in the annotation process. Their expertise is valuable in ensuring that the data not only remains contextually accurate but also adheres to prevailing legal standards and interpretations. This paper provides an expanded view of these challenges and aims to offer a foundational understanding and guidance for researchers and professionals engaged in legal data annotation projects. In addition, we provide links to our created and fine-tuned datasets and language models. These resources are outcomes of our discussed projects and solutions to challenges faced while working on them.", "year": 2024, "venue": "arXiv.org", "authors": [ "Harsh Darji", "Jelena Mitrovi'c", "Michael Granitzer" ], "externalIds": { "DBLP": "journals/corr/abs-2407-17503", "ArXiv": "2407.17503", "DOI": "10.48550/arXiv.2407.17503", "CorpusId": 271432488 }, "url": "https://www.semanticscholar.org/paper/d1dd688a39850542dd76918a7d31c455d7f2280f", "referenceCount": 14, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedBiOT: LLM Local Fine-tuning in Federated Learning without Full Model", "abstract": "Large language models (LLMs) show amazing performance on many domain-specific tasks after fine-tuning with some appropriate data. However, many domain-specific data are privately distributed across multiple owners. Thus, this dilemma raises the interest in how to perform LLM fine-tuning in federated learning (FL). However, confronted with limited computation and communication capacities, FL clients struggle to fine-tune an LLM effectively. To this end, we introduce FedBiOT, a resource-efficient LLM fine-tuning approach to FL. 
Specifically, our method involves the server generating a compressed LLM and aligning its performance with the full model. Subsequently, the clients fine-tune a lightweight yet important part of the compressed model, referred to as an adapter. Notice that as the server has no access to the private data owned by the clients, the data used for alignment by the server has a different distribution from the one used for fine-tuning by clients. We formulate the problem into a bi-level optimization problem to minimize the negative effect of data discrepancy and derive the updating rules for the server and clients. We conduct extensive experiments on LLaMA-2, empirically showing that the adapter has exceptional performance when reintegrated into the global LLM. The results also indicate that the proposed FedBiOT significantly reduces resource consumption compared to existing benchmarks, all while achieving comparable performance levels.", "year": 2024, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Feijie Wu", "Zitao Li", "Yaliang Li", "Bolin Ding", "Jing Gao" ], "externalIds": { "DBLP": "journals/corr/abs-2406-17706", "ArXiv": "2406.17706", "DOI": "10.48550/arXiv.2406.17706", "CorpusId": 270711287 }, "url": "https://www.semanticscholar.org/paper/f57edfe71eb044da97b56cf39157002174644352", "referenceCount": 45, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Pre-Training and Personalized Fine-Tuning via Over-the-Air Federated Meta-Learning: Convergence-Generalization Trade-Offs", "abstract": "For modern artificial intelligence (AI) applications such as large language models (LLMs), the training paradigm has recently shifted to pre-training followed by fine-tuning. Furthermore, owing to dwindling open repositories of data and thanks to efforts to democratize access to AI models, pre-training is expected to increasingly migrate from the current centralized deployments to federated learning (FL) implementations. Meta-learning provides a general framework in which pre-training and fine-tuning can be formalized. Meta-learning-based personalized FL (meta-pFL) moves beyond basic personalization by targeting generalization to new agents and tasks. This paper studies the generalization performance of meta-pFL for a wireless setting in which the agents participating in the pre-training phase, i.e., meta-learning, are connected via a shared wireless channel to the server. Adopting over-the-air computing, we study the trade-off between generalization to new agents and tasks, on the one hand, and convergence, on the other hand. The trade-off arises from the fact that channel impairments may enhance generalization, while degrading convergence. 
Extensive numerical results validate the theory.", "year": 2024, "venue": "arXiv.org", "authors": [ "Haifeng Wen", "Hong Xing", "Osvaldo Simeone" ], "externalIds": { "ArXiv": "2406.11569", "DBLP": "journals/corr/abs-2406-11569", "DOI": "10.48550/arXiv.2406.11569", "CorpusId": 270560825 }, "url": "https://www.semanticscholar.org/paper/66c351673ce839bcb5c6ee352e45d75e7adbf498", "referenceCount": 48, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Federated Learning driven Large Language Models for Swarm Intelligence: A Survey", "abstract": "Federated learning (FL) offers a compelling framework for training large language models (LLMs) while addressing data privacy and decentralization challenges. This paper surveys recent advancements in the federated learning of large language models, with a particular focus on machine unlearning, a crucial aspect for complying with privacy regulations like the Right to be Forgotten. Machine unlearning in the context of federated LLMs involves systematically and securely removing individual data contributions from the learned model without retraining from scratch. We explore various strategies that enable effective unlearning, such as perturbation techniques, model decomposition, and incremental learning, highlighting their implications for maintaining model performance and data privacy. Furthermore, we examine case studies and experimental results from recent literature to assess the effectiveness and efficiency of these approaches in real-world scenarios. Our survey reveals a growing interest in developing more robust and scalable federated unlearning methods, suggesting a vital area for future research in the intersection of AI ethics and distributed machine learning technologies.", "year": 2024, "venue": "arXiv.org", "authors": [ "Youyang Qu" ], "externalIds": { "DBLP": "journals/corr/abs-2406-09831", "ArXiv": "2406.09831", "DOI": "10.48550/arXiv.2406.09831", "CorpusId": 270521408 }, "url": "https://www.semanticscholar.org/paper/80374c344049e9b67006bd223ceabf9feb4ff91d", "referenceCount": 45, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedLLM-Bench: Realistic Benchmarks for Federated Learning of Large Language Models", "abstract": "Federated learning has enabled multiple parties to collaboratively train large language models without directly sharing their data (FedLLM). Following this training paradigm, the community has put massive efforts from diverse aspects including framework, performance, and privacy. However, an unpleasant fact is that there are currently no realistic datasets and benchmarks for FedLLM and previous works all rely on artificially constructed datasets, failing to capture properties in real-world scenarios. Addressing this, we propose FedLLM-Bench, which involves 8 training methods, 4 training datasets, and 6 evaluation metrics, to offer a comprehensive testbed for the FedLLM community. FedLLM-Bench encompasses three datasets (e.g., user-annotated multilingual dataset) for federated instruction tuning and one dataset (e.g., user-annotated preference dataset) for federated preference alignment, whose scale of client number ranges from 38 to 747. Our datasets incorporate several representative diversities: language, quality, quantity, instruction, length, embedding, and preference, capturing properties in real-world scenarios. 
Based on FedLLM-Bench, we conduct experiments on all datasets to benchmark existing FL methods and provide empirical insights (e.g., multilingual collaboration). We believe that our FedLLM-Bench can benefit the FedLLM community by reducing required efforts, providing a practical testbed, and promoting fair comparisons. Code and datasets are available at https://github.com/rui-ye/FedLLM-Bench.", "year": 2024, "venue": "arXiv.org", "authors": [ "Rui Ye", "Rui Ge", "Xinyu Zhu", "Jingyi Chai", "Yaxin Du", "Yang Liu", "Yanfeng Wang", "Siheng Chen" ], "externalIds": { "ArXiv": "2406.04845", "DBLP": "journals/corr/abs-2406-04845", "DOI": "10.48550/arXiv.2406.04845", "CorpusId": 270357469 }, "url": "https://www.semanticscholar.org/paper/22f084c55f5897706bc6d82c3784ebeb84433c34", "referenceCount": 79, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Future of Large Language Model Pre-training is Federated", "abstract": "Generative pre-trained large language models (LLMs) have demonstrated impressive performance over a wide range of tasks, thanks to the unprecedented amount of data they have been trained on. As established scaling laws indicate, LLMs' future performance improvement depends on the amount of computing and data sources they can leverage for pre-training. Federated learning (FL) has the potential to unleash the majority of the planet's data and computational resources, which are underutilized by the data-center-focused training methodology of current LLM practice. Our work presents a robust, flexible, reproducible FL approach that enables large-scale collaboration across institutions to train LLMs. We propose a scalable deployment system called Photon to enable the investigation and development of this new training paradigm for LLM pre-training. We show that Photon can be used by organizations interested in collaborating with their private data sources and computational resources for pre-training LLMs with billions of parameters. This paradigm would mobilize more computational and data resources while matching or potentially exceeding centralized performance. We further show the effectiveness of the federated training scales with model size and present our approach for training a billion-scale federated LLM using limited resources. Finally, we show that LLM training is highly resilient to the classical challenges of federated statistical and hardware heterogeneity. Furthermore, we show that convergence is robust to partial participation, opening the avenue for compute-efficient collaborative training. Photon will help data-rich actors to become the protagonists of LLMs pre-training instead of leaving the stage to compute-rich actors alone.", "year": 2024, "venue": "arXiv.org", "authors": [ "Lorenzo Sani", "Alexandru Iacob", "Zeyu Cao", "Bill Marino", "Yan Gao", "Tomas Paulik", "Wanru Zhao", "William F. Shen", "Preslav Aleksandrov", "Xinchi Qiu", "N. D. 
Lane" ], "externalIds": { "ArXiv": "2405.10853", "DBLP": "journals/corr/abs-2405-10853", "DOI": "10.48550/arXiv.2405.10853", "CorpusId": 269899649 }, "url": "https://www.semanticscholar.org/paper/401266f09d2ddc0c41f091bf6aec0814d6ea85ac", "referenceCount": 98, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Prompt Public Large Language Models to Synthesize Data for Private On-device Applications", "abstract": "Pre-training on public data is an effective method to improve the performance for federated learning (FL) with differential privacy (DP). This paper investigates how large language models (LLMs) trained on public data can improve the quality of pre-training data for the on-device language models trained with DP and FL. We carefully design LLM prompts to filter and transform existing public data, and generate new data to resemble the real user data distribution. The model pre-trained on our synthetic dataset achieves relative improvement of 19.0% and 22.8% in next word prediction accuracy compared to the baseline model pre-trained on a standard public dataset, when evaluated over the real user data in Gboard (Google Keyboard, a production mobile keyboard application). Furthermore, our method achieves evaluation accuracy better than or comparable to the baseline during the DP FL fine-tuning over millions of mobile devices, and our final model outperforms the baseline in production A/B testing. Our experiments demonstrate the strengths of LLMs in synthesizing data close to the private distribution even without accessing the private data, and also suggest future research directions to further reduce the distribution gap.", "year": 2024, "venue": "arXiv.org", "authors": [ "Shanshan Wu", "Zheng Xu", "Yanxiang Zhang", "Yuanbo Zhang", "Daniel Ramage" ], "externalIds": { "DBLP": "journals/corr/abs-2404-04360", "ArXiv": "2404.04360", "DOI": "10.48550/arXiv.2404.04360", "CorpusId": 269005897 }, "url": "https://www.semanticscholar.org/paper/6a28f9390a2fa99e593b2b3efeccce4abec52631", "referenceCount": 56, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Improving LoRA in Privacy-preserving Federated Learning", "abstract": "Low-rank adaptation (LoRA) is one of the most popular task-specific parameter-efficient fine-tuning (PEFT) methods on pre-trained language models for its good performance and computational efficiency. LoRA injects a product of two trainable rank decomposition matrices over the top of each frozen pre-trained model module. However, when applied in the setting of privacy-preserving federated learning (FL), LoRA may become unstable due to the following facts: 1) the effects of data heterogeneity and multi-step local updates are non-negligible, 2) additive noise enforced on updating gradients to guarantee differential privacy (DP) can be amplified and 3) the final performance is susceptible to hyper-parameters. A key factor leading to these phenomena is the discordance between jointly optimizing the two low-rank matrices by local clients and separately aggregating them by the central server. Thus, this paper proposes an efficient and effective version of LoRA, Federated Freeze A LoRA (FFA-LoRA), to alleviate these challenges and further halve the communication cost of federated fine-tuning LLMs. The core idea of FFA-LoRA is to fix the randomly initialized non-zero matrices and only fine-tune the zero-initialized matrices. 
Compared to LoRA, FFA-LoRA is motivated by practical and theoretical benefits in privacy-preserved FL. Our experiments demonstrate that FFA-LoRA provides more consistent performance with better computational efficiency over vanilla LoRA in various FL tasks.", "year": 2024, "venue": "International Conference on Learning Representations", "authors": [ "Youbang Sun", "Zitao Li", "Yaliang Li", "Bolin Ding" ], "externalIds": { "DBLP": "journals/corr/abs-2403-12313", "ArXiv": "2403.12313", "DOI": "10.48550/arXiv.2403.12313", "CorpusId": 268531521 }, "url": "https://www.semanticscholar.org/paper/49e0456e4a21b343ff0f4b6a2dc762a8daad4afe", "referenceCount": 49, "citationCount": 20, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedBRB: An Effective Solution to the Small-to-Large Scenario in Device-Heterogeneity Federated Learning", "abstract": "Recently, the success of large models has demonstrated the importance of scaling up model size. This has spurred interest in exploring collaborative training of large-scale models from a federated learning perspective. Due to computational constraints, many institutions struggle to train a large-scale model locally. Thus, training a larger global model using only smaller local models has become an important scenario (i.e., the \\textbf{small-to-large scenario}). Although recent device-heterogeneity federated learning approaches have started to explore this area, they face limitations in fully covering the parameter space of the global model. In this paper, we propose a method called \\textbf{FedBRB} (\\underline{B}lock-wise \\underline{R}olling and weighted \\underline{B}roadcast) based on the block concept. FedBRB can use small local models to train all blocks of the large global model, and broadcasts the trained parameters to the entire space for faster information interaction. Experiments demonstrate FedBRB yields substantial performance gains, achieving state-of-the-art results in this scenario. Moreover, FedBRB using only minimal local models can even surpass baselines using larger local models.", "year": 2024, "venue": "arXiv.org", "authors": [ "Ziyue Xu", "Ming Xu", "Tianchi Liao", "Zibin Zheng", "Chuan Chen" ], "externalIds": { "DBLP": "journals/corr/abs-2402-17202", "ArXiv": "2402.17202", "DOI": "10.48550/arXiv.2402.17202", "CorpusId": 268032508 }, "url": "https://www.semanticscholar.org/paper/af2904881d40fe23576a14bd9149fa216bc3e80c", "referenceCount": 34, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FeB4RAG: Evaluating Federated Search in the Context of Retrieval Augmented Generation", "abstract": "Federated search systems aggregate results from multiple search engines, selecting appropriate sources to enhance result quality and align with user intent. With the increasing uptake of Retrieval-Augmented Generation (RAG) pipelines, federated search can play a pivotal role in sourcing relevant information across heterogeneous data sources to generate informed responses. However, existing datasets, such as those developed in the past TREC FedWeb tracks, predate the RAG paradigm shift and lack representation of modern information retrieval challenges. To bridge this gap, we present FeB4RAG, a novel dataset specifically designed for federated search within RAG frameworks. 
This dataset, derived from 16 sub-collections of the widely used \\beir benchmarking collection, includes 790 information requests (akin to conversational queries) tailored for chatbot applications, along with top results returned by each resource and associated LLM-derived relevance judgements. Additionally, to support the need for this collection, we demonstrate the impact on response generation of a high quality federated search system for RAG compared to a naive approach to federated search. We do so by comparing answers generated through the RAG pipeline through a qualitative side-by-side comparison. Our collection fosters and supports the development and evaluation of new federated search methods, especially in the context of RAG pipelines.", "year": 2024, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "Shuai Wang", "Ekaterina Khramtsova", "Shengyao Zhuang", "G. Zuccon" ], "externalIds": { "DBLP": "conf/sigir/WangKZZ24", "ArXiv": "2402.11891", "DOI": "10.48550/arXiv.2402.11891", "CorpusId": 267750506 }, "url": "https://www.semanticscholar.org/paper/32b9f9e2654b7280671a2be355cbd04c1b7cc2ea", "referenceCount": 50, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Fine-tuning of Large Language Models under Heterogeneous Language Tasks and Client Resources", "abstract": "Federated Learning (FL) has recently been applied to the parameter-efficient fine-tuning of Large Language Models (LLMs). While promising, it raises significant challenges due to the heterogeneous resources and data distributions of clients. This study introduces FlexLoRA, a simple yet effective aggregation scheme for LLM fine-tuning, which mitigates the ``bucket effect'' in traditional FL that restricts the potential of clients with ample resources by tying them to the capabilities of the least-resourced participants. FlexLoRA allows for dynamic adjustment of local LoRA ranks, fostering the development of a global model imbued with broader, less task-specific knowledge. By synthesizing a full-size LoRA weight from individual client contributions and employing Singular Value Decomposition (SVD) for weight redistribution, FlexLoRA fully leverages heterogeneous client resources. Involving thousands of clients performing heterogeneous NLP tasks and client resources, our experiments validate the efficacy of FlexLoRA, with the federated global model achieving consistently better improvement over SOTA FL methods in downstream NLP task performance across various heterogeneous distributions. 
FlexLoRA's practicality is further underscored by our theoretical analysis and its seamless integration with existing LoRA-based FL methods, offering a path toward cross-device, privacy-preserving federated tuning for LLMs.", "year": 2024, "venue": "arXiv.org", "authors": [ "Jiamu Bai", "Daoyuan Chen", "Bingchen Qian", "Liuyi Yao", "Yaliang Li" ], "externalIds": { "DBLP": "journals/corr/abs-2402-11505", "ArXiv": "2402.11505", "DOI": "10.48550/arXiv.2402.11505", "CorpusId": 267750117 }, "url": "https://www.semanticscholar.org/paper/8bc40bd3a2268b61cb1d79ad4ea468d4d23d6ebd", "referenceCount": 45, "citationCount": 11, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LLM-based Federated Recommendation", "abstract": "Large Language Models (LLMs), with their advanced contextual understanding abilities, have demonstrated considerable potential in enhancing recommendation systems via fine-tuning methods. However, fine-tuning requires users' behavior data, which poses considerable privacy risks due to the incorporation of sensitive user information. The unintended disclosure of such data could infringe upon data protection laws and give rise to ethical issues. To mitigate these privacy issues, Federated Learning for Recommendation (Fed4Rec) has emerged as a promising approach. Nevertheless, applying Fed4Rec to LLM-based recommendation presents two main challenges: first, an increase in the imbalance of performance across clients, affecting the system's efficiency over time, and second, a high demand on clients' computational and storage resources for local training and inference of LLMs. To address these challenges, we introduce a Privacy-Preserving LLM-based Recommendation (PPLR) framework. The PPLR framework employs two primary strategies. First, it implements a dynamic balance strategy, which involves the design of dynamic parameter aggregation and adjustment of learning speed for different clients during the training phase, to ensure relatively balanced performance across all clients. Second, PPLR adopts a flexible storage strategy, selectively retaining certain sensitive layers of the language model on the client side while offloading non-sensitive layers to the server. This approach aims to preserve user privacy while efficiently saving computational and storage resources. Experimental results demonstrate that PPLR not only achieves a balanced performance among clients but also enhances overall system performance in a manner that is both computationally and storage-efficient, while effectively protecting user privacy.", "year": 2024, "venue": "arXiv.org", "authors": [ "Jujia Zhao", "Wenjie Wang", "Chen Xu", "Zhaochun Ren", "See-kiong Ng", "Tat-Seng Chua" ], "externalIds": { "DBLP": "journals/corr/abs-2402-09959", "ArXiv": "2402.09959", "DOI": "10.48550/arXiv.2402.09959", "CorpusId": 267682268 }, "url": "https://www.semanticscholar.org/paper/92c361d02caa966769b6888d4eff1080929e9dbb", "referenceCount": 43, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Prompt-based Decision Transformer for Customized VR Services in Mobile Edge Computing System", "abstract": "This paper investigates resource allocation to provide heterogeneous users with customized virtual reality (VR) services in a mobile edge computing (MEC) system. 
We first introduce a quality of experience (QoE) metric to measure user experience, which considers the MEC system's latency, user attention levels, and preferred resolutions. Then, a QoE maximization problem is formulated for resource allocation to ensure the highest possible user experience, which is cast as a reinforcement learning problem, aiming to learn a generalized policy applicable across diverse user environments for all MEC servers. To learn the generalized policy, we propose a framework that employs federated learning (FL) and prompt-based sequence modeling to pre-train a common decision model across MEC servers, which is named FedPromptDT. Using FL solves the problem of insufficient local MEC data while protecting user privacy during offline training. The design of prompts integrating user-environment cues and user-preferred allocation improves the model's adaptability to various user environments during online execution.", "year": 2024, "venue": "arXiv.org", "authors": [ "Tailin Zhou", "Jiadong Yu", "Jun Zhang", "Danny H. K. Tsang" ], "externalIds": { "DBLP": "journals/corr/abs-2402-09729", "ArXiv": "2402.09729", "DOI": "10.48550/arXiv.2402.09729", "CorpusId": 267681828 }, "url": "https://www.semanticscholar.org/paper/4218430db82ba6ef44996e4016bebd8e4678c90d", "referenceCount": 40, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Prompt-Based Personalized Federated Learning for Medical Visual Question Answering", "abstract": "We present a novel prompt-based personalized federated learning (pFL) method to address data heterogeneity and privacy concerns in traditional medical visual question answering (VQA) methods. Specifically, we regard medical datasets from different organs as clients and use pFL to train personalized transformer-based VQA models for each client. To address the high computational complexity of client-to-client communication in previous pFL methods, we propose a succinct information sharing system by introducing prompts that are small learnable parameters. In addition, the proposed method introduces a reliability parameter to prevent the negative effects of low performance and irrelevant clients. Finally, extensive evaluations on various heterogeneous medical datasets attest to the effectiveness of our proposed method.", "year": 2024, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "He Zhu", "Ren Togo", "Takahiro Ogawa", "M. Haseyama" ], "externalIds": { "ArXiv": "2402.09677", "DBLP": "journals/corr/abs-2402-09677", "DOI": "10.1109/ICASSP48485.2024.10445933", "CorpusId": 267682035 }, "url": "https://www.semanticscholar.org/paper/bc431ad78e91a73a69580b7d256d18777dcda313", "referenceCount": 27, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "OpenFedLLM: Training Large Language Models on Decentralized Private Data via Federated Learning", "abstract": "Trained on massive publicly available data, large language models (LLMs) have demonstrated tremendous success across various fields. While more data contributes to better performance, a disconcerting reality is that high-quality public data will be exhausted in a few years. 
In this paper, we offer a potential next step for contemporary LLMs: collaborative and privacy-preserving LLM training on the underutilized distributed private data via federated learning (FL), where multiple data owners collaboratively train a shared model without transmitting raw data. To achieve this, we build a concise, integrated, and research-friendly framework/codebase, named OpenFedLLM. It covers federated instruction tuning for enhancing instruction-following capability, federated value alignment for aligning with human values, and 7 representative FL algorithms. Besides, OpenFedLLM supports training on diverse domains, where we cover 8 training datasets; and provides comprehensive evaluations, where we cover 30+ evaluation metrics. Through extensive experiments, we observe that all FL algorithms outperform local training on training LLMs, demonstrating a clear performance improvement across a variety of settings. Notably, in a financial benchmark, Llama2-7B fine-tuned by applying any FL algorithm can outperform GPT-4 by a significant margin while the model obtained through individual training cannot, demonstrating strong motivation for clients to participate in FL. The code is available at https://github.com/rui-ye/OpenFedLLM.", "year": 2024, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Rui Ye", "Wenhao Wang", "Jingyi Chai", "Dihan Li", "Zexi Li", "Yinda Xu", "Yaxin Du", "Yanfeng Wang", "Siheng Chen" ], "externalIds": { "DBLP": "journals/corr/abs-2402-06954", "ArXiv": "2402.06954", "DOI": "10.48550/arXiv.2402.06954", "CorpusId": 267627968 }, "url": "https://www.semanticscholar.org/paper/7ae48b24cbf955bf9b9498fb287bf4c5cd3b73d4", "referenceCount": 135, "citationCount": 23, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On the Convergence of Zeroth-Order Federated Tuning for Large Language Models", "abstract": "The confluence of Federated Learning (FL) and Large Language Models (LLMs) is ushering in a new era in privacy-preserving natural language processing. However, the intensive memory requirements for fine-tuning LLMs pose significant challenges, especially when deploying on clients with limited computational resources. To circumvent this, we explore the novel integration of Memory-efficient Zeroth-Order Optimization within a federated setting, a synergy we term as FedMeZO. Our study is the first to examine the theoretical underpinnings of FedMeZO in the context of LLMs, tackling key questions regarding the influence of large parameter spaces on optimization behavior, the establishment of convergence properties, and the identification of critical parameters for convergence to inform personalized federated strategies. Our extensive empirical evidence supports the theory, showing that FedMeZO not only converges faster than traditional first-order methods such as FedAvg but also significantly reduces GPU memory usage during training to levels comparable to those during inference. Moreover, the proposed personalized FL strategy that is built upon the theoretical insights to customize the client-wise learning rate can effectively accelerate loss reduction. 
We hope our work can help to bridge theoretical and practical aspects of federated fine-tuning for LLMs, thereby stimulating further advancements and research in this area.", "year": 2024, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Zhenqing Ling", "Daoyuan Chen", "Liuyi Yao", "Yaliang Li", "Ying Shen" ], "externalIds": { "ArXiv": "2402.05926", "DBLP": "journals/corr/abs-2402-05926", "DOI": "10.48550/arXiv.2402.05926", "CorpusId": 267547840 }, "url": "https://www.semanticscholar.org/paper/0255001544a130e64802b03ba03d4fdd0cd34dbb", "referenceCount": 60, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Prompt-enhanced Federated Content Representation Learning for Cross-domain Recommendation", "abstract": "Cross-domain Recommendation (CDR) as one of the effective techniques in alleviating the data sparsity issues has been widely studied in recent years. However, previous works may cause domain privacy leakage since they necessitate the aggregation of diverse domain data into a centralized server during the training process. Though several studies have conducted privacy preserving CDR via Federated Learning (FL), they still have the following limitations: 1) They need to upload users' personal information to the central server, posing the risk of leaking user privacy. 2) Existing federated methods mainly rely on atomic item IDs to represent items, which prevents them from modeling items in a unified feature space, increasing the challenge of knowledge transfer among domains. 3) They are all based on the premise of knowing overlapped users between domains, which proves impractical in real-world applications. To address the above limitations, we focus on Privacy-preserving Cross-domain Recommendation (PCDR) and propose PFCR as our solution. For Limitation 1, we develop a FL schema by exclusively utilizing users' interactions with local clients and devising an encryption method for gradient encryption. For Limitation 2, we model items in a universal feature space by their description texts. For Limitation 3, we initially learn federated content representations, harnessing the generality of natural language to establish bridges between domains. Subsequently, we craft two prompt fine-tuning strategies to tailor the pre-trained model to the target domain. Extensive experiments on two real-world datasets demonstrate the superiority of our PFCR method compared to the SOTA approaches.", "year": 2024, "venue": "The Web Conference", "authors": [ "Lei Guo", "Ziang Lu", "Junliang Yu", "Q. Nguyen", "Hongzhi Yin" ], "externalIds": { "DBLP": "journals/corr/abs-2401-14678", "ArXiv": "2401.14678", "DOI": "10.1145/3589334.3645337", "CorpusId": 267301062 }, "url": "https://www.semanticscholar.org/paper/a671214b2a24f71423aea0ef8ed46682718fa77e", "referenceCount": 57, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Vulnerabilities of Foundation Model Integrated Federated Learning Under Adversarial Threats", "abstract": "Federated Learning (FL) addresses critical issues in machine learning related to data privacy and security, yet suffering from data insufficiency and imbalance under certain circumstances. The emergence of foundation models (FMs) offers potential solutions to the limitations of existing FL frameworks, e.g., by generating synthetic data for model initialization. 
However, due to the inherent safety concerns of FMs, integrating FMs into FL could introduce new risks, which remains largely unexplored. To address this gap, we conduct the first investigation on the vulnerability of FM integrated FL (FM-FL) under adversarial threats. Based on a unified framework of FM-FL, we introduce a novel attack strategy that exploits safety issues of FM to compromise FL client models. Through extensive experiments with well-known models and benchmark datasets in both image and text domains, we reveal the high susceptibility of the FM-FL to this new threat under various FL configurations. Furthermore, we find that existing FL defense strategies offer limited protection against this novel attack approach. This research highlights the critical need for enhanced security measures in FL in the era of FMs.", "year": 2024, "venue": "arXiv.org", "authors": [ "Chen Wu", "Xi Li", "Jiaqi Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2401-10375", "ArXiv": "2401.10375", "DOI": "10.48550/arXiv.2401.10375", "CorpusId": 267061160 }, "url": "https://www.semanticscholar.org/paper/9268a445f779f908b768d955100e0ba1e172d889", "referenceCount": 48, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Survey of Resource-efficient LLM and Multimodal Foundation Models", "abstract": "Large foundation models, including large language models (LLMs), vision transformers (ViTs), diffusion, and LLM-based multimodal models, are revolutionizing the entire machine learning lifecycle, from training to deployment. However, the substantial advancements in versatility and performance these models offer come at a significant cost in terms of hardware resources. To support the growth of these large models in a scalable and environmentally sustainable way, there has been a considerable focus on developing resource-efficient strategies. This survey delves into the critical importance of such research, examining both algorithmic and systemic aspects. It offers a comprehensive analysis and valuable insights gleaned from existing literature, encompassing a broad array of topics from cutting-edge model architectures and training/serving algorithms to practical system designs and implementations. The goal of this survey is to provide an overarching understanding of how current approaches are tackling the resource challenges posed by large foundation models and to potentially inspire future breakthroughs in this field.", "year": 2024, "venue": "arXiv.org", "authors": [ "Mengwei Xu", "Wangsong Yin", "Dongqi Cai", "Rongjie Yi", "Daliang Xu", "Qipeng Wang", "Bingyang Wu", "Yihao Zhao", "Chen Yang", "Shihe Wang", "Qiyang Zhang", "Zhenyan Lu", "Li Zhang", "Shangguang Wang", "Yuanchun Li", "Yunxin Liu", "Xin Jin", "Xuanzhe Liu" ], "externalIds": { "ArXiv": "2401.08092", "DBLP": "journals/corr/abs-2401-08092", "DOI": "10.48550/arXiv.2401.08092", "CorpusId": 267027735 }, "url": "https://www.semanticscholar.org/paper/8ac21a1545a907fc64b54cde36bf41415608cd7d", "referenceCount": 0, "citationCount": 38, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Survey on Efficient Federated Learning Methods for Foundation Model Training", "abstract": "Federated Learning (FL) has become an established technique to facilitate privacy-preserving collaborative training across a multitude of clients. 
However, new approaches to FL often discuss their contributions involving small deep-learning models only and focus on training full models on clients. In the wake of Foundation Models (FM), the reality is different for many deep learning applications. Typically, FMs have already been pre-trained across a wide variety of tasks and can be fine-tuned to specific downstream tasks over significantly smaller datasets than required for full model training. However, access to such datasets is often challenging. By its design, FL can help to open data silos. With this survey, we introduce a novel taxonomy focused on computational and communication efficiency, the vital elements to make use of FMs in FL systems. We discuss the benefits and drawbacks of parameter-efficient fine-tuning (PEFT) for FL applications, elaborate on the readiness of FL frameworks to work with FMs and provide future research opportunities on how to evaluate generative models in FL as well as the interplay of privacy and PEFT.", "year": 2024, "venue": "Proceedings of the Thirty-ThirdInternational Joint Conference on Artificial Intelligence", "authors": [ "Herbert Woisetschläger", "Alexander Isenko", "Shiqiang Wang", "R. Mayer", "Hans-Arno Jacobsen" ], "externalIds": { "ArXiv": "2401.04472", "DBLP": "journals/corr/abs-2401-04472", "DOI": "10.24963/ijcai.2024/919", "CorpusId": 266899949 }, "url": "https://www.semanticscholar.org/paper/88a30d7676108ecafcd8a85c2c60b3d5d1fbde50", "referenceCount": 79, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Full-Parameter Tuning of Billion-Sized Language Models with Communication Cost under 18 Kilobytes", "abstract": "Pre-trained large language models (LLMs) need fine-tuning to improve their responsiveness to natural language instructions. Federated learning offers a way to fine-tune LLMs using the abundant data on end devices without compromising data privacy. Most existing federated fine-tuning methods for LLMs rely on parameter-efficient fine-tuning techniques, which may not reach the performance height possible with full-parameter tuning. However, federated full-parameter tuning of LLMs is a non-trivial problem due to the immense communication cost. This work introduces FedKSeed that employs zeroth-order optimization with a finite set of random seeds. It significantly reduces transmission requirements between the server and clients to just a few random seeds and scalar gradients, amounting to only a few thousand bytes, making federated full-parameter tuning of billion-sized LLMs possible on devices. Building on it, we develop a strategy enabling probability-differentiated seed sampling, prioritizing perturbations with greater impact on model accuracy. 
Experiments across six scenarios with various LLMs, datasets and data partitions demonstrate that our approach outperforms existing federated LLM fine-tuning methods in both communication efficiency and new task generalization.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Zhen Qin", "Daoyuan Chen", "Bingchen Qian", "Bolin Ding", "Yaliang Li", "Shuiguang Deng" ], "externalIds": { "DBLP": "conf/icml/QinCQDLD24", "ArXiv": "2312.06353", "DOI": "10.48550/arXiv.2312.06353", "CorpusId": 266162335 }, "url": "https://www.semanticscholar.org/paper/06eae596ea3e996453039f6a2cc68732cbba884b", "referenceCount": 65, "citationCount": 14, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Mutual Enhancement of Large and Small Language Models with Cross-Silo Knowledge Transfer", "abstract": "While large language models (LLMs) are empowered with broad knowledge, their task-specific performance is often suboptimal. It necessitates fine-tuning LLMs with task-specific data, but such data may be inaccessible due to privacy concerns. In this paper, we propose a novel approach to enhance LLMs with smaller language models (SLMs) that are trained on clients using their private task-specific data. To enable mutual enhancement between LLMs and SLMs, we propose CrossLM, where the SLMs promote the LLM to generate task-specific high-quality data, and both the LLM and SLMs are enhanced with the generated data. We evaluate CrossLM using publicly accessible language models across a range of benchmark tasks. The results demonstrate that CrossLM significantly enhances the task-specific performance of SLMs on clients and the LLM on the cloud server simultaneously while preserving the LLM's generalization capability.", "year": 2023, "venue": "arXiv.org", "authors": [ "Yongheng Deng", "Ziqing Qiao", "Ju Ren", "Yang Liu", "Yaoxue Zhang" ], "externalIds": { "DBLP": "journals/corr/abs-2312-05842", "ArXiv": "2312.05842", "DOI": "10.48550/arXiv.2312.05842", "CorpusId": 266162490 }, "url": "https://www.semanticscholar.org/paper/95d65e13b36006113ef3ba93200783baef07d2c3", "referenceCount": 38, "citationCount": 7, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Unveiling Backdoor Risks Brought by Foundation Models in Heterogeneous Federated Learning", "abstract": "The foundation models (FMs) have been used to generate synthetic public datasets for the heterogeneous federated learning (HFL) problem where each client uses a unique model architecture. However, the vulnerabilities of integrating FMs, especially against backdoor attacks, are not well-explored in the HFL contexts. In this paper, we introduce a novel backdoor attack mechanism for HFL that circumvents the need for client compromise or ongoing participation in the FL process. This method plants and transfers the backdoor through a generated synthetic public dataset, which could help evade existing backdoor defenses in FL by presenting normal client behaviors. Empirical experiments across different HFL configurations and benchmark datasets demonstrate the effectiveness of our attack compared to traditional client-based attacks. Our findings reveal significant security risks in developing robust FM-assisted HFL systems. 
This research contributes to enhancing the safety and integrity of FL systems, highlighting the need for advanced security measures in the era of FMs.", "year": 2023, "venue": "Pacific-Asia Conference on Knowledge Discovery and Data Mining", "authors": [ "Xi Li", "Chen Wu", "Jiaqi Wang" ], "externalIds": { "ArXiv": "2311.18350", "DBLP": "conf/pakdd/LiWW24", "DOI": "10.48550/arXiv.2311.18350", "CorpusId": 265506382 }, "url": "https://www.semanticscholar.org/paper/9bf5c27696ecd7b96b8d1b6e62a7aee9bf485937", "referenceCount": 45, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Grounding Foundation Models through Federated Transfer Learning: A General Framework", "abstract": "Foundation Models (FMs) such as GPT-4 encoded with vast knowledge and powerful emergent abilities have achieved remarkable success in various natural language processing and computer vision tasks. Grounding FMs by adapting them to domain-specific tasks or augmenting them with domain-specific knowledge enables us to exploit the full potential of FMs. However, grounding FMs faces several challenges, stemming primarily from constrained computing resources, data privacy, model heterogeneity, and model ownership. Federated Transfer Learning (FTL), the combination of federated learning and transfer learning, provides promising solutions to address these challenges. In recent years, the need for grounding FMs leveraging FTL, coined FTL-FM, has arisen strongly in both academia and industry. Motivated by the strong growth in FTL-FM research and the potential impact of FTL-FM on industrial applications, we propose an FTL-FM framework that formulates problems of grounding FMs in the federated learning setting, construct a detailed taxonomy based on the FTL-FM framework to categorize state-of-the-art FTL-FM works, and comprehensively overview FTL-FM works based on the proposed taxonomy. We also establish correspondences between FTL-FM and conventional phases of adapting FM so that FM practitioners can align their research works with FTL-FM. In addition, we overview advanced efficiency-improving and privacy-preserving techniques because efficiency and privacy are critical concerns in FTL-FM. Last, we discuss opportunities and future research directions of FTL-FM.", "year": 2023, "venue": "arXiv.org", "authors": [ "Yan Kang", "Tao Fan", "Hanlin Gu", "Lixin Fan", "Qiang Yang" ], "externalIds": { "ArXiv": "2311.17431", "DBLP": "journals/corr/abs-2311-17431", "DOI": "10.48550/arXiv.2311.17431", "CorpusId": 265498954 }, "url": "https://www.semanticscholar.org/paper/f416c5574c9741e14f0fd728112178bcda382c21", "referenceCount": 171, "citationCount": 12, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedPEAT: Convergence of Federated Learning, Parameter-Efficient Fine Tuning, and Emulator Assisted Tuning for Artificial Intelligence Foundation Models with Mobile Edge Computing", "abstract": "The emergence of foundation models, including language and vision models, has reshaped AI's landscape, offering capabilities across various applications. Deploying and fine-tuning these large models, like GPT-3 and BERT, presents challenges, especially in the current foundation model era. We introduce Emulator-Assisted Tuning (EAT) combined with Parameter-Efficient Fine-Tuning (PEFT) to form Parameter-Efficient Emulator-Assisted Tuning (PEAT). 
Further, we expand this into federated learning as Federated PEAT (FedPEAT). FedPEAT uses adapters, emulators, and PEFT for federated model tuning, enhancing model privacy and memory efficiency. Adapters adjust pre-trained models, while emulators give a compact representation of original models, addressing both privacy and efficiency. Adaptable to various neural networks, our approach also uses deep reinforcement learning for hyper-parameter optimization. We tested FedPEAT in a unique scenario with a server participating in collaborative federated tuning, showcasing its potential in tackling foundation model challenges.", "year": 2023, "venue": "arXiv.org", "authors": [ "Terence Jie Chua", "Wen-li Yu", "Junfeng Zhao", "Kwok-Yan Lam" ], "externalIds": { "DBLP": "journals/corr/abs-2310-17491", "ArXiv": "2310.17491", "DOI": "10.48550/arXiv.2310.17491", "CorpusId": 264490925 }, "url": "https://www.semanticscholar.org/paper/8a75ffe04999efeff039085c8b160b1b4ec6a897", "referenceCount": 47, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Learning of Large Language Models with Parameter-Efficient Prompt Tuning and Adaptive Optimization", "abstract": "Federated learning (FL) is a promising paradigm to enable collaborative model training with decentralized data. However, the training process of Large Language Models (LLMs) generally incurs the update of significant parameters, which limits the applicability of FL techniques to tackle the LLMs in real scenarios. Prompt tuning can significantly reduce the number of parameters to update, but it either incurs performance degradation or low training efficiency. The straightforward utilization of prompt tuning in the FL often raises non-trivial communication costs and dramatically degrades performance. In addition, the decentralized data is generally non-Independent and Identically Distributed (non-IID), which brings client drift problems and thus poor performance. This paper proposes a Parameter-efficient prompt Tuning approach with Adaptive Optimization, i.e., FedPepTAO, to enable efficient and effective FL of LLMs. First, an efficient partial prompt tuning approach is proposed to improve performance and efficiency simultaneously. Second, a novel adaptive optimization method is developed to address the client drift problems on both the device and server sides to enhance performance further. Extensive experiments based on 10 datasets demonstrate the superb performance (up to 60.8\\% in terms of accuracy) and efficiency (up to 97.59\\% in terms of training time) of FedPepTAO compared with 9 baseline approaches. Our code is available at https://github.com/llm-eff/FedPepTAO.", "year": 2023, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Tianshi Che", "Ji Liu", "Yang Zhou", "Jiaxiang Ren", "Jiwen Zhou", "Victor S. Sheng", "H. Dai", "D. 
Dou" ], "externalIds": { "DBLP": "journals/corr/abs-2310-15080", "ArXiv": "2310.15080", "DOI": "10.48550/arXiv.2310.15080", "CorpusId": 264436414 }, "url": "https://www.semanticscholar.org/paper/67ffe6037cf058b8c5b39f59693c4c349cc1e456", "referenceCount": 96, "citationCount": 26, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Text-driven Prompt Generation for Vision-Language Models in Federated Learning", "abstract": "Prompt learning for vision-language models, e.g., CoOp, has shown great success in adapting CLIP to different downstream tasks, making it a promising solution for federated learning due to computational reasons. Existing prompt learning techniques replace hand-crafted text prompts with learned vectors that offer improvements on seen classes, but struggle to generalize to unseen classes. Our work addresses this challenge by proposing Federated Text-driven Prompt Generation (FedTPG), which learns a unified prompt generation network across multiple remote clients in a scalable manner. The prompt generation network is conditioned on task-related text input, thus is context-aware, making it suitable to generalize for both seen and unseen classes. Our comprehensive empirical evaluations on nine diverse image classification datasets show that our method is superior to existing federated prompt learning methods, that achieve overall better generalization on both seen and unseen classes and is also generalizable to unseen datasets.", "year": 2023, "venue": "arXiv.org", "authors": [ "Chen Qiu", "Xingyu Li", "Chaithanya Kumar Mummadi", "M. Ganesh", "Zhenzhen Li", "Lu Peng", "Wan-Yi Lin" ], "externalIds": { "ArXiv": "2310.06123", "DBLP": "journals/corr/abs-2310-06123", "DOI": "10.48550/arXiv.2310.06123", "CorpusId": 263828673 }, "url": "https://www.semanticscholar.org/paper/3a6d9e0d5896491dbdb192ea1a9032e9940abe54", "referenceCount": 37, "citationCount": 7, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Profit: Benchmarking Personalization and Robustness Trade-off in Federated Prompt Tuning", "abstract": "In many applications of federated learning (FL), clients desire models that are personalized using their local data, yet are also robust in the sense that they retain general global knowledge. However, the presence of data heterogeneity across clients induces a fundamental trade-off between personalization (i.e., adaptation to a local distribution) and robustness (i.e., not forgetting previously learned general knowledge). It is critical to understand how to navigate this personalization vs robustness trade-off when designing federated systems, which are increasingly moving towards a paradigm of fine-tuning large foundation models. Due to limited computational and communication capabilities in most federated settings, this foundation model fine-tuning must be done using parameter-efficient fine-tuning (PEFT) approaches. While some recent work has studied federated approaches to PEFT, the personalization vs robustness trade-off of federated PEFT has been largely unexplored. In this work, we take a step towards bridging this gap by benchmarking fundamental FL algorithms -- FedAvg and FedSGD plus personalization (via client local fine-tuning) -- applied to one of the most ubiquitous PEFT approaches to large language models (LLMs) -- prompt tuning -- in a multitude of hyperparameter settings under varying levels of data heterogeneity. 
Our results show that federated-trained prompts can be surprisingly robust when using a small learning rate with many local epochs for personalization, especially when using an adaptive optimizer as the client optimizer during federated training. We also demonstrate that simple approaches such as adding regularization and interpolating two prompts are effective in improving the personalization vs robustness trade-off in computation-limited settings with few local updates allowed for personalization.", "year": 2023, "venue": "arXiv.org", "authors": [ "Liam Collins", "Shanshan Wu", "Sewoong Oh", "K. Sim" ], "externalIds": { "DBLP": "journals/corr/abs-2310-04627", "ArXiv": "2310.04627", "DOI": "10.48550/arXiv.2310.04627", "CorpusId": 263830654 }, "url": "https://www.semanticscholar.org/paper/61f46dbe000930877c5da4d8628c63ce1ce2df82", "referenceCount": 72, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Inclusive Data Representation in Federated Learning: A Novel Approach Integrating Textual and Visual Prompt", "abstract": "Federated Learning (FL) is often impeded by communication overhead issues. Prompt tuning, as a potential solution, has been introduced to only adjust a few trainable parameters rather than the whole model. However, current single-modality prompt tuning approaches fail to comprehensively portray local clients’ data. To overcome this limitation, we present Twin Prompt Federated learning (TPFL), a pioneering solution that integrates both visual and textual modalities, ensuring a more holistic representation of local clients’ data characteristics. Furthermore, in order to tackle the data heterogeneity issues, we introduce the Augmented TPFL (ATPFL) employing the contrastive learning to TPFL, which not only enhances the global knowledge acquisition of client models but also fosters the development of robust, compact models. The effectiveness of TPFL and ATPFL is substantiated by our extensive evaluations, consistently showing superior performance compared to all baselines.", "year": 2023, "venue": "UbiComp/ISWC Adjunct", "authors": [ "Zihao Zhao", "Zhenpeng Shi", "Yang Liu", "Wenbo Ding" ], "externalIds": { "DBLP": "journals/corr/abs-2310-04455", "ArXiv": "2310.04455", "DOI": "10.1145/3594739.3612914", "CorpusId": 263742966 }, "url": "https://www.semanticscholar.org/paper/d7fad0fc4de4f238b16a66ad37e03172821f661d", "referenceCount": 29, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Bridging the Gap Between Foundation Models and Heterogeneous Federated Learning", "abstract": "Federated learning (FL) offers privacy-preserving decentralized machine learning, optimizing models at edge clients without sharing private data. Simultaneously, foundation models (FMs) have gained traction in the artificial intelligence (AI) community due to their exceptional performance across various tasks. However, integrating FMs into FL presents challenges, primarily due to their substantial size and intensive resource requirements. This is especially true when considering the resource heterogeneity in edge FL systems. We present an adaptive framework for Resource-aware Federated Foundation Models (RaFFM) to address these challenges. RaFFM introduces specialized model compression algorithms tailored for FL scenarios, such as salient parameter prioritization and high-performance subnetwork extraction. 
These algorithms enable dynamic scaling of given transformer-based FMs to fit heterogeneous resource constraints at the network edge during both FL's optimization and deployment stages. Experimental results demonstrate that RaFFM shows significant superiority in resource utilization efficiency and uses fewer resources to deploy FMs to FL. Despite the lower resource consumption, target models optimized by RaFFM achieve performance on par with traditional FL methods applied to full-sized FMs. This is evident across tasks in both natural language processing and computer vision domains.", "year": 2023, "venue": "arXiv.org", "authors": [ "Sixing Yu", "J. P. Muñoz", "A. Jannesari" ], "externalIds": { "ArXiv": "2310.00247", "DBLP": "journals/corr/abs-2310-00247", "DOI": "10.48550/arXiv.2310.00247", "CorpusId": 263675781 }, "url": "https://www.semanticscholar.org/paper/053bdcbe3cf45c5f34a4e50b271a6016a960637a", "referenceCount": 44, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FusionAI: Decentralized Training and Deploying LLMs with Massive Consumer-Level GPUs", "abstract": "The rapid growth of memory and computation requirements of large language models (LLMs) has outpaced the development of hardware, hindering people who lack large-scale high-end GPUs from training or deploying LLMs. However, consumer-level GPUs, which constitute a larger market share, are typically overlooked in LLM due to their weaker computing performance, smaller storage capacity, and lower communication bandwidth. Additionally, users may have privacy concerns when interacting with remote LLMs. In this paper, we envision a decentralized system unlocking the potential vast untapped consumer-level GPUs in pre-training, inference and fine-tuning of LLMs with privacy protection. However, this system faces critical challenges, including limited CPU and GPU memory, low network bandwidth, the variability of peer and device heterogeneity. To address these challenges, our system design incorporates: 1) a broker with backup pool to implement dynamic join and quit of computing providers; 2) task scheduling with hardware performance to improve system efficiency; 3) abstracting ML procedures into directed acyclic graphs (DAGs) to achieve model and task universality; 4) abstracting intermediate represention and execution planes to ensure compatibility of various devices and deep learning (DL) frameworks. Our performance analysis demonstrates that 50 RTX 3080 GPUs can achieve throughputs comparable to those of 4 H100 GPUs, which are significantly more expensive.", "year": 2023, "venue": "arXiv.org", "authors": [ "Zhenheng Tang", "Yuxin Wang", "Xin He", "Longteng Zhang", "Xinglin Pan", "Qiang Wang", "Rongfei Zeng", "Kaiyong Zhao", "S. Shi", "Bingsheng He", "Xiaowen Chu" ], "externalIds": { "DBLP": "journals/corr/abs-2309-01172", "ArXiv": "2309.01172", "DOI": "10.48550/arXiv.2309.01172", "CorpusId": 261530813 }, "url": "https://www.semanticscholar.org/paper/660380b17d3a37d8132f2e6dcb5cb47092e5b7d1", "referenceCount": 86, "citationCount": 12, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedDAT: An Approach for Foundation Model Finetuning in Multi-Modal Heterogeneous Federated Learning", "abstract": "Recently, foundation models have exhibited remarkable advancements in multi-modal learning. 
These models, equipped with millions (or billions) of parameters, typically require a substantial amount of data for finetuning. However, collecting and centralizing training data from diverse sectors becomes challenging due to distinct privacy regulations. Federated Learning (FL) emerges as a promising solution, enabling multiple clients to collaboratively train neural networks without centralizing their local data. To alleviate client computation burdens and communication overheads, previous works have adapted Parameter-efficient Finetuning (PEFT) methods for FL. Hereby, only a small fraction of the model parameters are optimized and communicated during federated communications. Nevertheless, most previous works have focused on a single modality and neglected one common phenomenon, i.e., the presence of data heterogeneity across the clients. Therefore, in this work, we propose a finetuning framework tailored to heterogeneous multi-modal FL, called Federated Dual-Aadapter Teacher (FedDAT). Specifically, our approach leverages a Dual-Adapter Teacher (DAT) to address data heterogeneity by regularizing the client local updates and applying Mutual Knowledge Distillation (MKD) for an efficient knowledge transfer. FedDAT is the first approach that enables an efficient distributed finetuning of foundation models for a variety of heterogeneous Vision-Language tasks. To demonstrate its effectiveness, we conduct extensive experiments on four multi-modality FL benchmarks with different types of data heterogeneity, where FedDAT substantially outperforms the existing centralized PEFT methods adapted for FL.", "year": 2023, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Haokun Chen", "Yao Zhang", "Denis Krompass", "Jindong Gu", "Volker Tresp" ], "externalIds": { "DBLP": "journals/corr/abs-2308-12305", "ArXiv": "2308.12305", "DOI": "10.48550/arXiv.2308.12305", "CorpusId": 261101138 }, "url": "https://www.semanticscholar.org/paper/3cb399d96cd70f5e9aa4ffecf7d329d1b0910745", "referenceCount": 57, "citationCount": 15, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SLoRA: Federated Parameter Efficient Fine-Tuning of Language Models", "abstract": "Transfer learning via fine-tuning pre-trained transformer models has gained significant success in delivering state-of-the-art results across various NLP tasks. In the absence of centralized data, Federated Learning (FL) can benefit from distributed and private data of the FL edge clients for fine-tuning. However, due to the limited communication, computation, and storage capabilities of edge devices and the huge sizes of popular transformer models, efficient fine-tuning is crucial to make federated training feasible. This work explores the opportunities and challenges associated with applying parameter efficient fine-tuning (PEFT) methods in different FL settings for language tasks. Specifically, our investigation reveals that as the data across users becomes more diverse, the gap between fully fine-tuning the model and employing PEFT methods widens. To bridge this performance gap, we propose a method called SLoRA, which overcomes the key limitations of LoRA in high heterogeneous data scenarios through a novel data-driven initialization technique. 
Our experimental results demonstrate that SLoRA achieves performance comparable to full fine-tuning, with significant sparse updates with approximately $\\sim 1\\%$ density while reducing training time by up to $90\\%$.", "year": 2023, "venue": "arXiv.org", "authors": [ "Sara Babakniya", "A. Elkordy", "Yahya H. Ezzeldin", "Qingfeng Liu", "Kee-Bong Song", "Mostafa El-Khamy", "S. Avestimehr" ], "externalIds": { "ArXiv": "2308.06522", "DBLP": "journals/corr/abs-2308-06522", "DOI": "10.48550/arXiv.2308.06522", "CorpusId": 260887495 }, "url": "https://www.semanticscholar.org/paper/5ce0fa3f0882910a5dfa2697bb04c8785d914725", "referenceCount": 39, "citationCount": 31, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Simple synthetic data reduces sycophancy in large language models", "abstract": "Sycophancy is an undesirable behavior where models tailor their responses to follow a human user's view even when that view is not objectively correct (e.g., adapting liberal views once a user reveals that they are liberal). In this paper, we study the prevalence of sycophancy in language models and propose a simple synthetic-data intervention to reduce this behavior. First, on a set of three sycophancy tasks (Perez et al., 2022) where models are asked for an opinion on statements with no correct answers (e.g., politics), we observe that both model scaling and instruction tuning significantly increase sycophancy for PaLM models up to 540B parameters. Second, we extend sycophancy evaluations to simple addition statements that are objectively incorrect, finding that despite knowing that these statements are wrong, language models will still agree with them if the user does as well. To reduce sycophancy, we present a straightforward synthetic-data intervention that takes public NLP tasks and encourages models to be robust to user opinions on these tasks. Adding these data in a lightweight finetuning step can significantly reduce sycophantic behavior on held-out prompts. Code for generating synthetic data for intervention can be found at https://github.com/google/sycophancy-intervention.", "year": 2023, "venue": "arXiv.org", "authors": [ "Jerry W. Wei", "Da Huang", "Yifeng Lu", "Denny Zhou", "Quoc V. Le" ], "externalIds": { "DBLP": "journals/corr/abs-2308-03958", "ArXiv": "2308.03958", "DOI": "10.48550/arXiv.2308.03958", "CorpusId": 260704246 }, "url": "https://www.semanticscholar.org/paper/a37d5620210276e47cf0c9dd2898c2a82c9d0422", "referenceCount": 62, "citationCount": 44, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Low-Parameter Federated Learning with Large Language Models", "abstract": "We study few-shot Natural Language Understanding (NLU) tasks with Large Language Models (LLMs) in federated learning (FL) scenarios. It is a challenging task due to limited labeled data and communication capacities in FL, especially with mobile devices. Recent studies show LLMs can be prompted to perform few-shot NLU tasks like sentiment analysis and arithmetic reasoning. However, the huge sizes of LLMs result in high computation and communication costs, making classical FL schemes impractical. To address these challenges, we propose Low-Parameter Federated Learning (LP-FL). LP-FL combines few-shot prompt learning from LLMs with efficient communication and federating techniques. 
Our approach enables federated clients to assign soft labels to unlabeled data using gradually learned knowledge from the global model. Through iterative soft-label assigning, we continually expand the labeled set during the FL process. Additionally, to reduce computation and communication costs, LP-FL utilizes the Low-Rank Adaptation (LoRA) technique for compact learnable parameter construction, efficient local model fine-tuning, and affordable global model federation. LP-FL consistently outperforms Full-Parameter Federated Learning (FP-FL) in sentiment analysis tasks across various FL settings. Its resistance to overfitting allows LP-FL to equal or surpass centralized training in few-shot scenarios.", "year": 2023, "venue": "arXiv.org", "authors": [ "Jing Jiang", "Xiangyang Liu", "Chenyou Fan" ], "externalIds": { "DBLP": "journals/corr/abs-2307-13896", "ArXiv": "2307.13896", "DOI": "10.48550/arXiv.2307.13896", "CorpusId": 260164619 }, "url": "https://www.semanticscholar.org/paper/573dad7b2fca7ce72a7f0daf681391d96379ebe3", "referenceCount": 30, "citationCount": 14, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Large Language Model: A Position Paper", "abstract": "Large scale language models (LLM) have received significant attention and found diverse applications across various domains, but their development encounters challenges in real-world scenarios. These challenges arise due to the scarcity of public domain data availability and the need to maintain privacy with respect to private domain data. To address these issues, federated learning (FL) has emerged as a promising technology that enables collaborative training of shared models while preserving decentralized data. We propose the concept of federated LLM, which comprises three key components, i.e., federated LLM pre-training, federated LLM fine-tuning, and federated LLM prompt engineering. For each component, we discuss its advantage over traditional LLM training methods and propose specific engineering strategies for implementation. Furthermore, we explore the novel challenges introduced by the integration of FL and LLM. We analyze existing solutions and identify potential obstacles faced by these solutions within the context of federated LLM.", "year": 2023, "venue": "arXiv.org", "authors": [ "Chaochao Chen", "Xiaohua Feng", "Jun Zhou", "Jianwei Yin", "Xiaolin Zheng" ], "externalIds": { "DBLP": "journals/corr/abs-2307-08925", "ArXiv": "2307.08925", "DOI": "10.48550/arXiv.2307.08925", "CorpusId": 259950775 }, "url": "https://www.semanticscholar.org/paper/7aad760762c4a10dfbc2d3391eb8bdb28c80b236", "referenceCount": 51, "citationCount": 35, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Towards Federated Foundation Models: Scalable Dataset Pipelines for Group-Structured Learning", "abstract": "We introduce Dataset Grouper, a library to create large-scale group-structured (e.g., federated) datasets, enabling federated learning simulation at the scale of foundation models. This library facilitates the creation of group-structured versions of existing datasets based on user-specified partitions and directly leads to a variety of useful heterogeneous datasets that can be plugged into existing software frameworks. Dataset Grouper offers three key advantages. First, it scales to settings where even a single group's dataset is too large to fit in memory. 
Second, it provides flexibility, both in choosing the base (non-partitioned) dataset and in defining partitions. Finally, it is framework-agnostic. We empirically demonstrate that Dataset Grouper enables large-scale federated language modeling simulations on datasets that are orders of magnitude larger than in previous work, allowing for federated training of language models with hundreds of millions, and even billions, of parameters. Our experimental results show that algorithms like FedAvg operate more as meta-learning methods than as empirical risk minimization methods at this scale, suggesting their utility in downstream personalization and task-specific adaptation. Dataset Grouper is available at https://github.com/google-research/dataset_grouper.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Zachary B. Charles", "Nicole Mitchell", "Krishna Pillutla", "Michael Reneer", "Zachary Garrett" ], "externalIds": { "DBLP": "journals/corr/abs-2307-09619", "ArXiv": "2307.09619", "DOI": "10.48550/arXiv.2307.09619", "CorpusId": 259982518 }, "url": "https://www.semanticscholar.org/paper/f5e670c22d1125de557aaa79f721fcfb557fcb36", "referenceCount": 92, "citationCount": 14, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedYolo: Augmenting Federated Learning with Pretrained Transformers", "abstract": "The growth and diversity of machine learning applications motivate a rethinking of learning with mobile and edge devices. How can we address diverse client goals and learn with scarce heterogeneous data? While federated learning aims to address these issues, it has challenges hindering a unified solution. Large transformer models have been shown to work across a variety of tasks achieving remarkable few-shot adaptation. This raises the question: Can clients use a single general-purpose model, rather than custom models for each task, while obeying device and network constraints? In this work, we investigate pretrained transformers (PTF) to achieve these on-device learning goals and thoroughly explore the roles of model size and modularity, where the latter refers to adaptation through modules such as prompts or adapters. Focusing on federated learning, we demonstrate that: (1) Larger scale shrinks the accuracy gaps between alternative approaches and improves heterogeneity robustness. Scale allows clients to run more local SGD epochs which can significantly reduce the number of communication rounds. At the extreme, clients can achieve respectable accuracy locally highlighting the potential of fully-local learning. (2) Modularity, by design, enables $>$100$\\times$ less communication in bits. Surprisingly, it also boosts the generalization capability of local adaptation methods and the robustness of smaller PTFs. Finally, it enables clients to solve multiple unrelated tasks simultaneously using a single PTF, whereas full updates are prone to catastrophic forgetting. These insights on scale and modularity motivate a new federated learning approach we call\"You Only Load Once\"(FedYolo): The clients load a full PTF model once and all future updates are accomplished through communication-efficient modules with limited catastrophic-forgetting, where each task is assigned to its own module.", "year": 2023, "venue": "arXiv.org", "authors": [ "Xuechen Zhang", "Mingchen Li", "Xiangyu Chang", "Jiasi Chen", "A. Roy-Chowdhury", "A. 
Suresh", "Samet Oymak" ], "externalIds": { "DBLP": "journals/corr/abs-2307-04905", "ArXiv": "2307.04905", "DOI": "10.48550/arXiv.2307.04905", "CorpusId": 259766512 }, "url": "https://www.semanticscholar.org/paper/951284f2c6749024fe5ca06aade16888f7c8ecce", "referenceCount": 48, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "When Foundation Model Meets Federated Learning: Motivations, Challenges, and Future Directions", "abstract": "The intersection of the Foundation Model (FM) and Federated Learning (FL) provides mutual benefits, presents a unique opportunity to unlock new possibilities in AI research, and address critical challenges in AI and real-world applications. FL expands the availability of data for FMs and enables computation sharing, distributing the training process and reducing the burden on FL participants. It promotes collaborative FM development, democratizing the process and fostering inclusivity and innovation. On the other hand, FM, with its enormous size, pre-trained knowledge, and exceptional performance, serves as a robust starting point for FL, facilitating faster convergence and better performance under non-iid data. Additionally, leveraging FM to generate synthetic data enriches data diversity, reduces overfitting, and preserves privacy. By examining the interplay between FL and FM, this paper aims to deepen the understanding of their synergistic relationship, highlighting the motivations, challenges, and future directions. Through an exploration of the challenges faced by FL and FM individually and their interconnections, we aim to inspire future research directions that can further enhance both fields, driving advancements and propelling the development of privacy-preserving and scalable AI systems.", "year": 2023, "venue": "arXiv.org", "authors": [ "Weiming Zhuang", "Chen Chen", "Lingjuan Lyu" ], "externalIds": { "DBLP": "journals/corr/abs-2306-15546", "ArXiv": "2306.15546", "DOI": "10.48550/arXiv.2306.15546", "CorpusId": 259262601 }, "url": "https://www.semanticscholar.org/paper/242dbbef9e7f624525c57645f193e0b13a90ad44", "referenceCount": 145, "citationCount": 54, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Can Public Large Language Models Help Private Cross-device Federated Learning?", "abstract": "We study (differentially) private federated learning (FL) of language models. The language models in cross-device FL are relatively small, which can be trained with meaningful formal user-level differential privacy (DP) guarantees when massive parallelism in training is enabled by the participation of a moderate size of users. Recently, public data has been used to improve privacy-utility trade-offs for both large and small language models. In this work, we provide a systematic study of using large-scale public data and LLMs to help differentially private training of on-device FL models, and further improve the privacy-utility tradeoff by techniques of distillation. Moreover, we propose a novel distribution matching algorithm with theoretical grounding to sample public data close to private data distribution, which significantly improves the sample efficiency of (pre-)training on public data. 
The proposed method is efficient and effective for training private models by taking advantage of public data, especially for customized on-device architectures that do not have ready-to-use pre-trained models.", "year": 2023, "venue": "NAACL-HLT", "authors": [ "Boxin Wang", "Yibo Zhang", "Yuan Cao", "Bo Li", "H. B. McMahan", "Sewoong Oh", "Zheng Xu", "M. Zaheer" ], "externalIds": { "DBLP": "conf/naacl/WangZCLMOXZ24", "ArXiv": "2305.12132", "DOI": "10.48550/arXiv.2305.12132", "CorpusId": 258833462 }, "url": "https://www.semanticscholar.org/paper/865662f736e0b9cd5ddbabc23294f68ff3484138", "referenceCount": 57, "citationCount": 27, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Foundation Models: Privacy-Preserving and Collaborative Learning for Large Models", "abstract": "Foundation Models (FMs), such as LLaMA, BERT, GPT, ViT, and CLIP, have demonstrated remarkable success in a wide range of applications, driven by their ability to leverage vast amounts of data for pre-training. However, optimizing FMs often requires access to sensitive data, raising privacy concerns and limiting their applicability in many domains. In this paper, we propose the Federated Foundation Models (FFMs) paradigm, which combines the benefits of FMs and Federated Learning (FL) to enable privacy-preserving and collaborative learning across multiple end-users. We discuss the potential benefits and challenges of integrating FL into the lifespan of FMs, covering pre-training, fine-tuning, and application. We further outline potential future research avenues in FFM, including FFM pre-training, FFM fine-tuning, and federated prompt tuning, which allow the development of more personalized and context-aware models while ensuring data privacy. Moreover, we explore the possibility of continual/lifelong learning in FFMs, as increased computational power at the edge may unlock the potential for optimizing FMs using newly generated private data close to the data source. The proposed FFM concepts offer a flexible and scalable framework for training large language models in a privacy-preserving manner, setting the stage for subsequent advancements in both FM training and federated learning.", "year": 2023, "venue": "International Conference on Language Resources and Evaluation", "authors": [ "Sixing Yu", "J. P. Muñoz", "A. Jannesari" ], "externalIds": { "DBLP": "journals/corr/abs-2305-11414", "ArXiv": "2305.11414", "ACL": "2024.lrec-main.630", "DOI": "10.48550/arXiv.2305.11414", "CorpusId": 258823148 }, "url": "https://www.semanticscholar.org/paper/aa6ba4ade170abfb6c6c99d3ab5f1957b6ccec83", "referenceCount": 56, "citationCount": 31, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Towards Building The Federatedgpt: Federated Instruction Tuning", "abstract": "While \"instruction-tuned\" generative large language models (LLMs) have demonstrated an impressive ability to generalize to new tasks, the training phases heavily rely on large amounts of diverse and high-quality instruction data (such as ChatGPT and GPT-4). Unfortunately, acquiring high-quality data, especially when it comes to human-written data, can pose significant challenges both in terms of cost and accessibility. Moreover, concerns related to privacy can further limit access to such data, making the process of obtaining it a complex and nuanced undertaking. 
To tackle this issue, our study introduces a new approach called Federated Instruction Tuning (FedIT), which leverages federated learning (FL) as the learning framework for the instruction tuning of LLMs. This marks the first exploration of FL-based instruction tuning for LLMs. This is especially important since text data is predominantly generated by end users. For example, collecting extensive amounts of everyday user conversations can be a useful approach to improving the generalizability of LLMs, allowing them to generate authentic and natural responses. Therefore, it is imperative to design and adapt FL approaches to effectively leverage these users’ diverse instructions stored on local devices while mitigating concerns related to the data sensitivity and the cost of data transmission. In this study, we leverage extensive qualitative analysis, including the prevalent GPT-4 auto-evaluation to illustrate how our FedIT framework enhances the performance of LLMs. Utilizing diverse instruction sets on the client side, FedIT outperforms centralized training with only limited local instructions.", "year": 2023, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Jianyi Zhang", "Saeed Vahidian", "Martin Kuo", "Chunyuan Li", "Ruiyi Zhang", "Guoyin Wang", "Yiran Chen" ], "externalIds": { "ArXiv": "2305.05644", "DBLP": "conf/icassp/ZhangVKLZ00024", "DOI": "10.1109/ICASSP48485.2024.10447454", "CorpusId": 258564501 }, "url": "https://www.semanticscholar.org/paper/5db3257a61d86f302767ae1f21d6fd30567f12e5", "referenceCount": 103, "citationCount": 63, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "pFedPrompt: Learning Personalized Prompt for Vision-Language Models in Federated Learning", "abstract": "Pre-trained vision-language models like CLIP show great potential in learning representations that capture latent characteristics of users. A recently proposed method called Contextual Optimization (CoOp) introduces the concept of training prompt for adapting pre-trained vision-language models. Given the lightweight nature of this method, researchers have migrated the paradigm from centralized to decentralized system to innovate the collaborative training framework of Federated Learning (FL). However, current prompt training in FL mainly focuses on modeling user consensus and lacks the adaptation to user characteristics, leaving the personalization of prompt largely under-explored. Researches over the past few years have applied personalized FL (pFL) approaches to customizing models for heterogeneous users. Unfortunately, we find that with the variation of modality and training behavior, directly applying the pFL methods to prompt training leads to insufficient personalization and performance. To bridge the gap, we present pFedPrompt, which leverages the unique advantage of multimodality in vision-language models by learning user consensus from linguistic space and adapting to user characteristics in visual space in a non-parametric manner. Through this dual collaboration, the learned prompt will be fully personalized and aligned to the user’s local characteristics. We conduct extensive experiments across various datasets under the FL setting with statistical heterogeneity. 
The results demonstrate the superiority of our pFedPrompt against the alternative approaches with robust performance.", "year": 2023, "venue": "The Web Conference", "authors": [ "Tao Guo", "Song Guo", "Junxiao Wang" ], "externalIds": { "DBLP": "conf/www/Guo0W23", "DOI": "10.1145/3543507.3583518", "CorpusId": 258333829 }, "url": "https://www.semanticscholar.org/paper/c6be8510ea66521cf9d48befce4b012ac0cb0aea", "referenceCount": 60, "citationCount": 35, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Prompting and Chain-of-Thought Reasoning for Improving LLMs Answering", "abstract": "We investigate how to enhance answer precision in frequently asked questions posed by distributed users using cloud-based Large Language Models (LLMs). Our study focuses on a typical situations where users ask similar queries that involve identical mathematical reasoning steps and problem-solving procedures. Due to the unsatisfactory accuracy of LLMs' zero-shot prompting with standalone questions, we propose to improve the distributed synonymous questions using Self-Consistency (SC) and Chain-of-Thought (CoT) techniques. Specifically, we first retrieve synonymous questions from a crowd-sourced database and create a federated question pool. We call these federated synonymous questions with the same or different parameters SP-questions or DP-questions, respectively. We refer to our methods as Fed-SP-SC and Fed-DP-CoT, which can generate significantly more accurate answers for all user queries without requiring sophisticated model-tuning. Through extensive experiments, we demonstrate that our proposed methods can significantly enhance question accuracy by fully exploring the synonymous nature of the questions and the consistency of the answers.", "year": 2023, "venue": "Knowledge Science, Engineering and Management", "authors": [ "Xiangyang Liu", "Tianqi Pang", "Chenyou Fan" ], "externalIds": { "ArXiv": "2304.13911", "DBLP": "conf/ksem/LiuPF23", "DOI": "10.48550/arXiv.2304.13911", "CorpusId": 258352268 }, "url": "https://www.semanticscholar.org/paper/a7c0d9bf44045c9d4c41e329e2a87df0ae7e0af6", "referenceCount": 32, "citationCount": 15, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedML-HE: An Efficient Homomorphic-Encryption-Based Privacy-Preserving Federated Learning System", "abstract": "Federated Learning trains machine learning models on distributed devices by aggregating local model updates instead of local data. However, privacy concerns arise as the aggregated local models on the server may reveal sensitive personal information by inversion attacks. Privacy-preserving methods, such as homomorphic encryption (HE), then become necessary for FL training. Despite HE's privacy advantages, its applications suffer from impractical overheads, especially for foundation models. In this paper, we present FedML-HE, the first practical federated learning system with efficient HE-based secure model aggregation. FedML-HE proposes to selectively encrypt sensitive parameters, significantly reducing both computation and communication overheads during training while providing customizable privacy preservation. 
Our optimized system demonstrates considerable overhead reduction, particularly for large foundation models (e.g., ~10x reduction for ResNet-50, and up to ~40x reduction for BERT), demonstrating the potential for scalable HE-based FL deployment.", "year": 2023, "venue": "arXiv.org", "authors": [ "Weizhao Jin", "Yuhang Yao", "Shanshan Han", "Carlee Joe-Wong", "Srivatsan Ravi", "A. Avestimehr", "Chaoyang He" ], "externalIds": { "DBLP": "journals/corr/abs-2303-10837", "ArXiv": "2303.10837", "DOI": "10.48550/arXiv.2303.10837", "CorpusId": 257631492 }, "url": "https://www.semanticscholar.org/paper/e0cad7a42d7039ead9b7f88439208444c3fa1454", "referenceCount": 70, "citationCount": 21, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Knowledge Distillation from Multiple Foundation Models for End-to-End Speech Recognition", "abstract": "Although large foundation models pre-trained by self-supervised learning have achieved state-of-the-art performance in many tasks including automatic speech recognition (ASR), knowledge distillation (KD) is often required in practice to transfer the knowledge learned by large teacher models into much smaller student models with affordable computation and memory costs. This paper proposes a novel two-stage KD framework to distil the knowledge from multiple speech foundation models as teachers into a single student neural transducer model for ASR. In the first stage, the student model encoder is pre-trained using the embeddings extracted from multiple teacher models. In the second stage, the student encoder is fine-tuned with the audio-text pairs based on the ASR task. Experiments on the LibriSpeech 100-hour subset show that the proposed KD framework improves the performance of both streaming and non-streaming student models when using only one teacher. The performance of the student model can be further enhanced when multiple teachers are used jointly, achieving word error rate reductions (WERRs) of 17.5% and 10.6%. Our proposed framework can be combined with other existing KD methods to achieve further improvements. Further WERRs were obtained by incorporating extra unlabelled data during encoder pre-training, leading to a total relative WERR of 55.0% on the non-streaming student model.", "year": 2023, "venue": "arXiv.org", "authors": [ "Xiaoyu Yang", "Qiujia Li", "C. Zhang", "P. Woodland" ], "externalIds": { "ArXiv": "2303.10917", "DBLP": "journals/corr/abs-2303-10917", "DOI": "10.48550/arXiv.2303.10917", "CorpusId": 257632314 }, "url": "https://www.semanticscholar.org/paper/df69ba7294ed7080bccd2f474c7b733d704db65f", "referenceCount": 55, "citationCount": 3, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "FedCLIP: Fast Generalization and Personalization for CLIP in Federated Learning", "abstract": "Federated learning (FL) has emerged as a new paradigm for privacy-preserving computation in recent years. Unfortunately, FL faces two critical challenges that hinder its actual performance: data distribution heterogeneity and high resource costs brought by large foundation models. Specifically, the non-IID data in different clients make existing FL algorithms hard to converge while the high resource costs, including computational and communication costs that increase the deployment difficulty in real-world scenarios. 
In this paper, we propose an effective yet simple method, named FedCLIP, to achieve fast generalization and personalization for CLIP in federated learning. Concretely, we design an attention-based adapter for the large model, CLIP, and the rest operations merely depend on adapters. Lightweight adapters can make the most use of pretrained model information and ensure models be adaptive for clients in specific tasks. Simultaneously, small-scale operations can mitigate the computational burden and communication burden caused by large models. Extensive experiments are conducted on three datasets with distribution shifts. Qualitative and quantitative results demonstrate that FedCLIP significantly outperforms other baselines (9% overall improvements on PACS) and effectively reduces computational and communication costs (283x faster than FedAVG). Our code will be available at: https://github.com/microsoft/PersonalizedFL.", "year": 2023, "venue": "IEEE Data Engineering Bulletin", "authors": [ "Wang Lu", "Xixu Hu", "Jindong Wang", "Xingxu Xie" ], "externalIds": { "DBLP": "journals/debu/LuH0023", "ArXiv": "2302.13485", "DOI": "10.48550/arXiv.2302.13485", "CorpusId": 257220009 }, "url": "https://www.semanticscholar.org/paper/922729f3c8bebc4dea35c1b0dcc2769345759c8c", "referenceCount": 63, "citationCount": 36, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Prompt Federated Learning for Weather Forecasting: Toward Foundation Models on Meteorological Data", "abstract": "To tackle the global climate challenge, it urgently needs to develop a collaborative platform for comprehensive weather forecasting on large-scale meteorological data. Despite urgency, heterogeneous meteorological sensors across countries and regions, inevitably causing multivariate heterogeneity and data exposure, become the main barrier. This paper develops a foundation model across regions capable of understanding complex meteorological data and providing weather forecasting. To relieve the data exposure concern across regions, a novel federated learning approach has been proposed to collaboratively learn a brand-new spatio-temporal Transformer-based foundation model across participants with heterogeneous meteorological data. Moreover, a novel prompt learning mechanism has been adopted to satisfy low-resourced sensors' communication and computational constraints. The effectiveness of the proposed method has been demonstrated on classical weather forecasting tasks using three meteorological datasets with multivariate time series.", "year": 2023, "venue": "International Joint Conference on Artificial Intelligence", "authors": [ "Shen Chen", "Guodong Long", "Tao Shen", "Jing Jiang" ], "externalIds": { "ArXiv": "2301.09152", "DBLP": "journals/corr/abs-2301-09152", "DOI": "10.48550/arXiv.2301.09152", "CorpusId": 256105644 }, "url": "https://www.semanticscholar.org/paper/a4d189fb6375245c0d72cc8ad8507708473430df", "referenceCount": 67, "citationCount": 22, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Self-Instruct: Aligning Language Models with Self-Generated Instructions", "abstract": "Large “instruction-tuned” language models (i.e., finetuned to respond to instructions) have demonstrated a remarkable ability to generalize zero-shot to new tasks. 
Nevertheless, they depend heavily on human-written instruction data that is often limited in quantity, diversity, and creativity, therefore hindering the generality of the tuned model. We introduce Self-Instruct, a framework for improving the instruction-following capabilities of pretrained language models by bootstrapping off their own generations. Our pipeline generates instructions, input, and output samples from a language model, then filters invalid or similar ones before using them to finetune the original model. Applying our method to the vanilla GPT3, we demonstrate a 33% absolute improvement over the original model on Super-NaturalInstructions, on par with the performance of InstructGPT-001, which was trained with private user data and human annotations. For further evaluation, we curate a set of expert-written instructions for novel tasks, and show through human evaluation that tuning GPT3 with Self-Instruct outperforms using existing public instruction datasets by a large margin, leaving only a 5% absolute gap behind InstructGPT-001. Self-Instruct provides an almost annotation-free method for aligning pre-trained language models with instructions, and we release our large synthetic dataset to facilitate future studies on instruction tuning.", "year": 2022, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Yizhong Wang", "Yeganeh Kordi", "Swaroop Mishra", "Alisa Liu", "Noah A. Smith", "Daniel Khashabi", "Hannaneh Hajishirzi" ], "externalIds": { "ACL": "2023.acl-long.754", "DBLP": "journals/corr/abs-2212-10560", "ArXiv": "2212.10560", "DOI": "10.48550/arXiv.2212.10560", "CorpusId": 254877310 }, "url": "https://www.semanticscholar.org/paper/e65b346d442e9962a4276dc1c1af2956d9d5f1eb", "referenceCount": 66, "citationCount": 1477, "influentialCitationCount": 151, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedPETuning: When Federated Learning Meets the Parameter-Efficient Tuning Methods of Pre-trained Language Models", "abstract": "With increasing privacy concerns on data, recent studies have made significant progress using federated learning (FL) on privacy-sensitive natural language processing (NLP) tasks. Much literature suggests fully fine-tuning pre-trained language models (PLMs) in the FL paradigm can mitigate the data heterogeneity problem and close the performance gap with centralized training. However, large PLMs bring the curse of prohibitive communication overhead and local model adaptation costs for the FL system. To this end, we introduce various parameter-efficient tuning (PETuning) methods into federated learning. Specifically, we provide a holistic empirical study of representative PLMs tuning methods in FL. The experimental results cover the analysis of data heterogeneity levels, data scales, and different FL scenarios. Overall communication overhead can be significantly reduced by locally tuning and globally aggregating lightweight model parameters while maintaining acceptable performance in various FL settings. To facilitate the research of PETuning in FL, we also develop a federated tuning framework FedPETuning, which allows practitioners to exploit different PETuning methods under the FL training paradigm conveniently. 
The source code is available at \\url{https://github.com/iezhuozhuo/FedETuning/tree/deltaTuning}.", "year": 2022, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Zhuo Zhang", "Yuanhang Yang", "Yong Dai", "Lizhen Qu", "Zenglin Xu" ], "externalIds": { "DBLP": "conf/acl/ZhangYDWYQX23", "ArXiv": "2212.10025", "DOI": "10.18653/v1/2023.findings-acl.632", "CorpusId": 254877120 }, "url": "https://www.semanticscholar.org/paper/763125fd2befe605b009cdd8d7ee8c8c694bc9e5", "referenceCount": 45, "citationCount": 33, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Few-Shot Learning for Mobile NLP", "abstract": "Natural language processing (NLP) sees rich mobile applications. To support various language understanding tasks, a foundation NLP model is often fine-tuned in a federated, privacy-preserving setting (FL). This process currently relies on at least hundreds of thousands of labeled training samples from mobile clients; yet mobile users often lack willingness or knowledge to label their data. Such an inadequacy of data labels is known as a few-shot scenario; it becomes the key blocker for mobile NLP applications. For the first time, this work investigates federated NLP in the few-shot scenario (FedFSL). By retrofitting algorithmic advances of pseudo labeling and prompt learning, we first establish a training pipeline that delivers competitive accuracy when only 0.05% (fewer than 100) of the training data is labeled and the remaining is unlabeled. To instantiate the workflow, we further present a system FeS1, addressing the high execution cost with novel designs: (1) Curriculum pacing, which injects pseudo labels to the training workflow at a rate commensurate to the learning progress; (2) Representational diversity, a mechanism for selecting the most learnable data, only for which pseudo labels will be generated; (3) Co-planning of a model's training depth and layer capacity. Together, these designs reduce the training delay, client energy, and network traffic by up to 46.0×, 41.2× and 3000.0×, respectively. Through algorithm/system co-design, FeS demonstrates that FL can apply to challenging settings where most training samples are unlabeled.", "year": 2022, "venue": "ACM/IEEE International Conference on Mobile Computing and Networking", "authors": [ "Dongqi Cai", "Shangguang Wang", "Yaozong Wu", "F. Lin", "Mengwei Xu" ], "externalIds": { "DBLP": "conf/mobicom/CaiWWLX23a", "ArXiv": "2212.05974", "DOI": "10.1145/3570361.3613277", "CorpusId": 254564325 }, "url": "https://www.semanticscholar.org/paper/341546ab4ff1945b004d18300749419c3896c6c9", "referenceCount": 111, "citationCount": 10, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Learning for Inference at Anytime and Anywhere", "abstract": "100, FEMNIST and SpeechCommandsv2 demonstrate that this simple framework provides fast and accurate FL while supporting heterogenous device capabilities, efficient personalization, and scalable-cost anytime inference", "year": 2022, "venue": "arXiv.org", "authors": [ "Zicheng Liu", "Da Li", "Javier Fernández-Marqués", "Stefanos Laskaridis", "Yan Gao", "L. Dudziak", "Stan Z. Li", "S. Hu", "Timothy M. 
Hospedales" ], "externalIds": { "DBLP": "journals/corr/abs-2212-04084", "ArXiv": "2212.04084", "DOI": "10.48550/arXiv.2212.04084", "CorpusId": 254408530 }, "url": "https://www.semanticscholar.org/paper/a2ff9a03d542544fcc1bec30e40c0d2ae46b0395", "referenceCount": 38, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Adaptive Prompt Tuning for Multi-Domain Collaborative Learning", "abstract": "Federated learning (FL) enables multiple clients to collaboratively train a global model without disclosing their data. Previous researches often require training the complete model parameters. However, the emergence of powerful pre-trained models makes it possible to achieve higher performance with fewer learnable parameters in FL. In this paper, we propose a federated adaptive prompt tuning algorithm, FedAPT, for multi-domain collaborative image classification with powerful foundation models, like CLIP. Compared with direct federated prompt tuning, our core idea is to adaptively unlock specific domain knowledge for each test sample in order to provide them with personalized prompts. To implement this idea, we design an adaptive prompt tuning module, which consists of a meta prompt, an adaptive network, and some keys. The server randomly generates a set of keys and assigns a unique key to each client. Then all clients cooperatively train the global adaptive network and meta prompt with the local datasets and the frozen keys. Ultimately, the global aggregation model can assign a personalized prompt to CLIP based on the domain features of each test sample. We perform extensive experiments on two multi-domain image classification datasets across two different settings -- supervised and unsupervised. The results show that FedAPT can achieve better performance with less than 10% of the number of parameters of the fully trained model, and the global model can perform well in diverse client domains simultaneously.", "year": 2022, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Shangchao Su", "Min Yang", "Bin Li", "Xiangyang Xue" ], "externalIds": { "DBLP": "conf/aaai/SuY0X24", "ArXiv": "2211.07864", "DOI": "10.1609/aaai.v38i13.29434", "CorpusId": 253523463 }, "url": "https://www.semanticscholar.org/paper/b1e50cac6019353db4991dabb8ad56939fc21dd9", "referenceCount": 50, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedTune: A Deep Dive into Efficient Federated Fine-Tuning with Pre-trained Transformers", "abstract": "Federated Learning (FL) is an emerging paradigm that enables distributed users to collaboratively and iteratively train machine learning models without sharing their private data. Motivated by the effectiveness and robustness of self-attention-based architectures, researchers are turning to using pre-trained Transformers (i.e., foundation models) instead of traditional convolutional neural networks in FL to leverage their excellent transfer learning capabilities. Despite recent progress, how pre-trained Transformer models play a role in FL remains obscure, that is, how to efficiently fine-tune these pre-trained models in FL and how FL users could benefit from this new paradigm. In this paper, we explore this issue and demonstrate that the fine-tuned Transformers achieve extraordinary performance on FL, and that the lightweight fine-tuning method facilitates a fast convergence rate and low communication costs. 
Concretely, we conduct a rigorous empirical study of three tuning methods (i.e., modifying the input, adding extra modules, and adjusting the backbone) using two types of pre-trained models (i.e., vision-language models and vision models) for FL. Our experiments show that 1) Fine-tuning the bias term of the backbone performs best when relying on a strong pre-trained model; 2) The vision-language model (e.g., CLIP) outperforms the pure vision model (e.g., ViT) and is more robust to the few-shot settings; 3) Compared to pure local training, FL with pre-trained models has a higher accuracy because it alleviates the problem of over-fitting. We will release our code and encourage further exploration of pre-trained Transformers and FL.", "year": 2022, "venue": "arXiv.org", "authors": [ "Jinyu Chen", "Wenchao Xu", "Song Guo", "Junxiao Wang", "J. Zhang", "Haozhao Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2211-08025", "ArXiv": "2211.08025", "DOI": "10.48550/arXiv.2211.08025", "CorpusId": 253523368 }, "url": "https://www.semanticscholar.org/paper/4aa6ae7d867ee9a08d3f81adfb851e115e23b8b0", "referenceCount": 52, "citationCount": 26, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Using Synthetic Data to Reduce Model Convergence Time in Federated Learning", "abstract": "Federated Learning (FL) is a hot new topic in collaborative training of machine learning problems. It is a privacy-preserving distributed machine learning approach, allowing multiple clients to jointly train a global model under the coordination of a central server, while keeping their sensitive data private. The problem with FL systems is that they require intense communication between the server and clients to achieve the final machine learning model. Such complexity increases with the number of clients participating and the complexity of the model sought. In this paper, we introduce synthetic data generation into FL systems with the intention of reducing the number of iterations required for model convergence. In this novel method, clients generate synthetic datasets modeling their private data. The synthetic datasets are then sent to the central server and are used to generate a cognizant initial model. Our experiments show that such conscious method for generating the initial model lowers the number of iterations by a factor of more than 4 without affecting the model accuracy. As such it enhances the overall efficiency of FL systems.", "year": 2022, "venue": "International Conference on Advances in Social Networks Analysis and Mining", "authors": [ "F. Dankar", "N. Madathil" ], "externalIds": { "DBLP": "conf/asunam/DankarM22", "DOI": "10.1109/ASONAM55673.2022.10068615", "CorpusId": 257720206 }, "url": "https://www.semanticscholar.org/paper/f4c619de20b0608c915929cb4a5412cf4c5816a4", "referenceCount": 32, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Where to Begin? On the Impact of Pre-Training and Initialization in Federated Learning", "abstract": "An oft-cited challenge of federated learning is the presence of heterogeneity. \\emph{Data heterogeneity} refers to the fact that data from different clients may follow very different distributions. \\emph{System heterogeneity} refers to the fact that client devices have different system capabilities. A considerable number of federated optimization methods address this challenge. 
In the literature, empirical evaluations usually start federated training from random initialization. However, in many practical applications of federated learning, the server has access to proxy data for the training task that can be used to pre-train a model before starting federated training. We empirically study the impact of starting from a pre-trained model in federated learning using four standard federated learning benchmark datasets. Unsurprisingly, starting from a pre-trained model reduces the training time required to reach a target error rate and enables the training of more accurate models (up to 40%) than is possible when starting from random initialization. Surprisingly, we also find that starting federated learning from a pre-trained initialization reduces the effect of both data and system heterogeneity. We recommend that future work proposing and evaluating federated optimization methods evaluate the performance when starting from random and pre-trained initializations. We also believe this study raises several questions for further work on understanding the role of heterogeneity in federated optimization.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "John Nguyen", "Jianyu Wang", "Kshitiz Malik", "Maziar Sanjabi", "Michael G. Rabbat" ], "externalIds": { "DBLP": "journals/corr/abs-2210-08090", "ArXiv": "2210.08090", "DOI": "10.48550/arXiv.2210.08090", "CorpusId": 252918693 }, "url": "https://www.semanticscholar.org/paper/fc721f4fd6260ed3b86c64eaa204375e18863aad", "referenceCount": 46, "citationCount": 47, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Survey on Heterogeneous Federated Learning", "abstract": "Federated learning (FL) has been proposed to protect data privacy and virtually assemble the isolated data silos by cooperatively training models among organizations without breaching privacy and security. However, FL faces heterogeneity from various aspects, including data space, statistical, and system heterogeneity. For example, collaborative organizations without conflict of interest often come from different areas and have heterogeneous data from different feature spaces. Participants may also want to train heterogeneous personalized local models due to non-IID and imbalanced data distribution and various resource-constrained devices. Therefore, heterogeneous FL is proposed to address the problem of heterogeneity in FL. In this survey, we comprehensively investigate the domain of heterogeneous FL in terms of data space, statistical, system, and model heterogeneity. We first give an overview of FL, including its definition and categorization. Then, we propose a precise taxonomy of heterogeneous FL settings for each type of heterogeneity according to the problem setting and learning objective. We also investigate the transfer learning methodologies to tackle the heterogeneity in FL. We further present the applications of heterogeneous FL. 
Finally, we highlight the challenges and opportunities and envision promising future research directions toward new framework design and trustworthy approaches.", "year": 2022, "venue": "arXiv.org", "authors": [ "Dashan Gao", "Xin Yao", "Qian Yang" ], "externalIds": { "ArXiv": "2210.04505", "DBLP": "journals/corr/abs-2210-04505", "DOI": "10.48550/arXiv.2210.04505", "CorpusId": 252780731 }, "url": "https://www.semanticscholar.org/paper/e467ad57b7872a48b5210af756f5260dad8a5ab4", "referenceCount": 223, "citationCount": 45, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Learning from Pre-Trained Models: A Contrastive Learning Approach", "abstract": "Federated Learning (FL) is a machine learning paradigm that allows decentralized clients to learn collaboratively without sharing their private data. However, excessive computation and communication demands pose challenges to current FL frameworks, especially when training large-scale models. To prevent these issues from hindering the deployment of FL systems, we propose a lightweight framework where clients jointly learn to fuse the representations generated by multiple fixed pre-trained models rather than training a large-scale model from scratch. This leads us to a more practical FL problem by considering how to capture more client-specific and class-relevant information from the pre-trained models and jointly improve each client's ability to exploit those off-the-shelf models. In this work, we design a Federated Prototype-wise Contrastive Learning (FedPCL) approach which shares knowledge across clients through their class prototypes and builds client-specific representations in a prototype-wise contrastive manner. Sharing prototypes rather than learnable model parameters allows each client to fuse the representations in a personalized way while keeping the shared knowledge in a compact form for efficient communication. We perform a thorough evaluation of the proposed FedPCL in the lightweight framework, measuring and visualizing its ability to fuse various pre-trained models on popular FL datasets.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Yue Tan", "Guodong Long", "Jie Ma", "Lu Liu", "Tianyi Zhou", "Jing Jiang" ], "externalIds": { "DBLP": "conf/nips/TanLML0022", "ArXiv": "2209.10083", "DOI": "10.48550/arXiv.2209.10083", "CorpusId": 252407543 }, "url": "https://www.semanticscholar.org/paper/2510bd1b8bc09d7b8f08cf888297753c97c5ef44", "referenceCount": 87, "citationCount": 112, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedPrompt: Communication-Efficient and Privacy-Preserving Prompt Tuning in Federated Learning", "abstract": "Federated learning (FL) has enabled global model training on decentralized data in a privacy-preserving way. However, for tasks that utilize pre-trained language models (PLMs) with massive parameters, there are considerable communication costs. Prompt tuning, which tunes soft prompts without modifying PLMs, has achieved excellent performance as a new learning paradigm. In this paper, we want to combine these methods and explore the effect of prompt tuning under FL. We propose \"FedPrompt\" studying prompt tuning in a model split aggregation way using FL, and prove that split aggregation greatly reduces the communication cost, only 0.01% of the PLMs’ parameters, with little decrease on accuracy both on IID and Non-IID data distribution. 
We further conduct backdoor attacks by data poisoning on FedPrompt. Experiments show that attack achieve a quite low attack success rate and can not inject backdoor effectively, proving the robustness of FedPrompt.", "year": 2022, "venue": "IEEE International Conference on Acoustics, Speech, and Signal Processing", "authors": [ "Haodong Zhao", "Wei Du", "Fang Li", "Peixuan Li", "Gongshen Liu" ], "externalIds": { "DBLP": "conf/icassp/ZhaoDLLL23", "ArXiv": "2208.12268", "DOI": "10.1109/ICASSP49357.2023.10095356", "CorpusId": 252762685 }, "url": "https://www.semanticscholar.org/paper/15abd9759bc65f560abf74eb5bf14ce40a0c7526", "referenceCount": 50, "citationCount": 40, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PromptFL: Let Federated Participants Cooperatively Learn Prompts Instead of Models – Federated Learning in Age of Foundation Model", "abstract": "Quick global aggregation of effective distributed parameters is crucial to federated learning (FL), which requires adequate bandwidth for parameters communication and sufficient user data for local training. Otherwise, FL may cost excessive training time for convergence and produce inaccurate models. In this paper, we propose a brand-new FL framework, PromptFL, that replaces the federated model training with the federated prompt training, i.e., let federated participants train prompts instead of a shared model, to simultaneously achieve the efficient global aggregation and local training on insufficient data by exploiting the power of foundation models (FM) in a distributed way. PromptFL ships an off-the-shelf FM, i.e., CLIP, to distributed clients who would cooperatively train shared soft prompts based on very few local data. Since PromptFL only needs to update the prompts instead of the whole model, both the local training and the global aggregation can be significantly accelerated. And FM trained over large scale data can provide strong adaptation capability to distributed users tasks with the trained soft prompts. We empirically analyze the PromptFL via extensive experiments, and show its superiority in terms of system feasibility, user privacy, and performance.", "year": 2022, "venue": "IEEE Transactions on Mobile Computing", "authors": [ "Tao Guo", "Song Guo", "Junxiao Wang", "Xueyang Tang", "Wenchao Xu" ], "externalIds": { "ArXiv": "2208.11625", "DBLP": "journals/tmc/GuoGWTX24", "DOI": "10.1109/TMC.2023.3302410", "CorpusId": 251765106 }, "url": "https://www.semanticscholar.org/paper/8319aa06ed1bdc7e455bbc29c07a409f76250a6d", "referenceCount": 65, "citationCount": 69, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On the Importance and Applicability of Pre-Training for Federated Learning", "abstract": "Pre-training is prevalent in nowadays deep learning to improve the learned model's performance. However, in the literature on federated learning (FL), neural networks are mostly initialized with random weights. These attract our interest in conducting a systematic study to explore pre-training for FL. Across multiple visual recognition benchmarks, we found that pre-training can not only improve FL, but also close its accuracy gap to the counterpart centralized learning, especially in the challenging cases of non-IID clients' data. 
To make our findings applicable to situations where pre-trained models are not directly available, we explore pre-training with synthetic data or even with clients' data in a decentralized manner, and found that they can already improve FL notably. Interestingly, many of the techniques we explore are complementary to each other to further boost the performance, and we view this as a critical result toward scaling up deep FL for real-world applications. We conclude our paper with an attempt to understand the effect of pre-training on FL. We found that pre-training enables the learned global models under different clients' data conditions to converge to the same loss basin, and makes global aggregation in FL more stable. Nevertheless, pre-training seems to not alleviate local model drifting, a fundamental problem in FL under non-IID data.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Hong-You Chen", "Cheng-Hao Tu", "Zi-hua Li", "Hang Shen", "Wei-Lun Chao" ], "externalIds": { "DBLP": "conf/iclr/Chen0LSC23", "ArXiv": "2206.11488", "CorpusId": 253158053 }, "url": "https://www.semanticscholar.org/paper/393148d88541881b590c36eaf0effc4a6a823035", "referenceCount": 100, "citationCount": 57, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Recovering Private Text in Federated Learning of Language Models", "abstract": "Federated learning allows distributed users to collaboratively train a model while keeping each user's data private. Recently, a growing body of work has demonstrated that an eavesdropping attacker can effectively recover image data from gradients transmitted during federated learning. However, little progress has been made in recovering text data. In this paper, we present a novel attack method FILM for federated learning of language models (LMs). For the first time, we show the feasibility of recovering text from large batch sizes of up to 128 sentences. Unlike image-recovery methods that are optimized to match gradients, we take a distinct approach that first identifies a set of words from gradients and then directly reconstructs sentences based on beam search and a prior-based reordering strategy. We conduct the FILM attack on several large-scale datasets and show that it can successfully reconstruct single sentences with high fidelity for large batch sizes and even multiple sentences if applied iteratively. We evaluate three defense methods: gradient pruning, DPSGD, and a simple approach to freeze word embeddings that we propose. We show that both gradient pruning and DPSGD lead to a significant drop in utility. However, if we fine-tune a public pre-trained LM on private text without updating word embeddings, it can effectively defend the attack with minimal data utility loss. 
Together, we hope that our results can encourage the community to rethink the privacy concerns of LM training and its standard practices in the future.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Samyak Gupta", "Yangsibo Huang", "Zexuan Zhong", "Tianyu Gao", "Kai Li", "Danqi Chen" ], "externalIds": { "ArXiv": "2205.08514", "DBLP": "conf/nips/GuptaHZGLC22", "DOI": "10.48550/arXiv.2205.08514", "CorpusId": 248834481 }, "url": "https://www.semanticscholar.org/paper/6fcbb819920ce206269105d1524489a33518d06d", "referenceCount": 78, "citationCount": 55, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedKC: Federated Knowledge Composition for Multilingual Natural Language Understanding", "abstract": "Multilingual natural language understanding, which aims to comprehend multilingual documents, is an important task. Existing efforts have been focusing on the analysis of centrally stored text data, but in real practice, multilingual data is usually distributed. Federated learning is a promising paradigm to solve this problem, which trains local models with decentralized data on local clients and aggregates local models on the central server to achieve a good global model. However, existing federated learning methods assume that data are independent and identically distributed (IID), and cannot handle multilingual data, that are usually non-IID with severely skewed distributions: First, multilingual data is stored on local client devices such that there are only monolingual or bilingual data stored on each client. This makes it difficult for local models to know the information of documents in other languages. Second, the distribution over different languages could be skewed. High resource language data is much more abundant than low resource language data. The model trained on such skewed data may focus more on high resource languages but fail to consider the key information of low resource languages. To solve the aforementioned challenges of multilingual federated NLU, we propose a plug-and-play knowledge composition (KC) module, called FedKC, which exchanges knowledge among clients without sharing raw data. Specifically, we propose an effective way to calculate a consistency loss defined based on the shared knowledge across clients, which enables models trained on different clients achieve similar predictions on similar data. Leveraging this consistency loss, joint training is thus conducted on distributed data respecting the privacy constraints. We also analyze the potential risk of FedKC and provide theoretical bound to show that it is difficult to recover data from the corrupted data. We conduct extensive experiments on three public multilingual datasets for three typical NLU tasks, including paraphrase identification, question answering matching, and news classification. 
The experiment results show that the proposed FedKC can outperform state-of-the-art baselines on the three datasets significantly.", "year": 2022, "venue": "The Web Conference", "authors": [ "Haoyu Wang", "Handong Zhao", "Yaqing Wang", "Tong Yu", "Jiuxiang Gu", "Jing Gao" ], "externalIds": { "DBLP": "conf/www/WangZW0GG22", "DOI": "10.1145/3485447.3511988", "CorpusId": 248367539 }, "url": "https://www.semanticscholar.org/paper/3ff8b79ddf587f6553083ab64d93e26e712068f1", "referenceCount": 68, "citationCount": 12, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedBERT: When Federated Learning Meets Pre-training", "abstract": "The fast growth of pre-trained models (PTMs) has brought natural language processing to a new era, which has become a dominant technique for various natural language processing (NLP) applications. Every user can download the weights of PTMs, then fine-tune the weights for a task on the local side. However, the pre-training of a model relies heavily on accessing a large-scale of training data and requires a vast amount of computing resources. These strict requirements make it impossible for any single client to pre-train such a model. To grant clients with limited computing capability to participate in pre-training a large model, we propose a new learning approach, FedBERT, that takes advantage of the federated learning and split learning approaches, resorting to pre-training BERT in a federated way. FedBERT can prevent sharing the raw data information and obtain excellent performance. Extensive experiments on seven GLUE tasks demonstrate that FedBERT can maintain its effectiveness without communicating to the sensitive local data of clients.", "year": 2022, "venue": "ACM Transactions on Intelligent Systems and Technology", "authors": [ "Yuanyishu Tian", "Yao Wan", "Lingjuan Lyu", "Dezhong Yao", "Hai Jin", "Lichao Sun" ], "externalIds": { "DBLP": "journals/tist/TianWLYJS22", "DOI": "10.1145/3510033", "CorpusId": 246531797 }, "url": "https://www.semanticscholar.org/paper/220590e7815ea278959329058a5de3e4c9df9f4e", "referenceCount": 70, "citationCount": 86, "influentialCitationCount": 8, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Decepticons: Corrupted Transformers Breach Privacy in Federated Learning for Language Models", "abstract": "A central tenet of Federated learning (FL), which trains models without centralizing user data, is privacy. However, previous work has shown that the gradient updates used in FL can leak user information. While the most industrial uses of FL are for text applications (e.g. keystroke prediction), nearly all attacks on FL privacy have focused on simple image classifiers. We propose a novel attack that reveals private user text by deploying malicious parameter vectors, and which succeeds even with mini-batches, multiple users, and long sequences. Unlike previous attacks on FL, the attack exploits characteristics of both the Transformer architecture and the token embedding, separately extracting tokens and positional embeddings to retrieve high-fidelity text. This work suggests that FL on text, which has historically been resistant to privacy attacks, is far more vulnerable than previously thought.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Liam H. Fowl", "Jonas Geiping", "Steven Reich", "Yuxin Wen", "Wojtek Czaja", "Micah Goldblum", "T. 
Goldstein" ], "externalIds": { "DBLP": "conf/iclr/FowlGRWCGG23", "ArXiv": "2201.12675", "CorpusId": 246430796 }, "url": "https://www.semanticscholar.org/paper/f2cd15c1925ef54d58b3d71506d7113d7911a8c2", "referenceCount": 67, "citationCount": 37, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Chain of Thought Prompting Elicits Reasoning in Large Language Models", "abstract": "We explore how generating a chain of thought -- a series of intermediate reasoning steps -- significantly improves the ability of large language models to perform complex reasoning. In particular, we show how such reasoning abilities emerge naturally in sufficiently large language models via a simple method called chain of thought prompting, where a few chain of thought demonstrations are provided as exemplars in prompting. Experiments on three large language models show that chain of thought prompting improves performance on a range of arithmetic, commonsense, and symbolic reasoning tasks. The empirical gains can be striking. For instance, prompting a 540B-parameter language model with just eight chain of thought exemplars achieves state of the art accuracy on the GSM8K benchmark of math word problems, surpassing even finetuned GPT-3 with a verifier.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Jason Wei", "Xuezhi Wang", "Dale Schuurmans", "Maarten Bosma", "E. Chi", "F. Xia", "Quoc Le", "Denny Zhou" ], "externalIds": { "DBLP": "journals/corr/abs-2201-11903", "ArXiv": "2201.11903", "CorpusId": 246411621 }, "url": "https://www.semanticscholar.org/paper/1b6e810ce0afd0dd093f789d2b2742d047e316d5", "referenceCount": 118, "citationCount": 5208, "influentialCitationCount": 590, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DENSE: Data-Free One-Shot Federated Learning", "abstract": "One-shot Federated Learning (FL) has recently emerged as a promising approach, which allows the central server to learn a model in a single communication round. Despite the low communication cost, existing one-shot FL methods are mostly impractical or face inherent limitations, \\eg a public dataset is required, clients' models are homogeneous, and additional data/model information need to be uploaded. To overcome these issues, we propose a novel two-stage \\textbf{D}ata-fre\\textbf{E} o\\textbf{N}e-\\textbf{S}hot federated l\\textbf{E}arning (DENSE) framework, which trains the global model by a data generation stage and a model distillation stage. DENSE is a practical one-shot FL method that can be applied in reality due to the following advantages: (1) DENSE requires no additional information compared with other methods (except the model parameters) to be transferred between clients and the server; (2) DENSE does not require any auxiliary dataset for training; (3) DENSE considers model heterogeneity in FL, \\ie different clients can have different model architectures. 
Experiments on a variety of real-world datasets demonstrate the superiority of our method. For example, DENSE outperforms the best baseline method Fed-ADI by 5.08% on CIFAR10 dataset.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "J Zhang", "Chen Chen", "Bo Li", "Lingjuan Lyu", "Shuang Wu", "Shouhong Ding", "Chunhua Shen", "Chao Wu" ], "externalIds": { "DBLP": "conf/nips/0081C0L0DS022", "ArXiv": "2112.12371", "CorpusId": 253735434 }, "url": "https://www.semanticscholar.org/paper/5772b1fb16fd23dac5457fed1c986b6c3e315f80", "referenceCount": 63, "citationCount": 74, "influentialCitationCount": 13, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedAffect: Few-shot federated learning for facial expression recognition", "abstract": "Annotation of large-scale facial expression datasets in the real world is a major challenge because of privacy concerns of the individuals due to which traditional supervised learning approaches won’t scale. Moreover, training models on large curated datasets often leads to dataset bias which reduces generalizability for real world use. Federated learning is a recent paradigm for training models collaboratively with decentralized private data on user devices. In this paper, we propose a few-shot federated learning framework which utilizes few samples of labeled private facial expression data to train local models in each training round and aggregates all the local model weights in the central server to get a globally optimal model. In addition, as the user devices are a large source of unlabeled data, we design a federated learning based self-supervised method to disjointly update the feature extractor network on unlabeled private facial data in order to learn robust and diverse face representations. Experimental results by testing the globally trained model on benchmark datasets (FER-2013 and FERG) show comparable performance with state of the art centralized approaches. To the best of author’s knowledge, this is the first work on few-shot federated learning for facial expression recognition.", "year": 2021, "venue": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)", "authors": [ "Debaditya Shome", "Tejaswini Kar" ], "externalIds": { "DBLP": "conf/iccvw/ShomeK21", "DOI": "10.1109/ICCVW54120.2021.00463", "CorpusId": 244531554 }, "url": "https://www.semanticscholar.org/paper/294d748fca814a9671d27d89f4c8b1502a6b953e", "referenceCount": 40, "citationCount": 23, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Pre-train, Prompt, and Predict: A Systematic Survey of Prompting Methods in Natural Language Processing", "abstract": "This article surveys and organizes research works in a new paradigm in natural language processing, which we dub “prompt-based learning.” Unlike traditional supervised learning, which trains a model to take in an input x and predict an output y as P(y|x), prompt-based learning is based on language models that model the probability of text directly. To use these models to perform prediction tasks, the original input x is modified using a template into a textual string prompt x′ that has some unfilled slots, and then the language model is used to probabilistically fill the unfilled information to obtain a final string x̂, from which the final output y can be derived. 
This framework is powerful and attractive for a number of reasons: It allows the language model to be pre-trained on massive amounts of raw text, and by defining a new prompting function the model is able to perform few-shot or even zero-shot learning, adapting to new scenarios with few or no labeled data. In this article, we introduce the basics of this promising paradigm, describe a unified set of mathematical notations that can cover a wide variety of existing work, and organize existing work along several dimensions, e.g., the choice of pre-trained language models, prompts, and tuning strategies. To make the field more accessible to interested beginners, we not only make a systematic review of existing works and a highly structured typology of prompt-based concepts but also release other resources, e.g., a website NLPedia–Pretrain including constantly updated survey and paperlist.", "year": 2021, "venue": "ACM Computing Surveys", "authors": [ "Pengfei Liu", "Weizhe Yuan", "Jinlan Fu", "Zhengbao Jiang", "Hiroaki Hayashi", "Graham Neubig" ], "externalIds": { "DBLP": "journals/csur/LiuYFJHN23", "ArXiv": "2107.13586", "DOI": "10.1145/3560815", "CorpusId": 236493269 }, "url": "https://www.semanticscholar.org/paper/28692beece311a90f5fa1ca2ec9d0c2ce293d069", "referenceCount": 222, "citationCount": 2972, "influentialCitationCount": 220, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Learning via Synthetic Data", "abstract": "Federated learning allows for the training of a model using data on multiple clients without the clients transmitting that raw data. However the standard method is to transmit model parameters (or updates), which for modern neural networks can be on the scale of millions of parameters, inflicting significant computational costs on the clients. We propose a method for federated learning where instead of transmitting a gradient update back to the server, we instead transmit a small amount of synthetic `data'. We describe the procedure and show some experimental results suggesting this procedure has potential, providing more than an order of magnitude reduction in communication costs with minimal model degradation.", "year": 2020, "venue": "arXiv.org", "authors": [ "Jack Goetz", "Ambuj Tewari" ], "externalIds": { "DBLP": "journals/corr/abs-2008-04489", "MAG": "3048813204", "ArXiv": "2008.04489", "CorpusId": 221095527 }, "url": "https://www.semanticscholar.org/paper/bc84cc0e1a033071091d22c6d48a7ee751e967dd", "referenceCount": 8, "citationCount": 62, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "FedMD: Heterogenous Federated Learning via Model Distillation", "abstract": "Federated learning enables the creation of a powerful centralized model without compromising data privacy of multiple participants. While successful, it does not incorporate the case where each participant independently designs its own model. Due to intellectual property concerns and heterogeneous nature of tasks and data, this is a widespread requirement in applications of federated learning to areas such as health care and AI as a service. In this work, we use transfer learning and knowledge distillation to develop a universal framework that enables federated learning when each agent owns not only their private data, but also uniquely designed models. 
We test our framework on the MNIST/FEMNIST dataset and the CIFAR10/CIFAR100 dataset and observe fast improvement across all participating models. With 10 distinct participants, the final test accuracy of each model on average receives a 20% gain on top of what's possible without collaboration and is only a few percent lower than the performance each model would have obtained if all private datasets were pooled and made directly available for all participants.", "year": 2019, "venue": "arXiv.org", "authors": [ "Daliang Li", "Junpu Wang" ], "externalIds": { "MAG": "2980216952", "ArXiv": "1910.03581", "DBLP": "journals/corr/abs-1910-03581", "CorpusId": 203951869 }, "url": "https://www.semanticscholar.org/paper/0a9945cc7ce7f98403358d0c74e9aa2da34e8089", "referenceCount": 18, "citationCount": 664, "influentialCitationCount": 92, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Breaking Physical and Linguistic Borders: Multilingual Federated Prompt Tuning for Low-Resource Languages", "abstract": null, "year": 2024, "venue": "International Conference on Learning Representations", "authors": [ "Wanru Zhao", "Yihong Chen", "Royson Lee", "Xinchi Qiu", "Yan Gao", "Hongxiang Fan", "N. D. Lane" ], "externalIds": { "DBLP": "conf/iclr/ZhaoCLQGFL24", "CorpusId": 271745794 }, "url": "https://www.semanticscholar.org/paper/bb7d05ecffb8ba5037cf025e42117f4b2c51abf2", "referenceCount": 0, "citationCount": 10, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Personalized Federated Learning for Text Classification with Gradient-Free Prompt Tuning", "abstract": "In this paper, we study personalized federated learning for text classification with Pretrained Language Models (PLMs). We identify two challenges in efficiently leveraging PLMs for personalized federated learning: 1) Communication. PLMs are usually large in size, e.g., with hundreds of millions of parameters, inducing huge communication cost in a federated setting. 2) Local Training. Training with PLMs generally requires back-propagation, during which memory consumption can be several times that of the forward-propagation. This may not be affordable when the PLMs are trained locally on the clients, since the clients may be resource constrained, e.g., mobile devices with limited access to memory resources. Additionally, the PLMs can be provided as concealed APIs, for which the back-propagation operations may not be available. For the first challenge, we adopt prompt tuning for PLMs that only train with the prompt parameters, while the pretrained parameters are frozen. We further propose a compression method for the learned prompts to reduce communication cost. For the second challenge, we propose a gradient-free approach based on discrete local search with natural language tokens, circumventing gradient computation with back-propagation, while also reducing the communication cost. Experiments on multiple datasets demonstrates the effectiveness of our method.", "year": 2024, "venue": "NAACL-HLT", "authors": [ "Yang Gao", "Yifan Li", "B. Dong", "Yu Lin", "Tao Guo", "Song Guo", "Junxiao Wang", "Wenchao Xu", "István Hegedűs", "Gábor Danner", "Márk Jelasity", "J. Devlin", "Ming-Wei Chang", "Lee Kristina", "Brian Lester", "Rami Al-Rfou", "Noah Constant", "Q. 
Li", "Zeyi Wen", "Zhaomin Wu", "Sixu Hu", "Anit Tian Li", "Kumar Sahu", "M. Zaheer", "Maziar San-791", "Ameet Talwalkar", "Virginia Smith", "Tianxiang Sun", "Zhen-Min He", "Hong Qian", "Yunhua Zhou", "Chao Tao", "Lu Hou", "Wei Zhang", "Lifeng Shang", "Kevin W. Hamlen", "Latifur Khan", "Adaptive", "Haodong Zhao", "Wei Du", "Fang Li", "Peixuan Li" ], "externalIds": { "DBLP": "conf/naacl/WangYZKRZWMYH24", "DOI": "10.18653/v1/2024.findings-naacl.286", "CorpusId": 259290384 }, "url": "https://www.semanticscholar.org/paper/740cd33d4d94d0f73f4c4672f845a19437b7015b", "referenceCount": 75, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedPIT: Towards Privacy-preserving and Few-shot Federated Instruction Tuning", "abstract": "Instruction tuning has proven essential for enhancing the performance of large language models (LLMs) in generating human-aligned responses. However, collecting diverse, high-quality instruction data for tuning poses challenges, particularly in privacy-sensitive domains. Federated instruction tuning (F ED IT) has emerged as a solution, leveraging federated learning from multiple data owners while preserving privacy. Yet, it faces challenges due to limited instruction data and vulnerabilities to training data extraction attacks. To address these issues, we propose a novel federated algorithm, F ED PIT, which utilizes LLMs’ in-context learning capability to self-generate task-specific synthetic data for training autonomously. Our method employs parameter-isolated training to maintain global parameters trained on synthetic data and local parameters trained on augmented local data, effectively thwarting data extraction attacks. Extensive experiments on real-world medical data demonstrate the effectiveness of F ED PIT in improving federated few-shot performance while preserving privacy and robustness against data heterogeneity.", "year": 2024, "venue": "arXiv.org", "authors": [ "Zhuo Zhang", "Jingyuan Zhang", "Jintao Huang", "Lizhen Qu", "Hongzhi Zhang", "Zenglin Xu" ], "externalIds": { "DBLP": "journals/corr/abs-2403-06131", "DOI": "10.48550/arXiv.2403.06131", "CorpusId": 271333097 }, "url": "https://www.semanticscholar.org/paper/d48eb2400161a2ceb1d787be580dc20f61943d73", "referenceCount": 44, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Domain Adaptation for Named Entity Recognition via Distilling with Heterogeneous Tag Sets", "abstract": "Federated learning involves collaborative training with private data from multiple platforms, while not violating data privacy. We study the problem of federated domain adaptation for Named Entity Recognition (NER), where we seek to transfer knowledge across different platforms with data of multiple domains. In addition, we consider a practical and challenging scenario, where NER datasets of different platforms of federated learning are annotated with heterogeneous tag sets, i.e. , different sets of entity types. The goal is to train a global model with federated learning, such that it can predict with a complete tag set, i.e. , with all the occurring entity types for data across all platforms. To cope with the heterogeneous tag sets in a multi-domain setting, we propose a distillation approach along with a mechanism of instance weighting to facilitate knowledge transfer across platforms. 
Besides, we release two re-annotated clinic NER datasets, for testing the proposed method in the clinic domain. Our method shows superior empirical performance for clinic NER with federated learning.", "year": 2023, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Rui Wang", "Tong Yu", "Junda Wu", "Handong Zhao", "Sungchul Kim", "Ruiyi Zhang", "Subrata Mitra", "Ricardo Henao" ], "externalIds": { "DBLP": "conf/acl/Wang0WZKZMH23", "DOI": "10.18653/v1/2023.findings-acl.470", "CorpusId": 259858886 }, "url": "https://www.semanticscholar.org/paper/2dc55f83ec773698b6bdc8328df1df7a3e252750", "referenceCount": 32, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedMLSecurity: A Benchmark for Attacks and Defenses in Federated Learning and LLMs", "abstract": "This paper introduces FedMLSecurity, a benchmark that simulates adversarial attacks and corresponding defense mechanisms in Federated Learning (FL). As an integral module of the open-sourced library FedML [22] that facilitates FL algorithm development and performance comparison, FedMLSecurity enhances the security assessment capacity of FedML. FedMLSecurity comprises two principal components: FedMLAttacker, which simulates attacks injected into FL training, and FedMLDefender, which emulates defensive strategies designed to mitigate the impacts of the attacks. FedMLSecurity is open-sourced and is customizable to a wide range of machine learning models (e.g., Logistic Regression, ResNet [23], GAN [19], etc.) and federated optimizers (e.g., FedAVG [32], FedOPT [37], FedNOVA [46], etc.). Experimental evaluations in this paper also demonstrate the ease of application of FedMLSecurity to Large Language Models (LLMs), further reinforcing its versatility and practical utility in various scenarios.", "year": 2023, "venue": "arXiv.org", "authors": [ "Shanshan Han", "Baturalp Buyukates", "Zijian Hu", "Han Jin", "Weizhao Jin", "Lichao Sun", "Xiaoya Wang", "Chulin Xie", "Kai Zhang", "Qifan Zhang", "Yuhui Zhang", "Chaoyang He", "S. Avestimehr" ], "externalIds": { "DBLP": "journals/corr/abs-2306-04959", "DOI": "10.48550/arXiv.2306.04959", "CorpusId": 259108574 }, "url": "https://www.semanticscholar.org/paper/f999d04824df9e6b2f125c02039884aa4ccf4702", "referenceCount": 99, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedLoRA: Model-Heterogeneous Personalized Federated Learning with LoRA Tuning", "abstract": "Federated learning (FL) is an emerging machine learning paradigm in which a central server coordinates multiple participants (a.k.a. FL clients) to train a model collaboratively on decentralized data with privacy protection. This paradigm constrains that all clients have to train models with the same structures (homogeneous). In practice, FL often faces statistical heterogeneity, system heterogeneity and model heterogeneity challenges. These challenging issues inspire the field of Model-Heterogeneous Personalized Federated Learning (MHPFL) which aims to train a personalized and heterogeneous local model for each FL client. Existing MHPFL approaches cannot achieve satisfactory model performance, acceptable computational overhead and efficient communication simultaneously. To bridge this gap, we propose a novel computation- and communication-efficient model-heterogeneous personalized Federated learning framework based on LoRA tuning (FedLoRA). 
It is designed to incorporate a homogeneous small adapter for each client’s heterogeneous local model. Both models are trained following the proposed iterative training for global-local knowledge exchange. The homogeneous small local adapters are sent to the FL server to be aggregated into a global adapter. In this way, FL clients can train heterogeneous local models without incurring high computation and communication costs. We theoretically prove the non-convex convergence rate of FedLoRA. Extensive experiments on two real-world datasets demonstrate that FedLoRA outperforms six state-of-the-art baselines, beating the best approach by 1.35% in terms of test accuracy, 11.81× computation overhead reduction and 7.41× communication cost saving.", "year": 2023, "venue": "arXiv.org", "authors": [ "Liping Yi", "Han Yu", "Gang Wang", "Xiaoguang Liu" ], "externalIds": { "DBLP": "journals/corr/abs-2310-13283", "DOI": "10.48550/arXiv.2310.13283", "CorpusId": 264405713 }, "url": "https://www.semanticscholar.org/paper/acbc16961cb2664f74e30abcc0b8d078d802ea85", "referenceCount": 43, "citationCount": 23, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Where Did I Come From? Origin Attribution of AI-Generated Images", "abstract": null, "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Zhenting Wang", "Chen Chen", "Yi Zeng", "Lingjuan Lyu", "Shiqing Ma" ], "externalIds": { "DBLP": "conf/nips/Wang0ZLM23", "CorpusId": 268095526 }, "url": "https://www.semanticscholar.org/paper/6ae452ecaed3d57924ad1a29f71530973550a8f0", "referenceCount": 0, "citationCount": 9, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Fine-tuning of Billion-Sized Language Models across Mobile Devices", "abstract": "Large Language Models (LLMs) are transforming the landscape of mobile intelligence. Federated Learning (FL), a method to preserve user data privacy, is often employed in fine-tuning LLMs to downstream mobile tasks, an approach known as FedLLM. Though recent efforts have addressed the network issue induced by the vast model size, they have not practically mitigated vital challenges concerning integration with mobile devices, such as significant memory consumption and sluggish model convergence. In response to these challenges, this work introduces FwdLLM, an innovative FL protocol designed to enhance the FedLLM efficiency. The key idea of FwdLLM is to employ backpropagation (BP)-free training methods, requiring devices only to execute “perturbed inferences”. Consequently, FwdLLM delivers way better memory efficiency and time efficiency (expedited by mobile NPUs and an expanded array of participant devices). FwdLLM centers around three key designs: (1) it combines BP-free training with parameter-efficient training methods, an essential way to scale the approach to the LLM era; (2) it systematically and adaptively allocates computational loads across devices, striking a careful balance between convergence speed and accuracy; (3) it discriminatively samples perturbed predictions that are more valuable to model convergence. Comprehensive experiments with five LLMs and three NLP tasks illustrate FwdLLM’s significant advantages over conventional methods, including up to three orders of magnitude faster convergence and a 14.6× reduction in memory footprint. 
Uniquely, FwdLLM paves the way for federated learning of billion-parameter LLMs such as LLaMA on COTS mobile devices – a feat previously unattained.", "year": 2023, "venue": "arXiv.org", "authors": [ "Mengwei Xu", "Yaozong Wu", "Dongqi Cai", "Xiang Li", "Shangguang Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2308-13894", "DOI": "10.48550/arXiv.2308.13894", "CorpusId": 271881735 }, "url": "https://www.semanticscholar.org/paper/1c09585099a2fa90b9d7110079d1e6abe9e293f7", "referenceCount": 104, "citationCount": 13, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedMLSecurity: A Benchmark for Attacks and Defenses in Federated Learning and Federated LLMs", "abstract": "This paper introduces FedMLSecurity, an end-to-end benchmark designed to simulate adversarial attacks and corresponding defense mechanisms in Federated Learning (FL). FedMLSecurity comprises two pivotal components: FedMLAttacker, which facilitates the simulation of a variety of attacks during FL training, and FedMLDefender, which implements defensive mechanisms to counteract these attacks. As an open-source library, FedMLSecurity enhances its usability compared to from-scratch implementations that focus on specific attack/defense scenarios based on the following features: i) It offers extensive customization options to accommodate", "year": 2023, "venue": "", "authors": [ "Shanshan Han", "Baturalp Buyukates", "Zijian Hu", "Han Jin", "Weizhao Jin", "Lichao Sun", "Xiaoyang Wang", "Wenxuan Wu", "Chulin Xie", "Yuhang Yao", "Kai Zhang", "Qifan Zhang", "Yuhui Zhang", "S. Avestimehr", "Chaoyang He" ], "externalIds": { "CorpusId": 271201256 }, "url": "https://www.semanticscholar.org/paper/00547fcffcd9dc46b863e3390d1ac6fd9e7f48fb", "referenceCount": 112, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Cefhri: A communication efficient federated learning framework for recognizing industrial human-robot interaction", "abstract": null, "year": 2023, "venue": "2023 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Practical Attribute Reconstruction Attack Against Federated Learning", "abstract": null, "year": 2022, "venue": "IEEE Transactions on Big Data", "authors": [ "Chen Chen", "Lingjuan Lyu", "Han Yu", "Gang Chen" ], "externalIds": { "DOI": "10.1109/tbdata.2022.3159236", "CorpusId": 247484550 }, "url": "https://www.semanticscholar.org/paper/e2d32e065900d947da15119316a445bc67660128", "referenceCount": 0, "citationCount": 22, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "How does nlp benefit legal system: A summary of legal artificial intelligence", "abstract": null, "year": 2020, "venue": "arXiv", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "A Benchmark for", "abstract": null, "year": 2001, "venue": "", "authors": [ "Jonas Lext", "Ulf Assarsson", "Tomas Möller" ], "externalIds": { "MAG": "2186473180", "CorpusId": 195916719 }, "url": "https://www.semanticscholar.org/paper/9fbde5e089e61c12757e835a9c82fb94aa250c7f", "referenceCount": 7, "citationCount": 105, "influentialCitationCount": 12, "isOpenAccess": false, 
"fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fedrdma: Communication-efficient cross-silo federated llm via chun-ked rdma transmission", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Heterogeneous Environments", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Federated fine-tuning of llms on the very edge: The good, the bad, the ugly", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Exploring parameter-efficient fine-tuning for improving communication efficiency in federated learning", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Federated inference through aligning local representations and learning a consensus graph", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "En-hancing storage and computational efficiency in federated multimodal learning for large-scale models", "abstract": null, "year": null, "venue": "Forty-first International Conference on Machine Learning", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Synthetic data generation with large language models for text classification: Potential and limitations", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Fedlogic: Interpretable federated multi-domain chain-of-thought prompt selection for large language models", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Privacy-preserving fine-tuning of artificial intelligence (ai) foundation models with federated learning, differential privacy, offsite tuning, and parameter-efficient fine-tuning (peft)", "abstract": null, "year": null, "venue": "Authorea Preprints", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Efficient federated prompt tuning for black-box large pre-trained models", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Fedbiot: a solution for federated 
large language model fine-tuning with intellectual property protection", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Exploring the robustness of decentralized training", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Heterogeneous low-rank approximation for federated fine-tuning of on-device foundation models", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Adversarial Federated Consensus Learning for Surface Defect Classification Under Data Heterogeneity in IIoT": { "paper_title": "Adversarial Federated Consensus Learning for Surface Defect Classification Under Data Heterogeneity in IIoT", "arxiv_id": "2409.15711v1", "keyword": "federated learning", "authors": [ "Jixuan Cui", "Jun Li", "Zhen Mei", "Yiyang Ni", "Wen Chen", "Zengxiang Li" ], "references": [ { "title": "A Blockchain-Reinforced Federated Intrusion Detection Architecture for IIoT", "abstract": "Federated learning (FL) in Industrial IoT (IIoT) facilitates collaborative model training across distributed edge devices, ensuring data privacy and localized insights without centralized data aggregation. However, the networked parameter sharing mechanism in FL renders it vulnerable to exploitation by man-in-the-middle (MITM) attackers, potentially disrupting the model training process. To mitigate this threat, this article presents a novel blockchain-reinforced FL architecture aimed at enabling cooperative intrusion detection. Initially, FL is leveraged to aggregate all learned information from edge servers, thereby disseminating extracted attack characteristics to all participants through gradient sharing. Subsequently, a blockchain-based parameter verification scheme is introduced to safeguard against tampered local parameters affecting the global model. Clients record model parameters in smart contracts deployed on a private chain, and parameter servers verify parameter confidentiality before aggregation, ensuring only valid parameters are considered. Finally, extensive experiments are conducted using an edge IIoT cybersecurity data set comprising 61 features spanning ten protocol layers and five attacks targeting IIoT connectivity protocols. 
Simulation results demonstrate that the proposed scheme significantly enhances intrusion detection accuracy, achieving a threefold improvement when two-thirds of federated nodes are subjected to MITM attacks.", "year": 2024, "venue": "IEEE Internet of Things Journal", "authors": [ "Dingde Jiang", "Zhihao Wang", "Ye Wang", "Lizhuang Tan", "Jian Wang", "Peiying Zhang" ], "externalIds": { "DBLP": "journals/iotj/JiangWWTWZ24", "DOI": "10.1109/JIOT.2024.3406602", "CorpusId": 270460038 }, "url": "https://www.semanticscholar.org/paper/c417c4fab1c581baf3d36ab29fc4e8c53df17071", "referenceCount": 46, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Hierarchical Transfer-Generative Framework for Automating Multianalytical Tasks in Rail Surface Defect Inspection", "abstract": "Rail surface inspection is crucial for ensuring the safety and longevity of rail transport systems, grapples with the challenges posed by the scarcity of defective samples. Additionally, contemporary techniques in this domain typically fail to concurrently identify and localize defects at both image level and pixel levels. Addressing these intricacies, we present a hierarchical transfer-generative framework, the HTg-Net. This innovative framework is geared toward the automation and enhancement of multianalytical tasks in rail surface inspection. The HTg-Net architecture synergistically melds two pivotal subnetworks: 1) the memory-guided generation subnetwork (MGN), which is endowed with a cutting-edge memory mechanism. This mechanism adeptly captures and recalls the typical patterns observed in rail images, facilitating the detection of anomalies or deviations and 2) the attention-focused segmentation subnetwork (ASN) is fortified with a gated attention mechanism and hierarchical weights transferred from MGN, enabling parallel feature extraction and enhancing defect localization. Rigorous evaluations of HTg-Net on three data sets elucidate its superior efficiency and performance over prevailing benchmarks, positioning it as an advanced solution for the comprehensive inspection of rail surface defects.", "year": 2024, "venue": "IEEE Internet of Things Journal", "authors": [ "Tiange Wang", "Zijun Zhang", "K. Tsui" ], "externalIds": { "DBLP": "journals/iotj/WangZT24a", "DOI": "10.1109/JIOT.2024.3374751", "CorpusId": 270315231 }, "url": "https://www.semanticscholar.org/paper/c948f0c522bf42526f95f475e82f478007c88ced", "referenceCount": 54, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Adaptive Incentive for Cross-Silo Federated Learning in IIoT: A Multiagent Reinforcement Learning Approach", "abstract": "In the Industrial Internet of Things (IIoT), cross-silo federated learning (CSFL) enables entities, such as manufacturers and suppliers to train global models for optimizing production processes while ensuring data privacy. A well-designed incentive mechanism is essential to persuade clients to contribute data resources. However, existing methodologies overlook the dynamic nature of the training process, where the accuracy of the globally trained model and the client’s data ownership change over time. Furthermore, the majority of previous research assumes a defined functional relationship between the data contribution and the model accuracy, which is infeasible in realistic and dynamic training environments. 
To address these challenges, we design a novel adaptive mechanism for CSFL that inspires organizations to contribute data resources in a dynamic training environment with the aim of maximizing their long-term payoffs. This mechanism leverages multiagent reinforcement learning (MARL) to ascertain near-optimal data contribution strategies from potential game histories without necessitating private organizational information or a precise accuracy function. Experimental results indicate that our mechanism achieves adaptive incentive in dynamic environments and effectively enhances the long-term payoffs of organizations.", "year": 2024, "venue": "IEEE Internet of Things Journal", "authors": [ "Shijing Yuan", "Beiyu Dong", "Hongtao Lvy", "Hongze Liu", "Hongyang Chen", "Chentao Wu", "Song Guo", "Yue Ding", "Jie Li" ], "externalIds": { "DBLP": "journals/iotj/YuanDLLCWGDL24", "DOI": "10.1109/JIOT.2023.3315770", "CorpusId": 262099211 }, "url": "https://www.semanticscholar.org/paper/dd4360114688c46b4ef83e54e059f47c0385dfe1", "referenceCount": 50, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FASCNet: An Edge-Computational Defect Detection Model for Industrial Parts", "abstract": "Online inspection of industrial parts becomes increasingly important for factories to improve production quality, where small sizes and high computations increase difficulties in the defect detection process. In order to solve these issues, we propose a defect detection model to identify detailed defects with edge computations, named fast attention segmentation classification network (FASCNet). In the model, we design skip connection attention (SCA) with edge average attention (eAA), edge sum attention (eSA), and attention for segmentation (AS) to catch complex features of extremely tiny defects. Additionally, global mixed pooling (GMP) operation is explored to adaptively obtain severe mapping into low dimensional feature domains. Furthermore, a tensor freeze decomposition (TFD) is discovered to reduce model computation and complexity for edge devices. Finally, we achieve an average precision (AP) of 97.86% and giga floating point operations (GFLOPs) of 64.4495 on the real-world sprocket surface data set, which has 16.06% of GFLOPs of the current state-of-the-art method while only lowering the AP by 0.58%. On the public data set, we achieve an AP of 98.83% and GFLOPs of 92.0379 on the Severstal Steel data set. The experimental results indicate that our model performs more effectively than other state-of-the-art approaches in terms of both accuracy and computational cost simultaneously.", "year": 2024, "venue": "IEEE Internet of Things Journal", "authors": [ "Jie Li", "Rui Wu", "Shuai Zhang", "Yanli Chen", "Zhicheng Dong" ], "externalIds": { "DBLP": "journals/iotj/LiWZCD24", "DOI": "10.1109/JIOT.2023.3313164", "CorpusId": 261643582 }, "url": "https://www.semanticscholar.org/paper/bca08900aacee05ce3536cd649bdb5fd04b14871", "referenceCount": 46, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Heterogeneous Federated Learning: State-of-the-art and Research Challenges", "abstract": "Federated learning (FL) has drawn increasing attention owing to its potential use in large-scale industrial applications. Existing FL works mainly focus on model homogeneous settings. 
However, practical FL typically faces the heterogeneity of data distributions, model architectures, network environments, and hardware devices among participant clients. Heterogeneous Federated Learning (HFL) is much more challenging, and corresponding solutions are diverse and complex. Therefore, a systematic survey on this topic about the research challenges and state-of-the-art is essential. In this survey, we firstly summarize the various research challenges in HFL from five aspects: statistical heterogeneity, model heterogeneity, communication heterogeneity, device heterogeneity, and additional challenges. In addition, recent advances in HFL are reviewed and a new taxonomy of existing HFL methods is proposed with an in-depth analysis of their pros and cons. We classify existing methods from three different levels according to the HFL procedure: data-level, model-level, and server-level. Finally, several critical and promising future research directions in HFL are discussed, which may facilitate further developments in this field. A periodically updated collection on HFL is available at https://github.com/marswhu/HFL_Survey.", "year": 2023, "venue": "ACM Computing Surveys", "authors": [ "Mang Ye", "Xiuwen Fang", "Bo Du", "PongChi Yuen", "Dacheng Tao" ], "externalIds": { "ArXiv": "2307.10616", "DBLP": "journals/csur/YeFDYT24", "DOI": "10.1145/3625558", "CorpusId": 259991420 }, "url": "https://www.semanticscholar.org/paper/e2628754de142e2a9733f5889124fef540b58005", "referenceCount": 264, "citationCount": 94, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedALA: Adaptive Local Aggregation for Personalized Federated Learning", "abstract": "A key challenge in federated learning (FL) is the statistical heterogeneity that impairs the generalization of the global model on each client. To address this, we propose a method Federated learning with Adaptive Local Aggregation (FedALA) by capturing the desired information in the global model for client models in personalized FL. The key component of FedALA is an Adaptive Local Aggregation (ALA) module, which can adaptively aggregate the downloaded global model and local model towards the local objective on each client to initialize the local model before training in each iteration. To evaluate the effectiveness of FedALA, we conduct extensive experiments with five benchmark datasets in computer vision and natural language processing domains. FedALA outperforms eleven state-of-the-art baselines by up to 3.27% in test accuracy. Furthermore, we also apply ALA module to other federated learning methods and achieve up to 24.19% improvement in test accuracy. Code is available at https://github.com/TsingZ0/FedALA.", "year": 2022, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Jianqing Zhang", "Yang Hua", "Hao Wang", "Tao Song", "Zhengui Xue", "Ruhui Ma", "Haibing Guan" ], "externalIds": { "DBLP": "conf/aaai/ZhangHWSXMG23", "ArXiv": "2212.01197", "DOI": "10.1609/aaai.v37i9.26330", "CorpusId": 254220922 }, "url": "https://www.semanticscholar.org/paper/8022ea93560908bb57b5ff0a668f21079d0ccdaa", "referenceCount": 36, "citationCount": 90, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A New Cycle-consistent Adversarial Networks With Attention Mechanism for Surface Defect Classification With Small Samples", "abstract": "Surface defect detection is the essential process to ensure the quality of products. 
Surface defect classification (SDC) based on deep learning (DL) has shown its great potential. However, a well-trained SDC model usually requires a large amount of training data, and the small intraclass differences between the defect and normal samples also degrade the performance of the SDC model. To overcome these drawbacks, this article proposes a new cycle-consistent adversarial network with an attention mechanism (AttenCGAN). First, AttenCGAN is used for synthesizing defect samples to enlarge the sample volume. Second, the attention mechanism is adopted for feature enhancement by finding the discriminative parts of the samples and enlarging the differences among the samples. AttenCGAN is tested on KolektorSDD and DAGM2007 datasets, and its accuracies are 98.53% and 99.57% with only a few samples. The experimental results show that AttenCGAN outperforms other published SDC methods based on DL and machine learning, which validates its potential.", "year": 2022, "venue": "IEEE Transactions on Industrial Informatics", "authors": [ "Long Wen", "You Wang", "Xinyu Li" ], "externalIds": { "DBLP": "journals/tii/WenWL22", "DOI": "10.1109/TII.2022.3168432", "CorpusId": 248279317 }, "url": "https://www.semanticscholar.org/paper/70835a8a85f344fbf188c5da2baeb5c36177fefb", "referenceCount": 39, "citationCount": 32, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Small samples data augmentation and improved MobileNet for surface defects classification of hot-rolled steel strips", "abstract": "Surface defects in hot-rolled steel strips are one of the common product problems for the steel industry, which harm the product appearance, affect the corrosion and wear resistance, and shorten the product service life. Natural defect samples are sparse and category-imbalanced, and manual annotation is expensive. Therefore, it is crucial to study the data augmentation and classification methods for small sample surface defects. To solve the above problems and improve the accuracy and real-time performance of defect classification, we propose a random offline data augmentation algorithm (Random-CutMix) and an improved MobileNet architecture (SP-MobileNet). The Random-CutMix algorithm expands the dataset by random sampling to balance the number of each defect class. The SP-MobileNet combines the inverse residual module with the channel shuffle mechanism (CSIn-Module) and pyramid split attention (PSA) module, which facilitates cross-group information flow and improves model representation capability and generalization performance with low computational cost. The accuracy, recall, F1 score, parameters, computational complexity, and frame rate of SP-MobileNet with Random-CutMix on the X-SDD dataset were 95.97%, 95.22%, 95.46%, 6.5 M, 0.54 G, and 72 FPS, respectively.
The experiment results indicate that our method outperforms the state-of-the-art methods and provides an effective trade-off between accuracy and instantaneity in actual industrial production.", "year": 2022, "venue": "Journal of Electronic Imaging (JEI)", "authors": [ "Liyuan Lin", "Ying Wang", "Shuxian Zhao", "Jinlong Liu", "Shun Zhang", "Gaoyan Zhang" ], "externalIds": { "DOI": "10.1117/1.JEI.31.6.063056", "CorpusId": 255115670 }, "url": "https://www.semanticscholar.org/paper/54da10f927a75a75aadbf96ca98a4978bb07ab31", "referenceCount": 47, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Engineering" ] }, { "title": "Recent Advances on Federated Learning for Cybersecurity and Cybersecurity for Federated Learning for Internet of Things", "abstract": "Decentralized paradigm in the field of cybersecurity and machine learning (ML) for the emerging Internet of Things (IoT) has gained a lot of attention from the government, academia, and industries in recent years. Federated cybersecurity (FC) is regarded as a revolutionary concept to make the IoT safer and more efficient in the future. This emerging concept has the potential of detecting security threats, taking countermeasures, and limiting the spreading of threats over the IoT network system efficiently. An objective of cybersecurity is achieved by forming the federation of the learned and shared model on top of various participants. Federated learning (FL), which is regarded as a privacy-aware ML model, is particularly useful to secure the vulnerable IoT environment. In this article, we start with background and comparison of centralized learning, distributed on-site learning, and FL, which is then followed by a survey of the application of FL to cybersecurity for IoT. This survey primarily focuses on the security aspect but it also discusses several approaches that address the performance issues (e.g., accuracy, latency, resource constraint, and others) associated with FL, which may impact the security and overall performance of the IoT. To anticipate the future evolution of this new paradigm, we discuss the main ongoing research efforts, challenges, and research trends in this area. With this article, readers can have a more thorough understanding of FL for cybersecurity as well as cybersecurity for FL, different security attacks, and countermeasures.", "year": 2022, "venue": "IEEE Internet of Things Journal", "authors": [ "Bimal Ghimire", "D. Rawat" ], "externalIds": { "DBLP": "journals/iotj/GhimireR22", "DOI": "10.1109/jiot.2022.3150363", "CorpusId": 246766565 }, "url": "https://www.semanticscholar.org/paper/ef8f8dc9eff937d65a312ee619c16cf06eb3e456", "referenceCount": 0, "citationCount": 205, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Trusted AI in Multiagent Systems: An Overview of Privacy and Security for Distributed Learning", "abstract": "Motivated by the advancing computational capacity of distributed end-user equipment (UE), as well as the increasing concerns about sharing private data, there has been considerable recent interest in machine learning (ML) and artificial intelligence (AI) that can be processed on distributed UEs. Specifically, in this paradigm, parts of an ML process are outsourced to multiple distributed UEs. Then, the processed information is aggregated on a certain level at a central server, which turns a centralized ML process into a distributed one and brings about significant benefits. 
However, this new distributed ML paradigm raises new risks in terms of privacy and security issues. In this article, we provide a survey of the emerging security and privacy risks of distributed ML from a unique perspective of information exchange levels, which are defined according to the key steps of an ML process, i.e., we consider the following levels: 1) the level of preprocessed data; 2) the level of learning models; 3) the level of extracted knowledge; and 4) the level of intermediate results. We explore and analyze the potential of threats for each information exchange level based on an overview of current state-of-the-art attack mechanisms and then discuss the possible defense methods against such threats. Finally, we complete the survey by providing an outlook on the challenges and possible directions for future research in this critical area.", "year": 2022, "venue": "Proceedings of the IEEE", "authors": [ "Chuan Ma", "Jun Li", "Kang Wei", "Bo Liu", "Ming Ding", "Long Yuan", "Zhu Han", "H. Poor" ], "externalIds": { "ArXiv": "2202.09027", "DBLP": "journals/pieee/MaLWLDYHP23", "DOI": "10.1109/JPROC.2023.3306773", "CorpusId": 246996518 }, "url": "https://www.semanticscholar.org/paper/b8a845e3f5fda4aacf4c4c9b20d1c7fe9853b0ae", "referenceCount": 357, "citationCount": 29, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "X-SDD: A New Benchmark for Hot Rolled Steel Strip Surface Defects Detection", "abstract": "It is important to accurately classify the defects in hot rolled steel strip since the detection of defects in hot rolled steel strip is closely related to the quality of the final product. The lack of actual hot-rolled strip defect data sets currently limits further research on the classification of hot-rolled strip defects to some extent. In real production, the convolutional neural network (CNN)-based algorithm has some difficulties, for example, the algorithm is not particularly accurate in classifying some uncommon defects. Therefore, further research is needed on how to apply deep learning to the actual detection of defects on the surface of hot rolled steel strip. In this paper, we proposed a hot rolled steel strip defect dataset called Xsteel surface defect dataset (X-SDD) which contains seven typical types of hot rolled strip defects with a total of 1360 defect images. Compared with the six defect types of the commonly used NEU surface defect database (NEU-CLS), our proposed X-SDD contains more types. Then, we adopt the newly proposed RepVGG algorithm and combine it with the spatial attention (SA) mechanism to verify the effect on the X-SDD. Finally, we apply multiple algorithms to test on our proposed X-SDD to provide the corresponding benchmarks. The test results show that our algorithm achieves an accuracy of 95.10% on the testset, which exceeds other comparable algorithms by a large margin. 
Meanwhile, our algorithm achieves the best results in Macro-Precision, Macro-Recall, and Macro-F1-score metrics.", "year": 2021, "venue": "Symmetry", "authors": [ "Xinglong Feng", "Xian-wen Gao", "Ling Luo" ], "externalIds": { "DBLP": "journals/symmetry/FengGL21", "DOI": "10.3390/sym13040706", "CorpusId": 234753289 }, "url": "https://www.semanticscholar.org/paper/ff8ad53f6591409b3dd1c7947ca32d49ca3dc569", "referenceCount": 45, "citationCount": 82, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Automatic Detection and Classification of Steel Surface Defect Using Deep Convolutional Neural Networks", "abstract": "Automatic detection of steel surface defects is very important for product quality control in the steel industry. However, the traditional method cannot be well applied in the production line, because of its low accuracy and slow running speed. The current, popular algorithm (based on deep learning) also has the problem of low accuracy, and there is still a lot of room for improvement. This paper proposes a method combining improved ResNet50 and enhanced faster region convolutional neural networks (faster R-CNN) to reduce the average running time and improve the accuracy. First, the image is input into the improved ResNet50 model, which adds a deformable convolution network (DCN) and improved cutout to classify samples with and without defects. If the probability of having a defect is less than 0.3, the algorithm directly outputs the sample without defects. Otherwise, the samples are further input into the improved faster R-CNN, which adds spatial pyramid pooling (SPP), enhanced feature pyramid networks (FPN), and matrix NMS. The final output is the location and classification of the defect in the sample, or an indication that the sample has no defect. By analyzing the data set obtained in the real factory environment, the accuracy of this method can reach 98.2%. At the same time, the average running time is faster than other models.", "year": 2021, "venue": "Metals", "authors": [ "Shuai Wang", "Xiaojun Xia", "Lanqing Ye", "Binbin Yang" ], "externalIds": { "MAG": "3134639409", "DOI": "10.3390/MET11030388", "CorpusId": 233907788 }, "url": "https://www.semanticscholar.org/paper/1f7c5252b1225008916c3c098bd1f17d6025333e", "referenceCount": 44, "citationCount": 79, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Exploiting Shared Representations for Personalized Federated Learning", "abstract": "Deep neural networks have shown the ability to extract universal feature representations from data such as images and text that have been useful for a variety of learning tasks. However, the fruits of representation learning have yet to be fully realized in federated settings. Although data in federated settings is often non-i.i.d. across clients, the success of centralized deep learning suggests that data often shares a global feature representation, while the statistical heterogeneity across clients or tasks is concentrated in the labels. Based on this intuition, we propose a novel federated learning framework and algorithm for learning a shared data representation across clients and unique local heads for each client. Our algorithm harnesses the distributed computational power across clients to perform many local updates with respect to the low-dimensional local parameters for every update of the representation.
We prove that this method obtains linear convergence to the ground-truth representation with near-optimal sample complexity in a linear setting, demonstrating that it can efficiently reduce the problem dimension for each client. This result is of interest beyond federated learning to a broad class of problems in which we aim to learn a shared low-dimensional representation among data distributions, for example in meta-learning and multi-task learning. Further, extensive experimental results show the empirical improvement of our method over alternative personalized federated learning approaches in federated environments with heterogeneous data.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Liam Collins", "Hamed Hassani", "Aryan Mokhtari", "S. Shakkottai" ], "externalIds": { "DBLP": "conf/icml/CollinsHMS21", "ArXiv": "2102.07078", "CorpusId": 231924497 }, "url": "https://www.semanticscholar.org/paper/ffd393dacee23e476bd8eb0802dec86f2296b36c", "referenceCount": 54, "citationCount": 510, "influentialCitationCount": 111, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Blockchain Assisted Decentralized Federated Learning (BLADE-FL): Performance Analysis and Resource Allocation", "abstract": "Federated learning (FL), as a distributed machine learning paradigm, promotes personal privacy by local data processing at each client. However, relying on a centralized server for model aggregation, standard FL is vulnerable to server malfunctions, untrustworthy server, and external attacks. To address this issue, we propose a decentralized FL framework by integrating blockchain into FL, namely, blockchain assisted decentralized federated learning (BLADE-FL). In a round of the proposed BLADE-FL, each client broadcasts the trained model to other clients, aggregates its own model with received ones, and then competes to generate a block before its local training of the next round. We evaluate the learning performance of BLADE-FL, and develop an upper bound on the global loss function. Then we verify that this bound is convex with respect to the number of overall aggregation rounds K, and optimize the computing resource allocation for minimizing the upper bound. We also note that there is a critical problem of training deficiency, caused by lazy clients who plagiarize others trained models and add artificial noises to disguise their cheating behaviors. Focusing on this problem, we explore the impact of lazy clients on the learning performance of BLADE-FL, and characterize the relationship among the optimal K, the learning parameters, and the proportion of lazy clients. Based on MNIST and Fashion-MNIST datasets, we show that the experimental results are consistent with the analytical ones. 
To be specific, the gap between the developed upper bound and experimental results is lower than 5%, and the optimized K based on the upper bound can effectively minimize the loss function.", "year": 2021, "venue": "IEEE Transactions on Parallel and Distributed Systems", "authors": [ "Jun Li", "Yumeng Shao", "Kang Wei", "Ming Ding", "Chuan Ma", "Long Shi", "Zhu Han", "Vincent Poor" ], "externalIds": { "ArXiv": "2101.06905", "DBLP": "journals/tpds/LiSWDMSHP22", "DOI": "10.1109/TPDS.2021.3138848", "CorpusId": 231632079 }, "url": "https://www.semanticscholar.org/paper/a876b8f62355b0500c3d1720c6bce662a268741a", "referenceCount": 57, "citationCount": 129, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Ditto: Fair and Robust Federated Learning Through Personalization", "abstract": "Fairness and robustness are two important concerns for federated learning systems. In this work, we identify that robustness to data and model poisoning attacks and fairness, measured as the uniformity of performance across devices, are competing constraints in statistically heterogeneous networks. To address these constraints, we propose employing a simple, general framework for personalized federated learning, Ditto, that can inherently provide fairness and robustness benefits, and develop a scalable solver for it. Theoretically, we analyze the ability of Ditto to achieve fairness and robustness simultaneously on a class of linear problems. Empirically, across a suite of federated datasets, we show that Ditto not only achieves competitive performance relative to recent personalization methods, but also enables more accurate, robust, and fair models relative to state-of-the-art fair or robust baselines.", "year": 2020, "venue": "International Conference on Machine Learning", "authors": [ "Tian Li", "Shengyuan Hu", "Ahmad Beirami", "Virginia Smith" ], "externalIds": { "DBLP": "conf/icml/00050BS21", "ArXiv": "2012.04221", "CorpusId": 235446706 }, "url": "https://www.semanticscholar.org/paper/31949039a48961f939ac50440c3b4b8504fccceb", "referenceCount": 70, "citationCount": 660, "influentialCitationCount": 111, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Synthetic data augmentation for surface defect detection and classification using deep learning", "abstract": null, "year": 2020, "venue": "Journal of Intelligent Manufacturing", "authors": [ "Saksham Jain", "Gautam Seth", "Arpit Paruthi", "Umang Soni", "Girish Kumar" ], "externalIds": { "MAG": "3100487172", "DBLP": "journals/jim/JainSPSK22", "DOI": "10.1007/s10845-020-01710-x", "CorpusId": 228822798 }, "url": "https://www.semanticscholar.org/paper/8d6384463645f5222b9aff2b990e2d881a86aa56", "referenceCount": 55, "citationCount": 135, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Automated Visual Defect Classification for Flat Steel Surface: A Survey", "abstract": "For a typical surface automated visual inspection (AVI) instrument of planar materials, defect classification is an indispensable part after defect detection, which acts as a crucial precondition for achieving the online quality inspection of end products. In the industrial environment of manufacturing flat steels, this task is awfully difficult due to diverse defect appearances, ambiguous intraclass, and interclass distances. 
This article attempts to present a focused but systematic review of the traditional and emerging automated computer-vision-based defect classification methods by investigating approximately 140 studies on three specific flat steel products of con-casting slabs, hot-rolled steel strips, and cold-rolled steel strips. According to the natural image processing procedure of defect recognition, the diverse approaches are grouped into five successive parts: image acquisition, image preprocessing, feature extraction, feature selection, and defect classifier. Recent literature has been reviewed from an industrial goal-oriented perspective to provide some guidelines for future studies and recommend suitable methods for boosting the surface quality inspection level of AVI instruments.", "year": 2020, "venue": "IEEE Transactions on Instrumentation and Measurement", "authors": [ "Qiwu Luo", "Xiaoxin Fang", "Jiaojiao Su", "Jian Zhou", "Bingxing Zhou", "Chunhua Yang", "Li Liu", "W. Gui", "Lu Tian" ], "externalIds": { "DBLP": "journals/tim/LuoFSZZYLGT20", "MAG": "3102571413", "DOI": "10.1109/TIM.2020.3030167", "CorpusId": 226851091 }, "url": "https://www.semanticscholar.org/paper/c1898f9b88e92c05fe42b48c960bdb95898963fd", "referenceCount": 141, "citationCount": 59, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Edge-Computing-Enabled Unmanned Module Defect Detection and Diagnosis System for Large-Scale Photovoltaic Plants", "abstract": "The power efficiency of photovoltaic (PV) modules is highly correlated with their health status. Under dynamically changing environments, PV defects could spontaneously form and develop into fatal faults during the daily operation of PV power plants. To facilitate defect detection with less human intervention, a nondestructive, contactless, and automatic visual inspection system with the help of unmanned aerial vehicles and edge computing is proposed in this article. During the processing of the incoming data stream, the system may collect some new, unknown, and unlabeled defects that have not been identified yet in the existing database. To distinguish them from the existing ones, a deep embedded restricted cluster algorithm is designed to identify the unknown and unlabeled PV module defects in an unsupervised manner. Limited by the resources of edge devices and the availability of images of PV defects for training, we developed an online solution combining deep learning, data augmentation, and transfer learning to properly address the issues of running resource-hungry applications on edge devices and the lack of training samples faced by the deep learning approaches used in the field. In addition, pointwise convolution layers are introduced into the network to reduce the parameters and the size of the model. With the reduction of the network depth of the deep convolutional neural network model and the features transferred from the learned defects, the resource consumption of our proposed approach is significantly reduced, and thus the approach can be used on a wide range of edge devices to complete defect detection in a timely manner with high accuracy. The experimental results clearly demonstrate the practicality and effectiveness.", "year": 2020, "venue": "IEEE Internet of Things Journal", "authors": [ "Xiaoxia Li", "Wei Li", "Qiang Yang", "W. Yan", "Albert Y.
Zomaya" ], "externalIds": { "MAG": "3012995208", "DBLP": "journals/iotj/Li00YZ20", "DOI": "10.1109/JIOT.2020.2983723", "CorpusId": 216481721 }, "url": "https://www.semanticscholar.org/paper/609583cc33fa1431843b776f3f745ed4b2d83e50", "referenceCount": 37, "citationCount": 29, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "When Federated Learning Meets Blockchain: A New Distributed Learning Paradigm", "abstract": "Motivated by the increasingly powerful computing capabilities of end-user equipment, and by the growing privacy concerns over sharing sensitive raw data, a distributed machine learning paradigm known as federated learning (FL) has emerged. By training models locally at each client and aggregating learning models at a central server, FL has the capability to avoid sharing data directly, thereby reducing privacy leakage. However, the conventional FL framework relies heavily on a single central server, and it may fail if such a server behaves maliciously. To address this single point of failure, in this work, a blockchain-assisted decentralized FL framework is investigated, which can prevent malicious clients from poisoning the learning process, and thus provides a self-motivated and reliable learning environment for clients. In this framework, the model aggregation process is fully decentralized and the tasks of training for FL and mining for blockchain are integrated into each participant. Privacy and resource-allocation issues are further investigated in the proposed framework, and a critical and unique issue inherent in the proposed framework is disclosed. In particular, a lazy client can simply duplicate models shared by other clients to reap benefits without contributing its resources to FL. To address these issues, analytical and experimental results are provided to shed light on possible solutions, i.e., adding noise to achieve local differential privacy and using pseudo-noise (PN) sequences as watermarks to detect lazy clients.", "year": 2020, "venue": "IEEE Computational Intelligence Magazine", "authors": [ "Chuan Ma", "Jun Li", "Ming Ding", "Long Shi", "Taotao Wang", "Zhu Han", "H. Poor" ], "externalIds": { "DBLP": "journals/corr/abs-2009-09338", "MAG": "3087463235", "ArXiv": "2009.09338", "DOI": "10.1109/mci.2022.3180932", "CorpusId": 221818945 }, "url": "https://www.semanticscholar.org/paper/198ecb52a6252498febcdd5b9e8a3b376c04e87d", "referenceCount": 23, "citationCount": 117, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Metallic Surface Defect Detection: The New Benchmark and Detection Network", "abstract": "Metallic surface defect detection is an essential and necessary process to control the qualities of industrial products. However, due to the limited data scale and defect categories, existing defect datasets are generally unavailable for the deployment of the detection model. To address this problem, we contribute a new dataset called GC10-DET for large-scale metallic surface defect detection. The GC10-DET dataset has great challenges on defect categories, image number, and data scale. Besides, traditional detection approaches are poor in both efficiency and accuracy for the complex real-world environment. Thus, we also propose a novel end-to-end defect detection network (EDDN) based on the Single Shot MultiBox Detector. The EDDN model can deal with defects with different scales. 
Furthermore, a hard negative mining method is designed to alleviate the problem of data imbalance, while some data augmentation methods are adopted to enrich the training data for the expensive data collection problem. Finally, the extensive experiments on two datasets demonstrate that the proposed method is robust and can meet accuracy requirements for metallic defect detection.", "year": 2020, "venue": "Italian National Conference on Sensors", "authors": [ "Xiaoming Lv", "F. Duan", "Jia-jia Jiang", "Xiao Fu", "Lin Gan" ], "externalIds": { "PubMedCentral": "7146379", "MAG": "3012374719", "DBLP": "journals/sensors/LvDJFG20", "DOI": "10.3390/s20061562", "CorpusId": 212707205, "PubMed": "32168887" }, "url": "https://www.semanticscholar.org/paper/e663e20b30a2fdb87af2a31938f99d0cdfb1c417", "referenceCount": 41, "citationCount": 195, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Automated Visual Defect Detection for Flat Steel Surface: A Survey", "abstract": "Automated computer-vision-based defect detection has received much attention with the increasing surface quality assurance demands for the industrial manufacturing of flat steels. This article attempts to present a comprehensive survey on surface defect detection technologies by reviewing about 120 publications over the last two decades for three typical flat steel products of con-casting slabs and hot- and cold-rolled steel strips. According to the nature of algorithms as well as image features, the existing methodologies are categorized into four groups: statistical, spectral, model-based, and machine learning. These works are summarized in this review to enable easy referral to suitable methods for diverse application scenarios in steel mills. Realization recommendations and future research trends are also addressed at an abstract level.", "year": 2020, "venue": "IEEE Transactions on Instrumentation and Measurement", "authors": [ "Qiwu Luo", "Xiaoxin Fang", "Li Liu", "Chunhua Yang", "Yichuang Sun" ], "externalIds": { "DBLP": "journals/tim/LuoFLYS20", "MAG": "2998291476", "DOI": "10.1109/TIM.2019.2963555", "CorpusId": 210177304 }, "url": "https://www.semanticscholar.org/paper/f5b9b024b3e3d7207225365bd52a4a0666e0f9a6", "referenceCount": 125, "citationCount": 236, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Path Planning for UAV-Mounted Mobile Edge Computing With Deep Reinforcement Learning", "abstract": "In this letter, we study an unmanned aerial vehicle (UAV)-mounted mobile edge computing network, where the UAV executes computational tasks offloaded from mobile terminal users (TUs) and the motion of each TU follows a Gauss-Markov random model. To ensure the quality-of-service (QoS) of each TU, the UAV with limited energy dynamically plans its trajectory according to the locations of mobile TUs. Towards this end, we formulate the problem as a Markov decision process, wherein the UAV trajectory and UAV-TU association are modeled as the parameters to be optimized. To maximize the system reward and meet the QoS constraint, we develop a QoS-based action selection policy in the proposed algorithm based on double deep Q-network. 
Simulations show that the proposed algorithm converges more quickly and achieves a higher sum throughput than conventional algorithms.", "year": 2020, "venue": "IEEE Transactions on Vehicular Technology", "authors": [ "Qian Liu", "Long Shi", "Linlin Sun", "Jun Li", "Ming Ding", "Feng Shu" ], "externalIds": { "DBLP": "journals/corr/abs-2001-10268", "MAG": "3013960924", "ArXiv": "2001.10268", "DOI": "10.1109/TVT.2020.2982508", "CorpusId": 210932648 }, "url": "https://www.semanticscholar.org/paper/5fed15c749a98ffe28f638b95e686ceb4c5963a8", "referenceCount": 21, "citationCount": 166, "influentialCitationCount": 15, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Federated Learning with Personalization Layers", "abstract": "The emerging paradigm of federated learning strives to enable collaborative training of machine learning models on the network edge without centrally aggregating raw data, hence improving data privacy. This sharply deviates from traditional machine learning and necessitates the design of algorithms robust to various sources of heterogeneity. Specifically, statistical heterogeneity of data across user devices can severely degrade the performance of standard federated averaging for traditional machine learning applications like personalization with deep learning. This paper proposes FedPer, a base + personalization layer approach for federated training of deep feedforward neural networks, which can combat the ill-effects of statistical heterogeneity. We demonstrate the effectiveness of FedPer for non-identical data partitions of CIFAR datasets and on a personalized image aesthetics dataset from Flickr.", "year": 2019, "venue": "arXiv.org", "authors": [ "Manoj Ghuhan Arivazhagan", "V. Aggarwal", "Aaditya Kumar Singh", "Sunav Choudhary" ], "externalIds": { "ArXiv": "1912.00818", "DBLP": "journals/corr/abs-1912-00818", "MAG": "2990789643", "CorpusId": 208526865 }, "url": "https://www.semanticscholar.org/paper/35aebe08b34e5cb0d012a16563e5c3f6fd17a906", "referenceCount": 21, "citationCount": 632, "influentialCitationCount": 82, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Classification of Steel Surface Defect Using Convolutional Neural Network with Few Images", "abstract": "Classification of steel surface defects in the steel-making industry is essential for detecting defects and analyzing the causes that produce them. This makes it possible to reduce the defect rate of the product and drastically reduce mass defects in the steel-making process. Recently, Deep Learning has been used for defect detection using Convolutional Neural Network (CNN). Compared to the existing rule-based method, defect classification using CNN achieved high performance. However, training a CNN requires hundreds or thousands of images. For defective images, it is difficult to obtain that many samples. To overcome these shortcomings, few-shot learning, which needs only a few images to train a network, can be used. A Siamese Neural Network using a CNN is used for few-shot learning. In this paper, we use few-shot learning with a Siamese Neural Network using a CNN structure with contrastive loss to classify defects from a small number of steel surface defect images.", "year": 2019, "venue": "Asian Control Conference", "authors": [ "Min Su Kim", "Taesu Park", "P.
Park" ], "externalIds": { "DBLP": "conf/ascc/KimPP19", "MAG": "2966400067", "CorpusId": 198146586 }, "url": "https://www.semanticscholar.org/paper/21c8d0baf01c6b2207e4748cac54321b8fc1f456", "referenceCount": 18, "citationCount": 30, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Convolutional Neural Network for Wafer Surface Defect Classification and the Detection of Unknown Defect Class", "abstract": "An automatic defect classification (ADC) system identifies and classifies wafer surface defects using scanning electron microscope images. By classifying defects, manufacturers can determine whether the wafer can be repaired and proceed to the next fabrication step. Current ADC systems have high defect detection performance. However, the classification power is poor. In most work sites, defect classification is performed manually using the naked eye, which is unreliable. This paper proposes an ADC method based on deep learning that automatically classifies various types of wafer surface damage. In contrast to conventional ADC methods, which apply a series of image recognition and machine learning techniques to find features for defect classification, the proposed model adopts a single convolutional neural network (CNN) model that can extract effective features for defect classification without using additional feature extraction algorithms. Moreover, the proposed method can identify defect classes not seen during training by comparing the CNN features of the unseen classes with those of the trained classes. Experiments with real datasets verified that the proposed ADC method achieves high defect classification performance.", "year": 2019, "venue": "IEEE transactions on semiconductor manufacturing", "authors": [ "S. Cheon", "Hankang Lee", "C. Kim", "Seok Hyung Lee" ], "externalIds": { "MAG": "2920311927", "DOI": "10.1109/TSM.2019.2902657", "CorpusId": 116334837 }, "url": "https://www.semanticscholar.org/paper/d643c61ac80c9cf37ee8082334d6fb004b26c624", "referenceCount": 18, "citationCount": 186, "influentialCitationCount": 9, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Generalized Completed Local Binary Patterns for Time-Efficient Steel Surface Defect Classification", "abstract": "Efficient defect classification is one of the most important preconditions to achieve online quality inspection for hot-rolled strip steels. It is extremely challenging owing to various defect appearances, large intraclass variation, ambiguous interclass distance, and unstable gray values. In this paper, a generalized completed local binary patterns (GCLBP) framework is proposed. Two variants of improved completed local binary patterns (ICLBP) and improved completed noise-invariant local-structure patterns (ICNLP) under the GCLBP framework are developed for steel surface defect classification. Different from conventional local binary patterns variants, descriptive information hidden in nonuniform patterns is innovatively excavated for the better defect representation. This paper focuses on the following aspects. First, a lightweight searching algorithm is established for exploiting the dominant nonuniform patterns (DNUPs). Second, a hybrid pattern code mapping mechanism is proposed to encode all the uniform patterns and DNUPs. Third, feature extraction is carried out under the GCLBP framework. Finally, histogram matching is efficiently accomplished by simple nearest-neighbor classifier. 
The classification accuracy and time efficiency are verified on a widely recognized texture database (Outex) and a real-world steel surface defect database [Northeastern University (NEU)]. The experimental results promise that the proposed method can be widely applied in online automatic optical inspection instruments for hot-rolled strip steel.", "year": 2019, "venue": "IEEE Transactions on Instrumentation and Measurement", "authors": [ "Qiwu Luo", "Yichuang Sun", "Pengcheng Li", "Oluyomi Simpson", "Lu Tian", "Yigang He" ], "externalIds": { "MAG": "2884563051", "DBLP": "journals/tim/LuoSLSTH19", "DOI": "10.1109/TIM.2018.2852918", "CorpusId": 61808117 }, "url": "https://www.semanticscholar.org/paper/bf3539e03b000ae7bfe731c987dd6d092ead0b18", "referenceCount": 35, "citationCount": 100, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Optimization in Heterogeneous Networks", "abstract": "Federated Learning is a distributed learning paradigm with two key challenges that differentiate it from traditional distributed optimization: (1) significant variability in terms of the systems characteristics on each device in the network (systems heterogeneity), and (2) non-identically distributed data across the network (statistical heterogeneity). In this work, we introduce a framework, FedProx, to tackle heterogeneity in federated networks. FedProx can be viewed as a generalization and re-parametrization of FedAvg, the current state-of-the-art method for federated learning. While this re-parameterization makes only minor modifications to the method itself, these modifications have important ramifications both in theory and in practice. Theoretically, we provide convergence guarantees for our framework when learning over data from non-identical distributions (statistical heterogeneity), and while adhering to device-level systems constraints by allowing each participating device to perform a variable amount of work (systems heterogeneity). Practically, we demonstrate that FedProx allows for more robust convergence than FedAvg across a suite of realistic federated datasets. In particular, in highly heterogeneous settings, FedProx demonstrates significantly more stable and accurate convergence behavior relative to FedAvg---improving absolute test accuracy by 22% on average.", "year": 2018, "venue": "Conference on Machine Learning and Systems", "authors": [ "Anit Kumar Sahu", "Tian Li", "Maziar Sanjabi", "M. 
Zaheer", "Ameet Talwalkar", "Virginia Smith" ], "externalIds": { "ArXiv": "1812.06127", "DBLP": "conf/mlsys/LiSZSTS20", "MAG": "3038022836", "CorpusId": 59316566 }, "url": "https://www.semanticscholar.org/paper/1284ed4bf6a043ecf8cebca09e4811f1e3b83b65", "referenceCount": 50, "citationCount": 3919, "influentialCitationCount": 852, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Multi-class classification method using twin support vector machines with multi-information for steel surface defects", "abstract": null, "year": 2018, "venue": "", "authors": [ "Mao-xiang Chu", "Xiaoping Liu", "Rongfen Gong", "Liming Liu" ], "externalIds": { "MAG": "2794659401", "DOI": "10.1016/J.CHEMOLAB.2018.03.014", "CorpusId": 126076593 }, "url": "https://www.semanticscholar.org/paper/0349d980cf054c308771a190c8da583388a49636", "referenceCount": 31, "citationCount": 37, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MobileNetV2: Inverted Residuals and Linear Bottlenecks", "abstract": "In this paper we describe a new mobile architecture, MobileNetV2, that improves the state of the art performance of mobile models on multiple tasks and benchmarks as well as across a spectrum of different model sizes. We also describe efficient ways of applying these mobile models to object detection in a novel framework we call SSDLite. Additionally, we demonstrate how to build mobile semantic segmentation models through a reduced form of DeepLabv3 which we call Mobile DeepLabv3. is based on an inverted residual structure where the shortcut connections are between the thin bottleneck layers. The intermediate expansion layer uses lightweight depthwise convolutions to filter features as a source of non-linearity. Additionally, we find that it is important to remove non-linearities in the narrow layers in order to maintain representational power. We demonstrate that this improves performance and provide an intuition that led to this design. Finally, our approach allows decoupling of the input/output domains from the expressiveness of the transformation, which provides a convenient framework for further analysis. We measure our performance on ImageNet [1] classification, COCO object detection [2], VOC image segmentation [3]. We evaluate the trade-offs between accuracy, and number of operations measured by multiply-adds (MAdd), as well as actual latency, and the number of parameters.", "year": 2018, "venue": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition", "authors": [ "M. Sandler", "Andrew G. Howard", "Menglong Zhu", "A. 
Zhmoginov", "Liang-Chieh Chen" ], "externalIds": { "MAG": "2796438033", "DBLP": "conf/cvpr/SandlerHZZC18", "ArXiv": "1801.04381", "DOI": "10.1109/CVPR.2018.00474", "CorpusId": 4555207 }, "url": "https://www.semanticscholar.org/paper/dd9cfe7124c734f5a6fc90227d541d3dbcd72ba4", "referenceCount": 51, "citationCount": 16546, "influentialCitationCount": 2617, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Steel surface defects recognition based on multi-type statistical features and enhanced twin support vector machine", "abstract": null, "year": 2017, "venue": "", "authors": [ "Mao-xiang Chu", "Rongfen Gong", "Song Gao", "Jie Zhao" ], "externalIds": { "MAG": "2766108561", "DOI": "10.1016/J.CHEMOLAB.2017.10.020", "CorpusId": 125753405 }, "url": "https://www.semanticscholar.org/paper/cfabbbf52fca4d872293e8301bb958c60766dc6d", "referenceCount": 33, "citationCount": 73, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Distributed defect recognition on steel surfaces using an improved random forest algorithm with optimal multi-feature-set fusion", "abstract": null, "year": 2017, "venue": "Multimedia tools and applications", "authors": [ "Yalin Wang", "Haibing Xia", "Xiaofeng Yuan", "Ling Li", "Bei Sun" ], "externalIds": { "DBLP": "journals/mta/WangXYLS18", "MAG": "2763152181", "DOI": "10.1007/s11042-017-5238-0", "CorpusId": 29759228 }, "url": "https://www.semanticscholar.org/paper/1b6802b34d5da8bd4e91cdaba59f518ea5b0ad73", "referenceCount": 60, "citationCount": 40, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Communication-Efficient Learning of Deep Networks from Decentralized Data", "abstract": "Modern mobile devices have access to a wealth of data suitable for learning models, which in turn can greatly improve the user experience on the device. For example, language models can improve speech recognition and text entry, and image models can automatically select good photos. However, this rich data is often privacy sensitive, large in quantity, or both, which may preclude logging to the data center and training there using conventional approaches. We advocate an alternative that leaves the training data distributed on the mobile devices, and learns a shared model by aggregating locally-computed updates. We term this decentralized approach Federated Learning. \nWe present a practical method for the federated learning of deep networks based on iterative model averaging, and conduct an extensive empirical evaluation, considering five different model architectures and four datasets. These experiments demonstrate the approach is robust to the unbalanced and non-IID data distributions that are a defining characteristic of this setting. Communication costs are the principal constraint, and we show a reduction in required communication rounds by 10-100x as compared to synchronized stochastic gradient descent.", "year": 2016, "venue": "International Conference on Artificial Intelligence and Statistics", "authors": [ "H. B. McMahan", "Eider Moore", "Daniel Ramage", "S. Hampson", "B. A. Y. 
Arcas" ], "externalIds": { "MAG": "2950745363", "DBLP": "conf/aistats/McMahanMRHA17", "ArXiv": "1602.05629", "CorpusId": 14955348 }, "url": "https://www.semanticscholar.org/paper/d1dbf643447405984eeef098b1b320dee0b3b8a7", "referenceCount": 50, "citationCount": 13806, "influentialCitationCount": 3346, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Residual Learning for Image Recognition", "abstract": "Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers - 8× deeper than VGG nets [40] but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions1, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation.", "year": 2015, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Kaiming He", "X. Zhang", "Shaoqing Ren", "Jian Sun" ], "externalIds": { "DBLP": "conf/cvpr/HeZRS16", "MAG": "2949650786", "ArXiv": "1512.03385", "DOI": "10.1109/cvpr.2016.90", "CorpusId": 206594692 }, "url": "https://www.semanticscholar.org/paper/2c03df8b48bf3fa39054345bafabfeff15bfd11d", "referenceCount": 53, "citationCount": 172713, "influentialCitationCount": 28229, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Defect Detection Using Shuffle Net-CA-SSD Lightweight Network for Turbine Blades in IoT", "abstract": null, "year": 2024, "venue": "IEEE Internet of Things Journal", "authors": [ "Huimin Zhao", "Yongshun Gao", "Wu Deng" ], "externalIds": { "DOI": "10.1109/jiot.2024.3409823", "CorpusId": 270304894 }, "url": "https://www.semanticscholar.org/paper/b6ceefbb087276eb62a0874f2007d244ce1cf221", "referenceCount": 0, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Personalized Federated Learning With Differential Privacy and Convergence Guarantee", "abstract": "Personalized federated learning (PFL), as a novel federated learning (FL) paradigm, is capable of generating personalized models for heterogenous clients. Combined with a meta-learning mechanism, PFL can further improve the convergence performance with few-shot training. However, meta-learning based PFL has two stages of gradient descent in each local training round, therefore posing a more serious challenge in information leakage. In this paper, we propose a differential privacy (DP) based PFL (DP-PFL) framework and analyze its convergence performance. 
Specifically, we first design a privacy budget allocation scheme for inner and outer update stages based on the Rényi DP composition theory. Then, we develop two convergence bounds for the proposed DP-PFL framework under convex and non-convex loss function assumptions, respectively. Our developed convergence bounds reveal that 1) there is an optimal size of the DP-PFL model that can achieve the best convergence performance for a given privacy level, and 2) there is an optimal tradeoff among the number of communication rounds, convergence performance and privacy budget. Evaluations on various real-life datasets demonstrate that our theoretical results are consistent with experimental results. The derived theoretical results can guide the design of various DP-PFL algorithms with configurable tradeoff requirements on the convergence performance and privacy levels.", "year": 2023, "venue": "IEEE Transactions on Information Forensics and Security", "authors": [ "Kang Wei", "Jun Li", "Chuan Ma", "Ming Ding", "Wen Chen", "Jun Wu", "M. Tao", "H. Vincent Poor" ], "externalIds": { "DBLP": "journals/tifs/WeiLMDCWTP23", "DOI": "10.1109/TIFS.2023.3293417", "CorpusId": 259677333 }, "url": "https://www.semanticscholar.org/paper/18c81979b342593d5b6001bba51a3f0bffdfcaeb", "referenceCount": 46, "citationCount": 34, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Graph Embedding and Optimal Transport for Few-Shot Classification of Metal Surface Defect", "abstract": "Defect classification exhibits great importance in metal surface defect inspection. Most previous defect classification models are based on fully supervised learning, which requires a large amount of training data with image labels. However, collecting defective images in industrial scenarios is quite difficult due to the well-optimized manufacturing techniques. Besides, image annotation is also expensive and time-consuming. In this article, we propose a novel few-shot defect classification method, which aims to recognize novel defective classes with few labeled samples. Specifically, the proposed method follows a transductive paradigm and consists of two modules, i.e., graph embedding and distribution transformation (GEDT) module and optimal transport (OPT) module. The GEDT module not only makes full use of the relevant correlation information between different features in the support set and the query set but also ensures the consistent distribution of the graph embedding results. Then, the OPT module is leveraged to implement few-shot classification in a transductive manner. 
Finally, experiments are conducted on the proposed metal surface defect dataset, and the results demonstrate that the proposed method achieves the state-of-the-art performance under both one-shot and five-shot settings.", "year": 2022, "venue": "IEEE Transactions on Instrumentation and Measurement", "authors": [ "Weiwei Xiao", "Kechen Song", "Jie Liu", "Yunhui Yan" ], "externalIds": { "DBLP": "journals/tim/XiaoSLY22", "DOI": "10.1109/tim.2022.3169547", "CorpusId": 248349161 }, "url": "https://www.semanticscholar.org/paper/0f49c30a21a7a6c0cdef01494609d59c74c3592d", "referenceCount": 0, "citationCount": 25, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Triplet-Graph Reasoning Network for Few-Shot Metal Generic Surface Defect Segmentation", "abstract": "Metal surface defect segmentation can play an important role in dealing with the issue of quality control during the production and manufacturing stages. There are still two major challenges in industrial applications. One is that the number of metal surface defect samples is severely insufficient, and the other is that most existing algorithms can only be used for specific surface defects and are difficult to generalize to other metal surfaces. In this work, a theory of few-shot metal generic surface defect segmentation is introduced to solve these challenges. Simultaneously, the Triplet-Graph Reasoning Network (TGRNet) and a novel dataset, Surface Defects-$4^{i}$, are proposed to achieve this theory. In our TGRNet, the surface defect triplet (including triplet encoder and trip loss) is proposed and is used to segment background and defect area, respectively. Through the triplet, the few-shot metal surface defect segmentation problem is transformed into a few-shot semantic segmentation problem of defect area and background area. For few-shot semantic segmentation, we propose a method of multi-graph reasoning to explore the similarity relationship between different images. To improve segmentation performance in the industrial scene, an adaptive auxiliary prediction module is proposed. Surface Defects-$4^{i}$ includes multiple categories of metal surface defect images to verify the generalization performance of our TGRNet and adds the nonmetal categories (leather and tile) as extensions. Through extensive comparative experiments and ablation experiments, it is proved that our architecture can achieve state-of-the-art results.", "year": 2021, "venue": "IEEE Transactions on Instrumentation and Measurement", "authors": [ "Yanqi Bao", "Kechen Song", "Jie Liu", "Yanyan Wang", "Yunhui Yan", "Han Yu", "Xingjie Li" ], "externalIds": { "DBLP": "journals/tim/BaoSLWYYL21", "DOI": "10.1109/TIM.2021.3083561", "CorpusId": 235384535 }, "url": "https://www.semanticscholar.org/paper/c3ce677da645368f908879b313234f85628ace43", "referenceCount": 44, "citationCount": 136, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated machine learning: Concept and applications", "abstract": null, "year": 2019, "venue": "ACM Trans. Intell. Syst.
Technol.", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Clients return the updated local encoder E tk and the discrimination loss", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "The server aggregates the encoders following the consensus-aware aggregation mechanism", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Personalized Federated Learning via Backbone Self-Distillation": { "paper_title": "Personalized Federated Learning via Backbone Self-Distillation", "arxiv_id": "2409.15636v1", "keyword": "federate learning", "authors": [ "Pengju Wang", "Bochao Liu", "Dan Zeng", "Chenggang Yan", "Shiming Ge" ], "references": [ { "title": "Communication-Efficient Federated Learning on Non-IID Data Using Two-Step Knowledge Distillation", "abstract": "Federated learning (FL) has shown its great potential for achieving distributed intelligence in privacy-sensitive IoT. However, popular FL approaches, such as FedAvg and its variants share model parameters among clients during the training process and thus cause significant communication overhead in IoT. Moreover, nonindependent and identically distributed (non-IID) data across learning devices severely affect the convergence and speed of FL. To address these challenges, we propose a communication-efficient FL framework based on Two-step Knowledge Distillation, Fed2KD, which boosts the classification accuracy through privacy-preserving data generation while improving communication efficiency through a new knowledge distillation scheme empowered by an attention mechanism and metric learning. The generalization ability of Fed2KD is analyzed from the view of domain adaption. Extensive simulation experiments are conducted on Fashion-MNIST, CIFAR-10, and ImageNet data sets with various non-IID data distributions. The performance results show that Fed2KD can reduce the communication overhead and improve classification accuracy compared to FedAvg and its latest variants.", "year": 2023, "venue": "IEEE Internet of Things Journal", "authors": [ "Hancong Duan", "Geyong", "Hui Wen", "Yuehua Wu", "Jia Hu", "Z. Wang", "Geyong Min" ], "externalIds": { "DBLP": "journals/iotj/WenWHWDM23", "DOI": "10.1109/JIOT.2023.3276865", "CorpusId": 262135098 }, "url": "https://www.semanticscholar.org/paper/1e06ebbcecf93f4a333b701100ac5ef1982ee072", "referenceCount": 49, "citationCount": 6, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedMPT: Federated Learning for Multiple Personalized Tasks Over Mobile Computing", "abstract": "Federated learning (FL) is a privacy-preserving collaborative learning framework that can be used in mobile computing where multiple user devices jointly train a deep learning model without uploading their data to a centralized server. An essential issue of FL is to reduce the significant communication overhead during training. Existing FL schemes mostly address this issue regarding single task learning. 
However, each user generally has multiple related tasks on the mobile device such as multi-content recommendation, and traditional FL schemes need to train an individual model per task which consumes a substantial number of resources. In this work, we formulate an FL problem with multiple personalized tasks, which aims to minimize the communication cost in learning different personalized tasks on each device. To solve the formulated problem, we incorporate multi-task learning into FL which trains a model for multiple tasks concurrently and propose an FL framework named FedMPT. FedMPT modifies the efficient acceleration algorithm and quantization compression strategy delicately to achieve superior performance regarding the communication efficiency. We implement and evaluate FedMPT on two datasets, Multi-MNIST and CelebA, in the FL environment. Experimental results show that FedMPT significantly outperforms the traditional FL scheme considering communication cost and average accuracy.", "year": 2023, "venue": "IEEE Transactions on Network Science and Engineering", "authors": [ "Xinglin Zhang", "Zhaojing Ou", "Zheng Yang" ], "externalIds": { "DBLP": "journals/tnse/ZhangOY23", "DOI": "10.1109/TNSE.2023.3246463", "CorpusId": 257065832 }, "url": "https://www.semanticscholar.org/paper/c7feb1056d4f67e191ddc8ac73ef7e16307344a5", "referenceCount": 66, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Personalized Edge Intelligence via Federated Self-Knowledge Distillation", "abstract": "Federated Learning (FL) is an emerging approach in edge computing for collaboratively training machine learning models among multiple devices, which aims to address limited bandwidth, system heterogeneity, and privacy issues in traditional centralized training. However, the existing federated learning methods focus on learning a shared global model for all devices, which may not always be ideal for different devices. Such situations become even worse when each edge device has its own data distribution or task. In this paper, we study personalized federated learning in which our goal is to train models to perform well for individual clients. We observe that the initialization in each communication round causes the forgetting of historical personalized knowledge. Based on this observation, we propose a novel Personalized Federated Learning (PFL) framework via self-knowledge distillation, named pFedSD. By allowing clients to distill the knowledge of previous personalized models to current local models, pFedSD accelerates the process of recalling the personalized knowledge for the latest initialized clients. Moreover, self-knowledge distillation provides different views of data in feature space to realize an implicit ensemble of local models. 
Extensive experiments on various datasets and settings demonstrate the effectiveness and robustness of pFedSD.", "year": 2023, "venue": "IEEE Transactions on Parallel and Distributed Systems", "authors": [ "Hai Jin", "Dongshan Bai", "Dezhong Yao", "Yutong Dai", "Lin Gu", "Chen Yu", "Lichao Sun" ], "externalIds": { "DBLP": "journals/tpds/JinBYDGYS23", "DOI": "10.1109/TPDS.2022.3225185", "CorpusId": 254089825 }, "url": "https://www.semanticscholar.org/paper/39ed94e10fafc0e57e7a2d587df0a889b7f6a551", "referenceCount": 53, "citationCount": 33, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedALA: Adaptive Local Aggregation for Personalized Federated Learning", "abstract": "A key challenge in federated learning (FL) is the statistical heterogeneity that impairs the generalization of the global model on each client. To address this, we propose a method Federated learning with Adaptive Local Aggregation (FedALA) by capturing the desired information in the global model for client models in personalized FL. The key component of FedALA is an Adaptive Local Aggregation (ALA) module, which can adaptively aggregate the downloaded global model and local model towards the local objective on each client to initialize the local model before training in each iteration. To evaluate the effectiveness of FedALA, we conduct extensive experiments with five benchmark datasets in computer vision and natural language processing domains. FedALA outperforms eleven state-of-the-art baselines by up to 3.27% in test accuracy. Furthermore, we also apply ALA module to other federated learning methods and achieve up to 24.19% improvement in test accuracy. Code is available at https://github.com/TsingZ0/FedALA.", "year": 2022, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Jianqing Zhang", "Yang Hua", "Hao Wang", "Tao Song", "Zhengui Xue", "Ruhui Ma", "Haibing Guan" ], "externalIds": { "DBLP": "conf/aaai/ZhangHWSXMG23", "ArXiv": "2212.01197", "DOI": "10.1609/aaai.v37i9.26330", "CorpusId": 254220922 }, "url": "https://www.semanticscholar.org/paper/8022ea93560908bb57b5ff0a668f21079d0ccdaa", "referenceCount": 36, "citationCount": 90, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Feeling Without Sharing: A Federated Video Emotion Recognition Framework Via Privacy-Agnostic Hybrid Aggregation", "abstract": "The explosion of video data brings new opportunities and challenges for emotion recognition. Video emotion applications have great commercial value, but the potential to involve illegal snooping on personal feelings has led to controversy over privacy protection. The federated learning (FL) paradigm can substantially address the growing public concerns about data privacy in video emotion recognition. However, conventional FL methods perform poorly due to the uniqueness of the task: the data are heterogeneous across clients induced by emotional label skew and cross-culture expression differences. To mitigate the heterogeneous data, we propose EmoFed, a practical framework of federated learning video-based emotion recognition via multi-group clustering and privacy-agnostic hybrid aggregation. It yields a generically applicable and improved model while protecting privacy, which trains local models under group-aware personalized aggregation. 
To further encourage communicating comprehensive and privacy-agnostic information among clients, we upload model parameters of both the global layers and personalization layers to the server. We utilize the homomorphically encrypted method for personalization layers, which incurs no learning accuracy loss since no noise is added to the model updates during the encryption/decryption process. The proposed method works on video-based emotion recognition tasks to predict actors' emotional expressions and induced emotion by viewers. Extensive experiments and ablation studies on four benchmarks have demonstrated the efficacy and practicability of our method.", "year": 2022, "venue": "ACM Multimedia", "authors": [ "Fan Qi", "Zixin Zhang", "Xianshan Yang", "Huaiwen Zhang", "Changsheng Xu" ], "externalIds": { "DBLP": "conf/mm/QiZYZX22", "DOI": "10.1145/3503161.3548278", "CorpusId": 252782271 }, "url": "https://www.semanticscholar.org/paper/3d133dafd5b5b63ddd40630d7fa4b4ecca17ccdb", "referenceCount": 73, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Personalized Federated Learning on Non-IID Data via Group-based Meta-learning", "abstract": "Personalized federated learning (PFL) has emerged as a paradigm to provide a personalized model that can fit the local data distribution of each client. One natural choice for PFL is to leverage the fast adaptation capability of meta-learning, where it first obtains a single global model, and each client achieves a personalized model by fine-tuning the global one with its local data. However, existing meta-learning-based approaches implicitly assume that the data distribution among different clients is similar, which may not be applicable due to the property of data heterogeneity in federated learning. In this work, we propose a Group-based Federated Meta-Learning framework, called G-FML, which adaptively divides the clients into groups based on the similarity of their data distribution, and the personalized models are obtained with meta-learning within each group. In particular, we develop a simple yet effective grouping mechanism to adaptively partition the clients into multiple groups. Our mechanism ensures that each group is formed by the clients with similar data distribution such that the group-wise meta-model can achieve “personalization” at large. By doing so, our framework can be generalized to a highly heterogeneous environment. We evaluate the effectiveness of our proposed G-FML framework on three heterogeneous benchmarking datasets. The experimental results show that our framework improves the model accuracy by up to 13.15% relative to the state-of-the-art federated meta-learning.", "year": 2022, "venue": "ACM Transactions on Knowledge Discovery from Data", "authors": [ "Lei Yang", "Jiaming Huang", "Wanyu Lin", "Jiannong Cao" ], "externalIds": { "DBLP": "journals/tkdd/YangHLC23", "DOI": "10.1145/3558005", "CorpusId": 251724303 }, "url": "https://www.semanticscholar.org/paper/0ddab3ca23cfe726bb2cc5fc755d4031201a7bc5", "referenceCount": 53, "citationCount": 40, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedMSplit: Correlation-Adaptive Federated Multi-Task Learning across Multimodal Split Networks", "abstract": "With the advancement of data collection techniques, end users are interested in how different types of data can collaborate to improve our life experiences. 
Multimodal Federated Learning (MFL) is an emerging area allowing many distributed clients, each of which can collect data from multiple types of sensors, to participate in the training of some multimodal data-related models without sharing their data. In this paper, we address a novel challenging issue in MFL, the modality incongruity, where clients may have heterogeneous setups of sensors and their local data consists of different combinations of modalities. With the modality incongruity, clients may solve different tasks on different parameter spaces, which escalates the difficulties in dealing with the statistical heterogeneity problem of federated learning; also, it would be hard to perform accurate model aggregation across different types of clients. To tackle these challenges, in this work, we propose the FedMSplit framework, which allows federated training over multimodal distributed data without assuming similar active sensors in all clients. The key idea is to employ a dynamic and multi-view graph structure to adaptively capture the correlations amongst multimodal client models. More specifically, we split client models into smaller shareable blocks and allow each type of blocks to provide a specific view on client relationships. With the graph representation, the underlying correlations between clients can be captured as the edge features in the multi-view graph, and then be utilized to promote local model relations through the neighborhood message passing in the graph. Our experimental results demonstrate the effectiveness of our method under different sensor setups with statistical heterogeneity.", "year": 2022, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Jiayi Chen", "Aidong Zhang" ], "externalIds": { "DBLP": "conf/kdd/ChenZ22", "DOI": "10.1145/3534678.3539384", "CorpusId": 251518178 }, "url": "https://www.semanticscholar.org/paper/e22e7d258cd90f1ce1df7fb31c985b41dddd0fb8", "referenceCount": 42, "citationCount": 53, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated optimization via knowledge codistillation", "abstract": null, "year": 2021, "venue": "Expert systems with applications", "authors": [ "Xuanming Ni", "Xinyuan Shen", "Huimin Zhao" ], "externalIds": { "DBLP": "journals/eswa/NiSZ22", "DOI": "10.1016/j.eswa.2021.116310", "CorpusId": 245135621 }, "url": "https://www.semanticscholar.org/paper/ef3a5cbd97e65f39a7cbc9eb3bd7d13ed970e544", "referenceCount": 21, "citationCount": 20, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated benchmarking of medical artificial intelligence with MedPerf", "abstract": null, "year": 2021, "venue": "Nature Machine Intelligence", "authors": [ "A. Karargyris", "R. Umeton", "Micah J. Sheller", "Alejandro Aristizabal", "Johnu George", "Srini Bala", "Daniel J. Beutel", "Victor Bittorf", "Akshay Chaudhari", "Alexander Chowdhury", "Cody Coleman", "Bala Desinghu", "G. Diamos", "Debo Dutta", "Diane Feddema", "G. Fursin", "Junyi Guo", "Xinyuan Huang", "David Kanter", "Satyananda Kashyap", "N. Lane", "I. Mallick", "P. Mascagni", "Virendra Mehta", "Vivek Natarajan", "Nikolay Nikolov", "N. Padoy", "Gennady Pekhimenko", "V. Reddi", "G. A. Reina", "Pablo Ribalta", "Jacob Rosenthal", "Abhishek Singh", "Jayaraman J. Thiagarajan", "A. Wuest", "M. Xenochristou", "Daguang Xu", "Poonam Yadav", "Michael Rosenthal", "M. Loda", "Jason M. 
Johnson", "Peter Mattson" ], "externalIds": { "PubMedCentral": "11068064", "ArXiv": "2110.01406", "DBLP": "journals/corr/abs-2110-01406", "DOI": "10.1038/s42256-023-00652-2", "CorpusId": 238259637, "PubMed": "38706981" }, "url": "https://www.semanticscholar.org/paper/99ad042e52b52252dc201dd0f1172ccfda0b5a50", "referenceCount": 139, "citationCount": 44, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Communication-efficient federated learning via knowledge distillation", "abstract": null, "year": 2021, "venue": "Nature Communications", "authors": [ "Chuhan Wu", "Fangzhao Wu", "Lingjuan Lyu", "Yongfeng Huang", "Xing Xie" ], "externalIds": { "PubMedCentral": "9018897", "ArXiv": "2108.13323", "DBLP": "journals/corr/abs-2108-13323", "DOI": "10.1038/s41467-022-29763-x", "CorpusId": 237353469, "PubMed": "35440643" }, "url": "https://www.semanticscholar.org/paper/56708004ada46632aa25d1d7a3f2d5b92b54166f", "referenceCount": 72, "citationCount": 261, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "FedED: Federated Learning via Ensemble Distillation for Medical Relation Extraction", "abstract": "Unlike other domains, medical texts are inevitably accompanied by private information, so sharing or copying these texts is strictly restricted. However, training a medical relation extraction model requires collecting these privacy-sensitive texts and storing them on one machine, which comes in conflict with privacy protection. In this paper, we propose a privacy-preserving medical relation extraction model based on federated learning, which enables training a central model with no single piece of private local data being shared or exchanged. Though federated learning has distinct advantages in privacy protection, it suffers from the communication bottleneck, which is mainly caused by the need to upload cumbersome local parameters. To overcome this bottleneck, we leverage a strategy based on knowledge distillation. Such a strategy uses the uploaded predictions of ensemble local models to train the central model without requiring uploading local parameters. Experiments on three publicly available medical relation extraction datasets demonstrate the effectiveness of our method.", "year": 2020, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Dianbo Sui", "Yubo Chen", "Jun Zhao", "Yantao Jia", "Yuan-tao Xie", "Weijian Sun" ], "externalIds": { "DBLP": "conf/emnlp/SuiCZJXS20", "MAG": "3102031770", "ACL": "2020.emnlp-main.165", "DOI": "10.18653/v1/2020.emnlp-main.165", "CorpusId": 226262346 }, "url": "https://www.semanticscholar.org/paper/fefa0bd805beef371a1679a74a1b9f9f8a1baacd", "referenceCount": 58, "citationCount": 100, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Performance Optimization of Federated Person Re-identification via Benchmark Analysis", "abstract": "Federated learning is a privacy-preserving machine learning technique that learns a shared model across decentralized clients. It can alleviate privacy concerns of personal re-identification, an important computer vision task. In this work, we implement federated learning to person re-identification (FedReID) and optimize its performance affected by statistical heterogeneity in the real-world scenario. We first construct a new benchmark to investigate the performance of FedReID. 
This benchmark consists of (1) nine datasets with different volumes sourced from different domains to simulate the heterogeneous situation in reality, (2) two federated scenarios, and (3) an enhanced federated algorithm for FedReID. The benchmark analysis shows that the client-edge-cloud architecture, represented by the federated-by-dataset scenario, has better performance than client-server architecture in FedReID. It also reveals the bottlenecks of FedReID under the real-world scenario, including poor performance of large datasets caused by unbalanced weights in model aggregation and challenges in convergence. Then we propose two optimization methods: (1) To address the unbalanced weight problem, we propose a new method to dynamically change the weights according to the scale of model changes in clients in each training round; (2) To facilitate convergence, we adopt knowledge distillation to refine the server model with knowledge generated from client models on a public dataset. Experiment results demonstrate that our strategies can achieve much better convergence with superior performance on all datasets. We believe that our work will inspire the community to further explore the implementation of federated learning on more computer vision tasks in real-world scenarios.", "year": 2020, "venue": "ACM Multimedia", "authors": [ "Weiming Zhuang", "Yonggang Wen", "Xuesen Zhang", "Xin Gan", "Daiying Yin", "Dongzhan Zhou", "Shuai Zhang", "Shuai Yi" ], "externalIds": { "MAG": "3080411117", "ArXiv": "2008.11560", "DBLP": "conf/mm/Zhuang0ZGYZZY20", "DOI": "10.1145/3394171.3413814", "CorpusId": 221319684 }, "url": "https://www.semanticscholar.org/paper/d0f726095226b23ec63d604d382fea52e2686070", "referenceCount": 31, "citationCount": 83, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Advances and Open Problems in Federated Learning", "abstract": "Federated learning (FL) is a machine learning setting where many clients (e.g. mobile devices or whole organizations) collaboratively train a model under the orchestration of a central server (e.g. service provider), while keeping the training data decentralized. FL embodies the principles of focused data collection and minimization, and can mitigate many of the systemic privacy risks and costs resulting from traditional, centralized machine learning and data science approaches. Motivated by the explosive growth in FL research, this paper discusses recent advances and presents an extensive collection of open problems and challenges.", "year": 2019, "venue": "Found. Trends Mach. Learn.", "authors": [ "P. Kairouz", "H. B. McMahan", "Brendan Avent", "A. Bellet", "M. Bennis", "A. Bhagoji", "Keith Bonawitz", "Zachary B. Charles", "Graham Cormode", "Rachel Cummings", "Rafael G. L. D'Oliveira", "S. E. Rouayheb", "David Evans", "Josh Gardner", "Zachary Garrett", "Adrià Gascón", "Badih Ghazi", "Phillip B. Gibbons", "M. Gruteser", "Zaïd Harchaoui", "Chaoyang He", "Lie He", "Zhouyuan Huo", "Ben Hutchinson", "Justin Hsu", "Martin Jaggi", "T. Javidi", "Gauri Joshi", "M. Khodak", "Jakub Konecný", "A. Korolova", "F. Koushanfar", "Oluwasanmi Koyejo", "Tancrède Lepoint", "Yang Liu", "Prateek Mittal", "M. Mohri", "R. Nock", "A. Özgür", "R. Pagh", "Mariana Raykova", "Hang Qi", "Daniel Ramage", "R. Raskar", "D. Song", "Weikang Song", "Sebastian U. Stich", "Ziteng Sun", "A. Suresh", "Florian Tramèr", "Praneeth Vepakomma", "Jianyu Wang", "Li Xiong", "Zheng Xu", "Qiang Yang", "Felix X. 
Yu", "Han Yu", "Sen Zhao" ], "externalIds": { "MAG": "3111681398", "DBLP": "journals/corr/abs-1912-04977", "ArXiv": "1912.04977", "DOI": "10.1561/2200000083", "CorpusId": 209202606 }, "url": "https://www.semanticscholar.org/paper/07912741c6c96e6ad5b2c2d6c6c3b2de5c8a271b", "referenceCount": 517, "citationCount": 4997, "influentialCitationCount": 393, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Database for Handwritten Text Recognition Research", "abstract": "An image database for handwritten text recognition research is described. Digital images of approximately 5000 city names, 5000 state names, 10000 ZIP Codes, and 50000 alphanumeric characters are included. Each image was scanned from mail in a working post office at 300 pixels/in in 8-bit gray scale on a high-quality flat bed digitizer. The data were unconstrained for the writer, style, and method of preparation. These characteristics help overcome the limitations of earlier databases that contained only isolated characters or were prepared in a laboratory setting under prescribed circumstances. Also, the database is divided into explicit training and testing sets to facilitate the sharing of results among researchers as well as performance comparisons. >", "year": 1994, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "J. Hull" ], "externalIds": { "DBLP": "journals/pami/Hull94", "MAG": "2100659887", "DOI": "10.1109/34.291440", "CorpusId": 8148915 }, "url": "https://www.semanticscholar.org/paper/62a134740314b4469c83c8921ae2e1beea22b8f5", "referenceCount": 5, "citationCount": 2163, "influentialCitationCount": 265, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": ". A Survey on Deep Learning: Algorithms, Techniques, and Applications", "abstract": null, "year": 2018, "venue": "ACM CSUR", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Gradient-based learning applied to document recognition", "abstract": "Multilayer neural networks trained with the back-propagation algorithm constitute the best example of a successful gradient based learning technique. Given an appropriate network architecture, gradient-based learning algorithms can be used to synthesize a complex decision surface that can classify high-dimensional patterns, such as handwritten characters, with minimal preprocessing. This paper reviews various methods applied to handwritten character recognition and compares them on a standard handwritten digit recognition task. Convolutional neural networks, which are specifically designed to deal with the variability of 2D shapes, are shown to outperform all other techniques. Real-life document recognition systems are composed of multiple modules including field extraction, segmentation recognition, and language modeling. A new learning paradigm, called graph transformer networks (GTN), allows such multimodule systems to be trained globally using gradient-based methods so as to minimize an overall performance measure. Two systems for online handwriting recognition are described. Experiments demonstrate the advantage of global training, and the flexibility of graph transformer networks. A graph transformer network for reading a bank cheque is also described. 
It uses convolutional neural network character recognizers combined with global training techniques to provide record accuracy on business and personal cheques. It is deployed commercially and reads several million cheques per day.", "year": 1998, "venue": "Proceedings of the IEEE", "authors": [ "Yann LeCun", "L. Bottou", "Yoshua Bengio", "P. Haffner" ], "externalIds": { "MAG": "2112796928", "DBLP": "journals/pieee/LeCunBBH98", "DOI": "10.1109/5.726791", "CorpusId": 14542261 }, "url": "https://www.semanticscholar.org/paper/162d958ff885f1462aeda91cd72582323fd6a1f4", "referenceCount": 149, "citationCount": 49590, "influentialCitationCount": 5893, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] } ] }, "Stalactite: Toolbox for Fast Prototyping of Vertical Federated Learning Systems": { "paper_title": "Stalactite: Toolbox for Fast Prototyping of Vertical Federated Learning Systems", "arxiv_id": "2409.15558v1", "keyword": "federate learning", "authors": [ "Anastasiia Zakharova", "Dmitriy Alexandrov", "Maria Khodorchenko", "Nikolay Butakov", "Alexey Vasilev", "Maxim Savchenko", "Alexander Grigorievskiy" ], "references": [ { "title": "Does It Look Sequential? An Analysis of Datasets for Evaluation of Sequential Recommendations", "abstract": "Sequential recommender systems are an important and demanded area of research. Such systems aim to use the order of interactions in a user's history to predict future interactions. The premise is that the order of interactions and sequential patterns play an essential role. Therefore, it is crucial to use datasets that exhibit a sequential structure to evaluate sequential recommenders properly. We apply several methods based on the random shuffling of the user's sequence of interactions to assess the strength of sequential structure across 15 datasets, frequently used for sequential recommender systems evaluation in recent research papers presented at top-tier conferences. As shuffling explicitly breaks sequential dependencies inherent in datasets, we estimate the strength of sequential patterns by comparing metrics for shuffled and original versions of the dataset. Our findings show that several popular datasets have a rather weak sequential structure.", "year": 2024, "venue": "arXiv.org", "authors": [ "Anton Klenitskiy", "Anna Volodkevich", "Anton Pembek", "Alexey Vasilev" ], "externalIds": { "DBLP": "journals/corr/abs-2408-12008", "ArXiv": "2408.12008", "DOI": "10.1145/3640457.3688195", "CorpusId": 271924346 }, "url": "https://www.semanticscholar.org/paper/63373902e1cb0a01419c38da2d06c83cb2faa241", "referenceCount": 30, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Application Research of Vertical Federated Learning Technology in Banking Risk Control Model Strategy", "abstract": "This study centers on the application of vertical federated learning technology in the context of Internet banking loans, with a particular focus on innovations in data privacy protection, risk control model algorithms, and secure multi-party computation. Currently, banking risk control strategies mainly rely on traditional data processing technologies, which often fall short in protecting user privacy and ensuring data usage efficiency. We adopt vertical federated learning technology, offering an innovative solution for the Internet banking loan scenario. Firstly, regarding data privacy protection, we propose a differential privacy mechanism to safeguard user-sensitive data. 
Secondly, we innovatively apply risk control model algorithms, facilitating collaborative modeling across multiple Internet loan platforms through federated learning. Furthermore, we introduce secure multi-party computation technology to ensure the secure transmission of data and confidentiality of computation processes during federated learning. Through empirical experiments on real Internet loan datasets, we validate the effectiveness and feasibility of our proposed methods. After implementing our risk control model, the credit approval rate increased from 3.44% to 18.2%, with a single-day high reaching 25.53%. The average loan amount increased by 7,700 yuan, and the average interest rate slightly declined by 0.48%, marking a significant improvement and breakthrough compared to traditional risk control models. This study offers innovative solutions for data privacy protection and risk control in the Internet loan scenario, providing safer and more reliable services for financial institutions and users. Moreover, our methods possess high practicality and promotional value. The potential widespread impact on the industry is profound.", "year": 2023, "venue": "2023 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)", "authors": [ "Yong Luo", "Zhi Lu", "Xiaofei Yin", "Songfeng Lu", "Yiting Weng" ], "externalIds": { "DBLP": "conf/bdcloud/LuoLYLW23", "DOI": "10.1109/ISPA-BDCloud-SocialCom-SustainCom59178.2023.00103", "CorpusId": 269091907 }, "url": "https://www.semanticscholar.org/paper/df854a54e3c03942142c3d91a61274ae12eb0132", "referenceCount": 25, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "IHVFL: a privacy-enhanced intention-hiding vertical federated learning framework for medical data", "abstract": null, "year": 2023, "venue": "Cybersecurity", "authors": [ "Fei Tang", "Shikai Liang", "Guowei Ling", "Jinyong Shan" ], "externalIds": { "DBLP": "journals/cybersec/TangLLS23", "DOI": "10.1186/s42400-023-00166-9", "CorpusId": 263612687 }, "url": "https://www.semanticscholar.org/paper/0021242eee6ead1bb4846ee5e5a40dd3bce08507", "referenceCount": 50, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "NVIDIA FLARE: Federated Learning from Simulation to Real-World", "abstract": "Federated learning (FL) enables building robust and generalizable AI models by leveraging diverse datasets from multiple collaborators without centralizing the data. We created NVIDIA FLARE as an open-source software development kit (SDK) to make it easier for data scientists to use FL in their research and real-world applications. The SDK includes solutions for state-of-the-art FL algorithms and federated machine learning approaches, which facilitate building workflows for distributed learning across enterprises and enable platform developers to create a secure, privacy-preserving offering for multiparty collaboration utilizing homomorphic encryption or differential privacy. The SDK is a lightweight, flexible, and scalable Python package. It allows researchers to apply their data science workflows in any training libraries (PyTorch, TensorFlow, XGBoost, or even NumPy) in real-world FL settings. 
This paper introduces the key design principles of NVFlare and illustrates some use cases (e.g., COVID analysis) with customizable FL workflows that implement different privacy-preserving algorithms. Code is available at https://github.com/NVIDIA/NVFlare.", "year": 2022, "venue": "IEEE Data Engineering Bulletin", "authors": [ "H. Roth", "Yan Cheng", "Yuhong Wen", "Isaac Yang", "Ziyue Xu", "Yuan-Ting Hsieh", "Kristopher Kersten", "A. Harouni", "Can Zhao", "Kevin Lu", "Zhihong Zhang", "Wenqi Li", "A. Myronenko", "Dong Yang", "Sean Bin Yang", "Nicola Rieke", "Abood Quraini", "Chester Chen", "Daguang Xu", "Nic Ma", "Prerna Dogra", "Mona G. Flores", "Andrew Feng" ], "externalIds": { "ArXiv": "2210.13291", "DBLP": "journals/corr/abs-2210-13291", "DOI": "10.48550/arXiv.2210.13291", "CorpusId": 253098004 }, "url": "https://www.semanticscholar.org/paper/04315fbe6554edc9b5d76c99f64b8d1a6f26f7af", "referenceCount": 51, "citationCount": 67, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "VFed-SSD: Towards Practical Vertical Federated Advertising", "abstract": "As an emerging secure learning paradigm in leveraging cross-agency private data, vertical federated learning (VFL) is expected to improve advertising models by enabling the joint learning of complementary user attributes privately owned by the advertiser and the publisher. However, there are two key challenges in applying it to advertising systems: a) the limited scale of labeled overlapping samples, and b) the high cost of real-time cross-agency serving. In this paper, we propose a semi-supervised split distillation framework VFed-SSD to alleviate the two limitations. We identify that: i) there are massive unlabeled overlapped data available in advertising systems, and ii) we can keep a balance between model performance and inference cost by decomposing the federated model. Specifically, we develop a self-supervised task Matched Pair Detection (MPD) to exploit the vertically partitioned unlabeled data and propose the Split Knowledge Distillation (SplitKD) schema to avoid cross-agency serving. Empirical studies on three industrial datasets exhibit the effectiveness of our methods, with the median AUC over all datasets improved by 0.86% and 2.6% in the local and the federated deployment mode respectively. Overall, our framework provides an efficient federation-enhanced solution for real-time display advertising with minimal deploying cost and significant performance lift.", "year": 2022, "venue": "", "authors": [ "Wenjie Li", "Qiaolin Xia", "Junfeng Deng", "Hao Cheng", "Jiangming Liu", "Kouying Xue", "Yong Cheng", "Shutao Xia" ], "externalIds": { "ArXiv": "2205.15987", "CorpusId": 259203197 }, "url": "https://www.semanticscholar.org/paper/1e5c13afa00d2514f400ec9e4d9132d006b1d227", "referenceCount": 55, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Exploiting Data Sparsity in Secure Cross-Platform Social Recommendation", "abstract": "Social recommendation has shown promising improvements over traditional systems since it leverages social correlation data as an additional input. Most existing work assumes that all data are available to the recommendation platform. However, in practice, user-item interaction data (e.g., rating) and user-user social data are usually generated by different platforms, and both of which contain sensitive information.
Therefore,\"How to perform secure and efficient social recommendation across different platforms, where the data are highly-sparse in nature\"remains an important challenge. In this work, we bring secure computation techniques into social recommendation, and propose S3Rec, a sparsity-aware secure cross-platform social recommendation framework. As a result, our model can not only improve the recommendation performance of the rating platform by incorporating the sparse social data on the social platform, but also protect data privacy of both platforms. Moreover, to further improve model training efficiency, we propose two secure sparse matrix multiplication protocols based on homomorphic encryption and private information retrieval. Our experiments on two benchmark datasets demonstrate the effectiveness of S3Rec.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Jamie Cui", "Chaochao Chen", "Lingjuan Lyu", "Carl Yang", "Li Wang" ], "externalIds": { "DBLP": "conf/nips/CuiCLYW21", "ArXiv": "2202.07253", "CorpusId": 240006790 }, "url": "https://www.semanticscholar.org/paper/f21460781e25eb9adda2e3feed542326dc391265", "referenceCount": 36, "citationCount": 26, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "OpenFL: the open federated learning library", "abstract": "Objective. Federated learning (FL) is a computational paradigm that enables organizations to collaborate on machine learning (ML) and deep learning (DL) projects without sharing sensitive data, such as patient records, financial data, or classified secrets. Approach. Open federated learning (OpenFL) framework is an open-source python-based tool for training ML/DL algorithms using the data-private collaborative learning paradigm of FL, irrespective of the use case. OpenFL works with training pipelines built with both TensorFlow and PyTorch, and can be easily extended to other ML and DL frameworks. Main results. In this manuscript, we present OpenFL and summarize its motivation and development characteristics, with the intention of facilitating its application to existing ML/DL model training in a production environment. We further provide recommendations to secure a federation using trusted execution environments to ensure explicit model security and integrity, as well as maintain data confidentiality. Finally, we describe the first real-world healthcare federations that use the OpenFL library, and highlight how it can be applied to other non-healthcare use cases. Significance. The OpenFL library is designed for real world scalability, trusted execution, and also prioritizes easy migration of centralized ML models into a federated training pipeline. Although OpenFL’s initial use case was in healthcare, it is applicable beyond this domain and is now reaching wider adoption both in research and production settings. The tool is open-sourced at github.com/intel/openfl.", "year": 2021, "venue": "Physics in Medicine and Biology", "authors": [ "G. A. Reina", "Alexey Gruzdev", "Patrick Foley", "O. Perepelkina", "Mansi Sharma", "Igor Davidyuk", "Ilya Trushkin", "Maksim Radionov", "Aleksandr Mokrov", "Dmitry Agapov", "Jason Martin", "Brandon Edwards", "Micah J. Sheller", "Sarthak Pati", "Prakash Narayana Moorthy", "Shih-Han Wang", "Prashant Shah", "S. 
Bakas" ], "externalIds": { "ArXiv": "2105.06413", "DBLP": "journals/corr/abs-2105-06413", "PubMedCentral": "9715347", "DOI": "10.1088/1361-6560/ac97d9", "CorpusId": 234482966, "PubMed": "36198326" }, "url": "https://www.semanticscholar.org/paper/e2d9926fafc75e91f20134b9cbf14db5c098ffa7", "referenceCount": 45, "citationCount": 81, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine", "Physics" ] }, { "title": "When Homomorphic Encryption Marries Secret Sharing: Secure Large-Scale Sparse Logistic Regression and Applications in Risk Control", "abstract": "Logistic Regression (LR) is the most widely used machine learning model in industry for its efficiency, robustness, and interpretability. Due to the problem of data isolation and the requirement of high model performance, many applications in industry call for building a secure and efficient LR model for multiple parties. Most existing work uses either Homomorphic Encryption (HE) or Secret Sharing (SS) to build secure LR. HE based methods can deal with high-dimensional sparse features, but they incur potential security risks. SS based methods have provable security, but they have efficiency issue under high-dimensional sparse features. In this paper, we first present CAESAR, which combines HE and SS to build secure large-scale sparse logistic regression model and achieves both efficiency and security. We then present the distributed implementation of CAESAR for scalability requirement. We have deployed CAESAR in a risk control task and conducted comprehensive experiments. Our experimental results show that CAESAR improves the state-of-the-art model by around 130 times.", "year": 2020, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Chaochao Chen", "Jun Zhou", "L. xilinx Wang", "Xibin Wu", "Wenjing Fang", "Jin Tan", "Lei Wang", "Xiaoxi Ji", "A. Liu", "Hao Wang", "Cheng Hong" ], "externalIds": { "DBLP": "conf/kdd/0001ZWWFTWLWH21", "MAG": "3056423153", "ArXiv": "2008.08753", "DOI": "10.1145/3447548.3467210", "CorpusId": 221186951 }, "url": "https://www.semanticscholar.org/paper/dd5e020e0f064a9061e0abf1998e30a7ba628312", "referenceCount": 54, "citationCount": 71, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Flower: A Friendly Federated Learning Research Framework", "abstract": "Federated Learning (FL) has emerged as a promising technique for edge devices to collaboratively learn a shared prediction model, while keeping their training data on the device, thereby decoupling the ability to do machine learning from the need to store the data in the cloud. However, FL is difficult to implement realistically, both in terms of scale and systems heterogeneity. Although there are a number of research frameworks available to simulate FL algorithms, they do not support the study of scalable FL workloads on heterogeneous edge devices. In this paper, we present Flower -- a comprehensive FL framework that distinguishes itself from existing platforms by offering new facilities to execute large-scale FL experiments and consider richly heterogeneous FL device scenarios. Our experiments show Flower can perform FL experiments up to 15M in client size using only a pair of high-end GPUs. Researchers can then seamlessly migrate experiments to real devices to examine other parts of the design space. 
We believe Flower provides the community with a critical new tool for FL study and development.", "year": 2020, "venue": "", "authors": [ "Daniel J. Beutel", "Taner Topal", "Akhil Mathur", "Xinchi Qiu", "Titouan Parcollet", "N. Lane" ], "externalIds": { "ArXiv": "2007.14390", "CorpusId": 220831008 }, "url": "https://www.semanticscholar.org/paper/a199a03e11b68c4132be880b5fcabc57251bc477", "referenceCount": 46, "citationCount": 579, "influentialCitationCount": 55, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Split Learning for collaborative deep learning in healthcare", "abstract": "Shortage of labeled data has been holding the surge of deep learning in healthcare back, as sample sizes are often small, patient information cannot be shared openly, and multi-center collaborative studies are a burden to set up. Distributed machine learning methods promise to mitigate these problems. We argue for a split learning based approach and apply this distributed learning method for the first time in the medical field to compare performance against (1) centrally hosted and (2) non collaborative configurations for a range of participants. Two medical deep learning tasks are used to compare split learning to conventional single and multi center approaches: a binary classification problem of a data set of 9000 fundus photos, and multi-label classification problem of a data set of 156,535 chest X-rays. The several distributed learning setups are compared for a range of 1-50 distributed participants. Performance of the split learning configuration remained constant for any number of clients compared to a single center study, showing a marked difference compared to the non collaborative configuration after 2 clients (p < 0.001) for both sets. Our results affirm the benefits of collaborative training of deep neural networks in health care. Our work proves the significant benefit of distributed learning in healthcare, and paves the way for future real-world implementations.", "year": 2019, "venue": "arXiv.org", "authors": [ "M. Poirot", "Praneeth Vepakomma", "Ken Chang", "Jayashree Kalpathy-Cramer", "Rajiv Gupta", "R. Raskar" ], "externalIds": { "DBLP": "journals/corr/abs-1912-12115", "ArXiv": "1912.12115", "MAG": "2998600867", "CorpusId": 209500485 }, "url": "https://www.semanticscholar.org/paper/113e4c4ee777ce0cae57f3293a5c19a4c11dae13", "referenceCount": 27, "citationCount": 112, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Communication-Efficient Learning of Deep Networks from Decentralized Data", "abstract": "Modern mobile devices have access to a wealth of data suitable for learning models, which in turn can greatly improve the user experience on the device. For example, language models can improve speech recognition and text entry, and image models can automatically select good photos. However, this rich data is often privacy sensitive, large in quantity, or both, which may preclude logging to the data center and training there using conventional approaches. We advocate an alternative that leaves the training data distributed on the mobile devices, and learns a shared model by aggregating locally-computed updates. We term this decentralized approach Federated Learning. 
\nWe present a practical method for the federated learning of deep networks based on iterative model averaging, and conduct an extensive empirical evaluation, considering five different model architectures and four datasets. These experiments demonstrate the approach is robust to the unbalanced and non-IID data distributions that are a defining characteristic of this setting. Communication costs are the principal constraint, and we show a reduction in required communication rounds by 10-100x as compared to synchronized stochastic gradient descent.", "year": 2016, "venue": "International Conference on Artificial Intelligence and Statistics", "authors": [ "H. B. McMahan", "Eider Moore", "Daniel Ramage", "S. Hampson", "B. A. Y. Arcas" ], "externalIds": { "MAG": "2950745363", "DBLP": "conf/aistats/McMahanMRHA17", "ArXiv": "1602.05629", "CorpusId": 14955348 }, "url": "https://www.semanticscholar.org/paper/d1dbf643447405984eeef098b1b320dee0b3b8a7", "referenceCount": 50, "citationCount": 13806, "influentialCitationCount": 3346, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FATE: An Industrial Grade Platform for Collaborative Learning With Data Protection", "abstract": "Collaborative and federated learning has become an emerging solution to many industrial applications where data values from different sites are exploited jointly with privacy protection. We introduce FATE, an industrial-grade project that supports enterprises and institutions to build machine learning models collaboratively at large-scale in a distributed manner. FATE supports a variety of secure computation protocols and machine learning algorithms, and features out-of-box usability with end-to-end building modules and visualization tools. Documentations are available at https://github.com/FederatedAI/FATE . Case studies and other information are available at https://www.fedai.org .", "year": 2021, "venue": "Journal of machine learning research", "authors": [ "Yang Liu", "Tao Fan", "Tianjian Chen", "Qian Xu", "Qiang Yang" ], "externalIds": { "DBLP": "journals/jmlr/LiuFCXY21", "CorpusId": 246432494 }, "url": "https://www.semanticscholar.org/paper/3d73e21af71bde8dc7984bd72f7077fb691b2523", "referenceCount": 18, "citationCount": 158, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Parallel Distributed Logistic Regression for Vertical Federated Learning without Third-Party Coordinator", "abstract": null, "year": 2019, "venue": "ArXiv abs", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. VFLAIR: A Research Library and Benchmark for Vertical Federated Learning", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Concepts, Advances, and Challenges", "abstract": null, "year": null, "venue": "IEEE Transactions on Knowledge and Data Engineering", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2022. 
Differential Private Knowledge Transfer for Privacy-Preserving Cross-Domain Recommendation", "abstract": null, "year": null, "venue": "Proceedings of the ACM Web Conference 2022 (Virtual Event, Lyon, France) (WWW ’22)", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2022. Vertical Federated Learning: A Structured Literature Review", "abstract": null, "year": null, "venue": "ArXiv abs", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. From Variability to Stability: Advancing RecSys Benchmarking Practices", "abstract": null, "year": null, "venue": "Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (Barcelona, Spain) (KDD ’24)", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "FLeNS: Federated Learning with Enhanced Nesterov-Newton Sketch": { "paper_title": "FLeNS: Federated Learning with Enhanced Nesterov-Newton Sketch", "arxiv_id": "2409.15216v1", "keyword": "federate learning", "authors": [ "Sunny Gupta", "Mohit", "Pankhi Kashyap", "Pranav Jeevan", "Amit Sethi" ], "references": [ { "title": "FedNS: A Fast Sketching Newton-Type Algorithm for Federated Learning", "abstract": "Recent Newton-type federated learning algorithms have demonstrated linear convergence with respect to the communication rounds. However, communicating Hessian matrices is often unfeasible due to their quadratic communication complexity. In this paper, we introduce a novel approach to tackle this issue while still achieving fast convergence rates. Our proposed method, named as Federated Newton Sketch methods (FedNS), approximates the centralized Newton's method by communicating the sketched square-root Hessian instead of the exact Hessian. To enhance communication efficiency, we reduce the sketch size to match the effective dimension of the Hessian matrix. We provide convergence analysis based on statistical learning for the federated Newton sketch approaches. Specifically, our approaches reach super-linear convergence rates w.r.t. the communication rounds for the first time. We validate the effectiveness of our algorithms through various experiments, which coincide with our theoretical findings.", "year": 2024, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Jian Li", "Yong Liu", "Wei Wang", "Haoran Wu", "Weiping Wang" ], "externalIds": { "DBLP": "conf/aaai/LiLW24b", "ArXiv": "2401.02734", "DOI": "10.48550/arXiv.2401.02734", "CorpusId": 266818238 }, "url": "https://www.semanticscholar.org/paper/f3cae8dd01b5da7b44921c5ed3ada6048dacb513", "referenceCount": 46, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FedNew: A Communication-Efficient and Privacy-Preserving Newton-Type Method for Federated Learning", "abstract": "Newton-type methods are popular in federated learning due to their fast convergence. Still, they suffer from two main issues, namely: low communication efficiency and low privacy due to the requirement of sending Hessian information from clients to parameter server (PS).
In this work, we introduced a novel framework called FedNew in which there is no need to transmit Hessian information from clients to PS, hence resolving the bottleneck to improve communication efficiency. In addition, FedNew hides the gradient information and results in a privacy-preserving approach compared to the existing state-of-the-art. The core novel idea in FedNew is to introduce a two level framework, and alternate between updating the inverse Hessian-gradient product using only one alternating direction method of multipliers (ADMM) step and then performing the global model update using Newton's method. Though only one ADMM pass is used to approximate the inverse Hessian-gradient product at each iteration, we develop a novel theoretical approach to show the converging behavior of FedNew for convex problems. Additionally, a significant reduction in communication overhead is achieved by utilizing stochastic quantization. Numerical results using real datasets show the superiority of FedNew compared to existing methods in terms of communication costs.", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "Anis Elgabli", "Chaouki Ben Issaid", "A. S. Bedi", "K. Rajawat", "M. Bennis", "V. Aggarwal" ], "externalIds": { "DBLP": "journals/corr/abs-2206-08829", "ArXiv": "2206.08829", "DOI": "10.48550/arXiv.2206.08829", "CorpusId": 249848088 }, "url": "https://www.semanticscholar.org/paper/8987f1c37e7cf4156d6e1016dc1a5727dfb006ce", "referenceCount": 35, "citationCount": 19, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Newton-type algorithm for federated learning based on incremental Hessian eigenvector sharing", "abstract": null, "year": 2022, "venue": "at - Automatisierungstechnik", "authors": [ "Nicolò Dal Fabbro", "S. Dey", "M. Rossi", "L. Schenato" ], "externalIds": { "ArXiv": "2202.05800", "DBLP": "journals/corr/abs-2202-05800", "DOI": "10.1016/j.automatica.2023.111460", "CorpusId": 246822878 }, "url": "https://www.semanticscholar.org/paper/242717d06e5669abdeb7897d7a5c2ae60e4fc3c8", "referenceCount": 59, "citationCount": 12, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Sharp Bounds for Federated Averaging (Local SGD) and Continuous Perspective", "abstract": "Federated Averaging (FedAvg), also known as Local SGD, is one of the most popular algorithms in Federated Learning (FL). Despite its simplicity and popularity, the convergence rate of FedAvg has thus far been undetermined. Even under the simplest assumptions (convex, smooth, homogeneous, and bounded covariance), the best-known upper and lower bounds do not match, and it is not clear whether the existing analysis captures the capacity of the algorithm. In this work, we first resolve this question by providing a lower bound for FedAvg that matches the existing upper bound, which shows the existing FedAvg upper bound analysis is not improvable. Additionally, we establish a lower bound in a heterogeneous setting that nearly matches the existing upper bound. While our lower bounds show the limitations of FedAvg, under an additional assumption of third-order smoothness, we prove more optimistic state-of-the-art convergence results in both convex and non-convex settings. 
Our analysis stems from a notion we call iterate bias, which is defined by the deviation of the expectation of the SGD trajectory from the noiseless gradient descent trajectory with the same initialization. We prove novel sharp bounds on this quantity, and show intuitively how to analyze this quantity from a Stochastic Differential Equation (SDE) perspective.", "year": 2021, "venue": "International Conference on Artificial Intelligence and Statistics", "authors": [ "Margalit Glasgow", "Honglin Yuan", "Tengyu Ma" ], "externalIds": { "DBLP": "journals/corr/abs-2111-03741", "ArXiv": "2111.03741", "CorpusId": 243847652 }, "url": "https://www.semanticscholar.org/paper/31ba311a2fb22bcb575bedc73d5a80e620157276", "referenceCount": 76, "citationCount": 33, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "What Do We Mean by Generalization in Federated Learning?", "abstract": "Federated learning data is drawn from a distribution of distributions: clients are drawn from a meta-distribution, and their data are drawn from local data distributions. Thus generalization studies in federated learning should separate performance gaps from unseen client data (out-of-sample gap) from performance gaps from unseen client distributions (participation gap). In this work, we propose a framework for disentangling these performance gaps. Using this framework, we observe and explain differences in behavior across natural and synthetic federated datasets, indicating that dataset synthesis strategy can be important for realistic simulations of generalization in federated learning. We propose a semantic synthesis strategy that enables realistic simulation without naturally-partitioned data. Informed by our findings, we call out community suggestions for future federated learning works.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Honglin Yuan", "W. Morningstar", "Lin Ning", "K. Singhal" ], "externalIds": { "DBLP": "conf/iclr/0002MNS22", "ArXiv": "2110.14216", "CorpusId": 239998253 }, "url": "https://www.semanticscholar.org/paper/58dd7435865a37e5e3fb67bf42a025b4b6491d7e", "referenceCount": 105, "citationCount": 54, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Newton-LESS: Sparsification without Trade-offs for the Sketched Newton Update", "abstract": "In second-order optimization, a potential bottleneck can be computing the Hessian matrix of the optimized function at every iteration. Randomized sketching has emerged as a powerful technique for constructing estimates of the Hessian which can be used to perform approximate Newton steps. This involves multiplication by a random sketching matrix, which introduces a trade-off between the computational cost of sketching and the convergence rate of the optimization algorithm. A theoretically desirable but practically much too expensive choice is to use a dense Gaussian sketching matrix, which produces unbiased estimates of the exact Newton step and which offers strong problem-independent convergence guarantees. We show that the Gaussian sketching matrix can be drastically sparsified, significantly reducing the computational cost of sketching, without substantially affecting its convergence properties. This approach, called Newton-LESS, is based on a recently introduced sketching technique: LEverage Score Sparsified (LESS) embeddings. 
We prove that Newton-LESS enjoys nearly the same problem-independent local convergence rate as Gaussian embeddings, not just up to constant factors but even down to lower order terms, for a large class of optimization tasks. In particular, this leads to a new state-of-the-art convergence result for an iterative least squares solver. Finally, we extend LESS embeddings to include uniformly sparsified random sign matrices which can be implemented efficiently and which perform well in numerical experiments.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Michal Derezinski", "Jonathan Lacotte", "Mert Pilanci", "Michael W. Mahoney" ], "externalIds": { "ArXiv": "2107.07480", "DBLP": "journals/corr/abs-2107-07480", "CorpusId": 235899247 }, "url": "https://www.semanticscholar.org/paper/655f64b750cf8334f8957ac84d11abe6aa66960f", "referenceCount": 50, "citationCount": 25, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "A Non-parametric View of FedAvg and FedProx:Beyond Stationary Points", "abstract": "Federated Learning (FL) is a promising decentralized learning framework and has great potentials in privacy preservation and in lowering the computation load at the cloud. Recent work showed that FedAvg and FedProx - the two widely-adopted FL algorithms - fail to reach the stationary points of the global optimization objective even for homogeneous linear regression problems. Further, it is concerned that the common model learned might not generalize well locally at all in the presence of heterogeneity. In this paper, we analyze the convergence and statistical efficiency of FedAvg and FedProx, addressing the above two concerns. Our analysis is based on the standard non-parametric regression in a reproducing kernel Hilbert space (RKHS), and allows for heterogeneous local data distributions and unbalanced local datasets. We prove that the estimation errors, measured in either the empirical norm or the RKHS norm, decay with a rate of 1/t in general and exponentially for finite-rank kernels. In certain heterogeneous settings, these upper bounds also imply that both FedAvg and FedProx achieve the optimal error rate. To further analytically quantify the impact of the heterogeneity at each client, we propose and characterize a novel notion-federation gain, defined as the reduction of the estimation error for a client to join the FL. We discover that when the data heterogeneity is moderate, a client with limited local data can benefit from a common model with a large federation gain. Numerical experiments further corroborate our theoretical findings.", "year": 2021, "venue": "Journal of machine learning research", "authors": [ "Lili Su", "Jiaming Xu", "Pengkun Yang" ], "externalIds": { "DBLP": "journals/jmlr/SuXY23", "ArXiv": "2106.15216", "CorpusId": 246863861 }, "url": "https://www.semanticscholar.org/paper/a750496f661eb3aa894bdc91ce70e77961eb20e9", "referenceCount": 44, "citationCount": 8, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "FedNL: Making Newton-Type Methods Applicable to Federated Learning", "abstract": "Inspired by recent work of Islamov et al (2021), we propose a family of Federated Newton Learn ( FedNL ) methods, which we believe is a marked step in the direction of making second-order methods applicable to FL. 
In contrast to the aforementioned work, FedNL employs a different Hessian learning technique which i) enhances privacy as it does not rely on the training data to be revealed to the coordinating server, ii) makes it applicable beyond generalized linear models, and iii) provably works with general contractive compression operators for compressing the local Hessians, such as Top-K or Rank-R, which are vastly superior in practice. Notably, we do not need to rely on error feedback for our methods to work with contractive compressors. Moreover, we develop FedNL-PP, FedNL-CR and FedNL-LS, which are variants of FedNL that support partial participation, and globalization via cubic regularization and line search, respectively, and FedNL-BC, which is a variant that can further benefit from bidirectional compression of gradients and models, i.e., smart uplink gradient and smart downlink model compression. We prove local convergence rates that are independent of the condition number, the number of training data points, and compression variance. Our communication efficient Hessian learning technique provably learns the Hessian at the optimum. Finally, we perform a variety of numerical experiments that show that our FedNL methods have state-of-the-art communication complexity when compared to key baselines.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "M. Safaryan", "Rustem Islamov", "Xun Qian", "Peter Richtárik" ], "externalIds": { "DBLP": "conf/icml/SafaryanIQR22", "ArXiv": "2106.02969", "CorpusId": 235358296 }, "url": "https://www.semanticscholar.org/paper/04e6636306e23fc5fe438311a0d5a604ac323126", "referenceCount": 43, "citationCount": 64, "influentialCitationCount": 11, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "LocalNewton: Reducing Communication Bottleneck for Distributed Learning", "abstract": "To address the communication bottleneck problem in distributed optimization within a master-worker framework, we propose LocalNewton, a distributed second-order algorithm with local averaging. In LocalNewton, the worker machines update their model in every iteration by finding a suitable second-order descent direction using only the data and model stored in their own local memory. We let the workers run multiple such iterations locally and communicate the models to the master node only once every few (say L) iterations. LocalNewton is highly practical since it requires only one hyperparameter, the number L of local iterations. We use novel matrix concentration-based techniques to obtain theoretical guarantees for LocalNewton, and we validate them with detailed empirical evaluation. To enhance practicability, we devise an adaptive scheme to choose L, and we show that this reduces the number of local iterations in worker machines between two model synchronizations as the training proceeds, successively refining the model quality at the master. Via extensive experiments using several real-world datasets with AWS Lambda workers and an AWS EC2 master, we show that LocalNewton requires fewer than 60% of the communication rounds (between master and workers) and less than 40% of the end-to-end running time, compared to state-of-the-art algorithms, to reach the same training loss.", "year": 2021, "venue": "arXiv.org", "authors": [ "Vipul Gupta", "Avishek Ghosh", "Michal Derezinski", "Rajiv Khanna", "K. Ramchandran", "Michael W. 
Mahoney" ], "externalIds": { "ArXiv": "2105.07320", "DBLP": "journals/corr/abs-2105-07320", "CorpusId": 234742279 }, "url": "https://www.semanticscholar.org/paper/773e4c3c0d2cfdab8e0670cbdf3588de1c41991d", "referenceCount": 55, "citationCount": 10, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Adaptive Newton Sketch: Linear-time Optimization with Quadratic Convergence and Effective Hessian Dimensionality", "abstract": "We propose a randomized algorithm with quadratic convergence rate for convex optimization problems with a self-concordant, composite, strongly convex objective function. Our method is based on performing an approximate Newton step using a random projection of the Hessian. Our first contribution is to show that, at each iteration, the embedding dimension (or sketch size) can be as small as the effective dimension of the Hessian matrix. Leveraging this novel fundamental result, we design an algorithm with a sketch size proportional to the effective dimension and which exhibits a quadratic rate of convergence. This result dramatically improves on the classical linear-quadratic convergence rates of state-of-the-art sub-sampled Newton methods. However, in most practical cases, the effective dimension is not known beforehand, and this raises the question of how to pick a sketch size as small as the effective dimension while preserving a quadratic convergence rate. Our second and main contribution is thus to propose an adaptive sketch size algorithm with quadratic convergence rate and which does not require prior knowledge or estimation of the effective dimension: at each iteration, it starts with a small sketch size, and increases it until quadratic progress is achieved. Importantly, we show that the embedding dimension remains proportional to the effective dimension throughout the entire path and that our method achieves state-of-the-art computational complexity for solving convex optimization programs with a strongly convex component.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Jonathan Lacotte", "Yifei Wang", "Mert Pilanci" ], "externalIds": { "ArXiv": "2105.07291", "DBLP": "journals/corr/abs-2105-07291", "CorpusId": 234742103 }, "url": "https://www.semanticscholar.org/paper/1b85a08e660aa0d57c6e1c1ad8849cdbe2713ec0", "referenceCount": 44, "citationCount": 15, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Effective Dimension Adaptive Sketching Methods for Faster Regularized Least-Squares Optimization", "abstract": "We propose a new randomized algorithm for solving L2-regularized least-squares problems based on sketching. We consider two of the most popular random embeddings, namely, Gaussian embeddings and the Subsampled Randomized Hadamard Transform (SRHT). While current randomized solvers for least-squares optimization prescribe an embedding dimension at least greater than the data dimension, we show that the embedding dimension can be reduced to the effective dimension of the optimization problem, and still preserve high-probability convergence guarantees. In this regard, we derive sharp matrix deviation inequalities over ellipsoids for both Gaussian and SRHT embeddings. Specifically, we improve on the constant of a classical Gaussian concentration bound whereas, for SRHT embeddings, our deviation inequality involves a novel technical approach. 
Leveraging these bounds, we are able to design a practical and adaptive algorithm which does not require to know the effective dimension beforehand. Our method starts with an initial embedding dimension equal to 1 and, over iterations, increases the embedding dimension up to the effective one. Finally, we prove that our algorithm improves the state-of-the-art computational complexity for solving regularized least-squares problems. Further, we show numerically that it outperforms standard least-squares solvers such as the conjugate gradient method and its pre-conditioned version on several standard machine learning datasets.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Jonathan Lacotte", "Mert Pilanci" ], "externalIds": { "DBLP": "conf/nips/LacotteP20", "ArXiv": "2006.05874", "MAG": "3103183085", "CorpusId": 219559310 }, "url": "https://www.semanticscholar.org/paper/99853f345b2e8919ddf64bdf53143c18087d46fa", "referenceCount": 48, "citationCount": 23, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Distributed Newton Can Communicate Less and Resist Byzantine Workers", "abstract": "We develop a distributed second order optimization algorithm that is communication-efficient as well as robust against Byzantine failures of the worker machines. We propose COMRADE (COMunication-efficient and Robust Approximate Distributed nEwton), an iterative second order algorithm, where the worker machines communicate only once per iteration with the center machine. This is in sharp contrast with the state-of-the-art distributed second order algorithms like GIANT [34] and DINGO[7], where the worker machines send (functions of) local gradient and Hessian sequentially; thus ending up communicating twice with the center machine per iteration. Moreover, we show that the worker machines can further compress the local information before sending it to the center. In addition, we employ a simple norm based thresholding rule to filter-out the Byzantine worker machines. We establish the linear-quadratic rate of convergence of COMRADE and establish that the communication savings and Byzantine resilience result in only a small statistical error rate for arbitrary convex loss functions. To the best of our knowledge, this is the first work that addresses the issue of Byzantine resilience in second order distributed optimization. Furthermore, we validate our theoretical results with extensive experiments on synthetic and benchmark LIBSVM [5] data-sets and demonstrate convergence guarantees.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Avishek Ghosh", "R. Maity", "A. Mazumdar" ], "externalIds": { "DBLP": "journals/corr/abs-2006-08737", "MAG": "3104744097", "ArXiv": "2006.08737", "CorpusId": 219708174 }, "url": "https://www.semanticscholar.org/paper/5de5f27378ff586b123f898cdbc56fd4c6c1e1ed", "referenceCount": 40, "citationCount": 29, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "FedSplit: An algorithmic framework for fast federated optimization", "abstract": "Motivated by federated learning, we consider the hub-and-spoke model of distributed optimization in which a central authority coordinates the computation of a solution among many agents while limiting communication. 
We first study some past procedures for federated optimization, and show that their fixed points need not correspond to stationary points of the original optimization problem, even in simple convex settings with deterministic updates. In order to remedy these issues, we introduce FedSplit, a class of algorithms based on operator splitting procedures for solving distributed convex minimization with additive structure. We prove that these procedures have the correct fixed points, corresponding to optima of the original optimization problem, and we characterize their convergence rates under different settings. Our theory shows that these methods are provably robust to inexact computation of intermediate local quantities. We complement our theory with some simple experiments that demonstrate the benefits of our methods in practice.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Reese Pathak", "M. Wainwright" ], "externalIds": { "MAG": "3105347258", "DBLP": "conf/nips/PathakW20", "ArXiv": "2005.05238", "CorpusId": 218581246 }, "url": "https://www.semanticscholar.org/paper/7c3c799801ff60e878111373e0fc05cd3b1eab18", "referenceCount": 33, "citationCount": 165, "influentialCitationCount": 14, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "SCAFFOLD: Stochastic Controlled Averaging for Federated Learning", "abstract": "Federated Averaging (FedAvg) has emerged as the algorithm of choice for federated learning due to its simplicity and low communication cost. However, in spite of recent research efforts, its performance is not fully understood. We obtain tight convergence rates for FedAvg and prove that it suffers from `client-drift' when the data is heterogeneous (non-iid), resulting in unstable and slow convergence. \nAs a solution, we propose a new algorithm (SCAFFOLD) which uses control variates (variance reduction) to correct for the `client-drift' in its local updates. We prove that SCAFFOLD requires significantly fewer communication rounds and is not affected by data heterogeneity or client sampling. Further, we show that (for quadratics) SCAFFOLD can take advantage of similarity in the client's data yielding even faster convergence. The latter is the first result to quantify the usefulness of local-steps in distributed optimization.", "year": 2019, "venue": "International Conference on Machine Learning", "authors": [ "Sai Praneeth Karimireddy", "Satyen Kale", "M. Mohri", "Sashank J. Reddi", "Sebastian U. Stich", "A. Suresh" ], "externalIds": { "MAG": "3006555759", "DBLP": "conf/icml/KarimireddyKMRS20", "CorpusId": 214069261 }, "url": "https://www.semanticscholar.org/paper/fc7b1823bd8b59a590d0bc33bd7a145518fd71c5", "referenceCount": 80, "citationCount": 2033, "influentialCitationCount": 374, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On the Convergence of FedAvg on Non-IID Data", "abstract": "Federated learning enables a large amount of edge computing devices to jointly learn a model without data sharing. As a leading algorithm in this setting, Federated Averaging (\\texttt{FedAvg}) runs Stochastic Gradient Descent (SGD) in parallel on a small subset of the total devices and averages the sequences only once in a while. Despite its simplicity, it lacks theoretical guarantees under realistic settings. 
In this paper, we analyze the convergence of \\texttt{FedAvg} on non-iid data and establish a convergence rate of $\\mathcal{O}(\\frac{1}{T})$ for strongly convex and smooth problems, where $T$ is the number of SGDs. Importantly, our bound demonstrates a trade-off between communication-efficiency and convergence rate. As user devices may be disconnected from the server, we relax the assumption of full device participation to partial device participation and study different averaging schemes; low device participation rate can be achieved without severely slowing down the learning. Our results indicate that heterogeneity of data slows down the convergence, which matches empirical observations. Furthermore, we provide a necessary condition for \\texttt{FedAvg} on non-iid data: the learning rate $\\eta$ must decay, even if full-gradient is used; otherwise, the solution will be $\\Omega (\\eta)$ away from the optimal.", "year": 2019, "venue": "International Conference on Learning Representations", "authors": [ "Xiang Li", "Kaixuan Huang", "Wenhao Yang", "Shusen Wang", "Zhihua Zhang" ], "externalIds": { "DBLP": "conf/iclr/LiHYWZ20", "MAG": "2955213239", "ArXiv": "1907.02189", "CorpusId": 195798643 }, "url": "https://www.semanticscholar.org/paper/c802ceb7a9ff904220c48ee44ae9b671be6d6379", "referenceCount": 50, "citationCount": 1940, "influentialCitationCount": 313, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Agnostic Federated Learning", "abstract": "A key learning scenario in large-scale applications is that of federated learning, where a centralized model is trained based on data originating from a large number of clients. We argue that, with the existing training and inference, federated models can be biased towards different clients. Instead, we propose a new framework of agnostic federated learning, where the centralized model is optimized for any target distribution formed by a mixture of the client distributions. We further show that this framework naturally yields a notion of fairness. We present data-dependent Rademacher complexity guarantees for learning with this objective, which guide the definition of an algorithm for agnostic federated learning. We also give a fast stochastic optimization algorithm for solving the corresponding optimization problem, for which we prove convergence bounds, assuming a convex loss function and hypothesis set. We further empirically demonstrate the benefits of our approach in several datasets. Beyond federated learning, our framework and algorithm can be of interest to other learning scenarios such as cloud computing, domain adaptation, drifting, and other contexts where the training and test distributions do not coincide.", "year": 2019, "venue": "International Conference on Machine Learning", "authors": [ "M. Mohri", "Gary Sivek", "A. 
Suresh" ], "externalIds": { "MAG": "2912592113", "ArXiv": "1902.00146", "DBLP": "conf/icml/MohriSS19", "CorpusId": 59553531 }, "url": "https://www.semanticscholar.org/paper/159395b0f7a2b9ea04f9a758d18887bcb970ee78", "referenceCount": 69, "citationCount": 820, "influentialCitationCount": 94, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Federated Optimization in Heterogeneous Networks", "abstract": "Federated Learning is a distributed learning paradigm with two key challenges that differentiate it from traditional distributed optimization: (1) significant variability in terms of the systems characteristics on each device in the network (systems heterogeneity), and (2) non-identically distributed data across the network (statistical heterogeneity). In this work, we introduce a framework, FedProx, to tackle heterogeneity in federated networks. FedProx can be viewed as a generalization and re-parametrization of FedAvg, the current state-of-the-art method for federated learning. While this re-parameterization makes only minor modifications to the method itself, these modifications have important ramifications both in theory and in practice. Theoretically, we provide convergence guarantees for our framework when learning over data from non-identical distributions (statistical heterogeneity), and while adhering to device-level systems constraints by allowing each participating device to perform a variable amount of work (systems heterogeneity). Practically, we demonstrate that FedProx allows for more robust convergence than FedAvg across a suite of realistic federated datasets. In particular, in highly heterogeneous settings, FedProx demonstrates significantly more stable and accurate convergence behavior relative to FedAvg---improving absolute test accuracy by 22% on average.", "year": 2018, "venue": "Conference on Machine Learning and Systems", "authors": [ "Anit Kumar Sahu", "Tian Li", "Maziar Sanjabi", "M. Zaheer", "Ameet Talwalkar", "Virginia Smith" ], "externalIds": { "ArXiv": "1812.06127", "DBLP": "conf/mlsys/LiSZSTS20", "MAG": "3038022836", "CorpusId": 59316566 }, "url": "https://www.semanticscholar.org/paper/1284ed4bf6a043ecf8cebca09e4811f1e3b83b65", "referenceCount": 50, "citationCount": 3919, "influentialCitationCount": 852, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Optimal Convergence for Distributed Learning with Stochastic Gradient Methods and Spectral Algorithms", "abstract": "We study generalization properties of distributed algorithms in the setting of nonparametric regression over a reproducing kernel Hilbert space (RKHS). We first investigate distributed stochastic gradient methods (SGM), with mini-batches and multi-passes over the data. We show that optimal generalization error bounds can be retained for distributed SGM provided that the partition level is not too large. We then extend our results to spectral-regularization algorithms (SRA), including kernel ridge regression (KRR), kernel principal component analysis, and gradient methods. Our results are superior to the state-of-the-art theory. Particularly, our results show that distributed SGM has a smaller theoretical computational complexity, compared with distributed KRR and classic SGM. 
Moreover, even for non-distributed SRA, they provide the first optimal, capacity-dependent convergence rates, considering the case that the regression function may not be in the RKHS.", "year": 2018, "venue": "Journal of machine learning research", "authors": [ "Junhong Lin", "V. Cevher" ], "externalIds": { "MAG": "2890702650", "ArXiv": "1801.07226", "DBLP": "journals/jmlr/LinC20a", "CorpusId": 52834039 }, "url": "https://www.semanticscholar.org/paper/091a0796b1fd2d896295e7bedb7f8c05c2dba9df", "referenceCount": 73, "citationCount": 28, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Federated Learning: Strategies for Improving Communication Efficiency", "abstract": "Federated Learning is a machine learning setting where the goal is to train a high-quality centralized model while training data remains distributed over a large number of clients each with unreliable and relatively slow network connections. We consider learning algorithms for this setting where on each round, each client independently computes an update to the current model based on its local data, and communicates this update to a central server, where the client-side updates are aggregated to compute a new global model. The typical clients in this setting are mobile phones, and communication efficiency is of the utmost importance. In this paper, we propose two ways to reduce the uplink communication costs: structured updates, where we directly learn an update from a restricted space parametrized using a smaller number of variables, e.g. either low-rank or a random mask; and sketched updates, where we learn a full model update and then compress it using a combination of quantization, random rotations, and subsampling before sending it to the server. Experiments on both convolutional and recurrent networks show that the proposed methods can reduce the communication cost by two orders of magnitude.", "year": 2016, "venue": "arXiv.org", "authors": [ "Jakub Konecný", "H. B. McMahan", "Felix X. Yu", "Peter Richtárik", "A. Suresh", "D. Bacon" ], "externalIds": { "DBLP": "journals/corr/KonecnyMYRSB16", "ArXiv": "1610.05492", "MAG": "2535838896", "CorpusId": 14999259 }, "url": "https://www.semanticscholar.org/paper/7fcb90f68529cbfab49f471b54719ded7528d0ef", "referenceCount": 26, "citationCount": 4138, "influentialCitationCount": 225, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Optimization Methods for Large-Scale Machine Learning", "abstract": "This paper provides a review and commentary on the past, present, and future of numerical optimization algorithms in the context of machine learning applications. Through case studies on text classification and the training of deep neural networks, we discuss how optimization problems arise in machine learning and what makes them challenging. A major theme of our study is that large-scale machine learning represents a distinctive setting in which the stochastic gradient (SG) method has traditionally played a central role while conventional gradient-based nonlinear optimization techniques typically falter. Based on this viewpoint, we present a comprehensive theory of a straightforward, yet versatile SG algorithm, discuss its practical behavior, and highlight opportunities for designing algorithms with improved performance. 
This leads to a discussion about the next generation of optimization methods for large-scale machine learning, including an investigation of two main streams of research on techniques that diminish noise in the stochastic directions and methods that make use of second-order derivative approximations.", "year": 2016, "venue": "SIAM Review", "authors": [ "L. Bottou", "Frank E. Curtis", "J. Nocedal" ], "externalIds": { "ArXiv": "1606.04838", "MAG": "2950363690", "DBLP": "journals/siamrev/BottouCN18", "DOI": "10.1137/16M1080173", "CorpusId": 3119488 }, "url": "https://www.semanticscholar.org/paper/d21703674ae562bae4a849a75847cdd9ead417df", "referenceCount": 183, "citationCount": 2887, "influentialCitationCount": 392, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Communication-Efficient Learning of Deep Networks from Decentralized Data", "abstract": "Modern mobile devices have access to a wealth of data suitable for learning models, which in turn can greatly improve the user experience on the device. For example, language models can improve speech recognition and text entry, and image models can automatically select good photos. However, this rich data is often privacy sensitive, large in quantity, or both, which may preclude logging to the data center and training there using conventional approaches. We advocate an alternative that leaves the training data distributed on the mobile devices, and learns a shared model by aggregating locally-computed updates. We term this decentralized approach Federated Learning. \nWe present a practical method for the federated learning of deep networks based on iterative model averaging, and conduct an extensive empirical evaluation, considering five different model architectures and four datasets. These experiments demonstrate the approach is robust to the unbalanced and non-IID data distributions that are a defining characteristic of this setting. Communication costs are the principal constraint, and we show a reduction in required communication rounds by 10-100x as compared to synchronized stochastic gradient descent.", "year": 2016, "venue": "International Conference on Artificial Intelligence and Statistics", "authors": [ "H. B. McMahan", "Eider Moore", "Daniel Ramage", "S. Hampson", "B. A. Y. Arcas" ], "externalIds": { "MAG": "2950745363", "DBLP": "conf/aistats/McMahanMRHA17", "ArXiv": "1602.05629", "CorpusId": 14955348 }, "url": "https://www.semanticscholar.org/paper/d1dbf643447405984eeef098b1b320dee0b3b8a7", "referenceCount": 50, "citationCount": 13806, "influentialCitationCount": 3346, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Newton Sketch: A Near Linear-Time Optimization Algorithm with Linear-Quadratic Convergence", "abstract": "We propose a randomized second-order method for optimization known as the Newton Sketch: it is based on performing an approximate Newton step using a randomly projected or sub-sampled Hessian. For self-concordant functions, we prove that the algorithm has super-linear convergence with exponentially high probability, with convergence and complexity guarantees that are independent of condition numbers and related problem-dependent quantities. Given a suitable initialization, similar guarantees also hold for strongly convex and smooth objectives without self-concordance. When implemented using randomized projections based on a sub-sampled Hadamard basis, the algorithm typically has substantially lower complexity than Newton's method. 
We also describe extensions of our methods to programs involving convex constraints that are equipped with self-concordant barriers. We discuss and illustrate applications to linear programs, quadratic programs with convex constraints, logistic regression and other generalized linear models, as well as semidefinite programs.", "year": 2015, "venue": "SIAM Journal on Optimization", "authors": [ "Mert Pilanci", "M. Wainwright" ], "externalIds": { "MAG": "2963060476", "ArXiv": "1505.02250", "DBLP": "journals/corr/PilanciW15", "DOI": "10.1137/15M1021106", "CorpusId": 14393040 }, "url": "https://www.semanticscholar.org/paper/c39d9755c90ac48314064839e37839120b081c6b", "referenceCount": 42, "citationCount": 257, "influentialCitationCount": 28, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Introductory Lectures on Convex Optimization - A Basic Course", "abstract": null, "year": 2014, "venue": "Applied Optimization", "authors": [ "Y. Nesterov" ], "externalIds": { "MAG": "2124541940", "DBLP": "books/sp/Nesterov04", "DOI": "10.1007/978-1-4419-8853-9", "CorpusId": 62288331 }, "url": "https://www.semanticscholar.org/paper/d0b0c3e5a1e768490bc9b759685930541957508b", "referenceCount": 0, "citationCount": 5843, "influentialCitationCount": 970, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LIBSVM: A library for support vector machines", "abstract": "LIBSVM is a library for Support Vector Machines (SVMs). We have been actively developing this package since the year 2000. The goal is to help users to easily apply SVM to their applications. LIBSVM has gained wide popularity in machine learning and many other areas. In this article, we present all implementation details of LIBSVM. Issues such as solving SVM optimization problems theoretical convergence multiclass classification probability estimates and parameter selection are discussed in detail.", "year": 2011, "venue": "TIST", "authors": [ "Chih-Chung Chang", "Chih-Jen Lin" ], "externalIds": { "MAG": "332049209", "DBLP": "journals/tist/ChangL11", "DOI": "10.1145/1961189.1961199", "CorpusId": 961425 }, "url": "https://www.semanticscholar.org/paper/273dfbcb68080251f5e9ff38b4413d7bd84b10a1", "referenceCount": 61, "citationCount": 43306, "influentialCitationCount": 3681, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Convex Optimization", "abstract": "This textbook is based on lectures given by the authors at MIPT (Moscow), HSE (Moscow), FEFU (Vladivostok), V.I. Vernadsky KFU (Simferopol), ASU (Republic of Adygea), and the University of Grenoble-Alpes (Grenoble, France). First of all, the authors focused on the program of a two-semester course of lectures on convex optimization, which is given to students of MIPT. The first chapter of this book contains the materials of the first semester (\"Fundamentals of convex analysis and optimization\"), the second and third chapters contain the materials of the second semester (\"Numerical methods of convex optimization\"). The textbook has a number of features. First, in contrast to the classic manuals, this book does not provide proofs of all the theorems mentioned. This allowed, on one side, to describe more themes, but on the other side, made the presentation less self-sufficient. The second important point is that part of the material is advanced and is published in the Russian educational literature, apparently for the first time. 
Third, the accents that are given do not always coincide with the generally accepted accents in the textbooks that are now popular. First of all, we talk about a sufficiently advanced presentation of conic optimization, including robust optimization, as a vivid demonstration of the capabilities of modern convex analysis.", "year": 2010, "venue": "IEEE Transactions on Automatic Control", "authors": [ "Stephen P. Boyd", "L. Vandenberghe" ], "externalIds": { "ArXiv": "2106.01946", "DBLP": "journals/corr/abs-2106-01946", "MAG": "2913411363", "DOI": "10.1201/9781584888239-c32", "CorpusId": 37925315 }, "url": "https://www.semanticscholar.org/paper/0b14178e7d79ac426d0a39700e1ac8b2c6f2e752", "referenceCount": 340, "citationCount": 39962, "influentialCitationCount": 4425, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Robust Stochastic Approximation Approach to Stochastic Programming", "abstract": "In this paper we consider optimization problems where the objective function is given in a form of the expectation. A basic difficulty of solving such stochastic optimization problems is that the involved multidimensional integrals (expectations) cannot be computed with high accuracy. The aim of this paper is to compare two computational approaches based on Monte Carlo sampling techniques, namely, the stochastic approximation (SA) and the sample average approximation (SAA) methods. Both approaches, the SA and SAA methods, have a long history. Current opinion is that the SAA method can efficiently use a specific (say, linear) structure of the considered problem, while the SA approach is a crude subgradient method, which often performs poorly in practice. We intend to demonstrate that a properly modified SA approach can be competitive and even significantly outperform the SAA method for a certain class of convex stochastic problems. We extend the analysis to the case of convex-concave stochastic saddle point problems and present (in our opinion highly encouraging) results of numerical experiments.", "year": 2008, "venue": "SIAM Journal on Optimization", "authors": [ "A. Nemirovski", "A. Juditsky", "Guanghui Lan", "Alexander Shapiro" ], "externalIds": { "MAG": "1992208280", "DBLP": "journals/siamjo/NemirovskiJLS09", "DOI": "10.1137/070704277", "CorpusId": 268069803 }, "url": "https://www.semanticscholar.org/paper/da0877799c8daab985853c0aeed7c04f987bad4a", "referenceCount": 32, "citationCount": 421, "influentialCitationCount": 59, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Approximate nearest neighbors and the fast Johnson-Lindenstrauss transform", "abstract": "We introduce a new low-distortion embedding of $\\ell_2^d$ into $\\ell_p^{O(\\log n)}$ ($p=1,2$), called the Fast-Johnson-Lindenstrauss-Transform. The FJLT is faster than standard random projections and just as easy to implement. It is based upon the preconditioning of a sparse projection matrix with a randomized Fourier transform. Sparse random projections are unsuitable for low-distortion embeddings. We overcome this handicap by exploiting the \"Heisenberg principle\" of the Fourier transform, i.e., its local-global duality. The FJLT can be used to speed up search algorithms based on low-distortion embeddings in $\\ell_1$ and $\\ell_2$. We consider the case of approximate nearest neighbors in $\\ell_2^d$. We provide a faster algorithm using classical projections, which we then further speed up by plugging in the FJLT. 
We also give a faster algorithm for searching over the hypercube.", "year": 2006, "venue": "Symposium on the Theory of Computing", "authors": [ "Nir Ailon", "B. Chazelle" ], "externalIds": { "MAG": "2152402969", "DBLP": "conf/stoc/AilonC06", "DOI": "10.1145/1132516.1132597", "CorpusId": 490517 }, "url": "https://www.semanticscholar.org/paper/b4d6a34f9d25b7a7ca665087ad7cb82f58d89d51", "referenceCount": 37, "citationCount": 523, "influentialCitationCount": 76, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Fast Curvature Matrix-Vector Products for Second-Order Gradient Descent", "abstract": "We propose a generic method for iteratively approximating various second-order gradient steps-Newton, Gauss-Newton, Levenberg-Marquardt, and natural gradient-in linear time per iteration, using special curvature matrix-vector products that can be computed in O(n). Two recent acceleration techniques for on-line learning, matrix momentum and stochastic meta-descent (SMD), implement this approach. Since both were originally derived by very different routes, this offers fresh insight into their operation, resulting in further improvements to SMD.", "year": 2002, "venue": "Neural Computation", "authors": [ "N. Schraudolph" ], "externalIds": { "DBLP": "journals/neco/Schraudolph02", "MAG": "2130984546", "DOI": "10.1162/08997660260028683", "CorpusId": 11017566, "PubMed": "12079553" }, "url": "https://www.semanticscholar.org/paper/ffa94bba647817fa5e8f8d3250fc977435b5ca76", "referenceCount": 33, "citationCount": 358, "influentialCitationCount": 23, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science", "Medicine" ] }, { "title": "On the limited memory BFGS method for large scale optimization", "abstract": null, "year": 1989, "venue": "Mathematical programming", "authors": [ "Dong C. Liu", "J. Nocedal" ], "externalIds": { "DBLP": "journals/mp/LiuN89", "MAG": "2051434435", "DOI": "10.1007/BF01589116", "CorpusId": 5681609 }, "url": "https://www.semanticscholar.org/paper/1267fe36b5ece49a9d8f913eb67716a040bbcced", "referenceCount": 44, "citationCount": 7298, "influentialCitationCount": 606, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Numerical methods for unconstrained optimization and nonlinear equations", "abstract": "Preface 1. Introduction. Problems to be considered Characteristics of 'real-world' problems Finite-precision arithmetic and measurement of error Exercises 2. Nonlinear Problems in One Variable. What is not possible Newton's method for solving one equation in one unknown Convergence of sequences of real numbers Convergence of Newton's method Globally convergent methods for solving one equation in one uknown Methods when derivatives are unavailable Minimization of a function of one variable Exercises 3. Numerical Linear Algebra Background. Vector and matrix norms and orthogonality Solving systems of linear equations-matrix factorizations Errors in solving linear systems Updating matrix factorizations Eigenvalues and positive definiteness Linear least squares Exercises 4. Multivariable Calculus Background Derivatives and multivariable models Multivariable finite-difference derivatives Necessary and sufficient conditions for unconstrained minimization Exercises 5. Newton's Method for Nonlinear Equations and Unconstrained Minimization. 
Newton's method for systems of nonlinear equations Local convergence of Newton's method The Kantorovich and contractive mapping theorems Finite-difference derivative methods for systems of nonlinear equations Newton's method for unconstrained minimization Finite difference derivative methods for unconstrained minimization Exercises 6. Globally Convergent Modifications of Newton's Method. The quasi-Newton framework Descent directions Line searches The model-trust region approach Global methods for systems of nonlinear equations Exercises 7. Stopping, Scaling, and Testing. Scaling Stopping criteria Testing Exercises 8. Secant Methods for Systems of Nonlinear Equations. Broyden's method Local convergence analysis of Broyden's method Implementation of quasi-Newton algorithms using Broyden's update Other secant updates for nonlinear equations Exercises 9. Secant Methods for Unconstrained Minimization. The symmetric secant update of Powell Symmetric positive definite secant updates Local convergence of positive definite secant methods Implementation of quasi-Newton algorithms using the positive definite secant update Another convergence result for the positive definite secant method Other secant updates for unconstrained minimization Exercises 10. Nonlinear Least Squares. The nonlinear least-squares problem Gauss-Newton-type methods Full Newton-type methods Other considerations in solving nonlinear least-squares problems Exercises 11. Methods for Problems with Special Structure. The sparse finite-difference Newton method Sparse secant methods Deriving least-change secant updates Analyzing least-change secant methods Exercises Appendix A. A Modular System of Algorithms for Unconstrained Minimization and Nonlinear Equations (by Robert Schnabel) Appendix B. Test Problems (by Robert Schnabel) References Author Index Subject Index.", "year": 1983, "venue": "Prentice Hall series in computational mathematics", "authors": [ "J. Dennis", "Bobby Schnabel" ], "externalIds": { "DBLP": "books/daglib/0072307", "MAG": "2068484625", "DOI": "10.2307/2288097", "CorpusId": 27578127 }, "url": "https://www.semanticscholar.org/paper/e1053197256c6c3c0631377ec23a3f7dc1cb4781", "referenceCount": 1, "citationCount": 8028, "influentialCitationCount": 548, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "INEXACT NEWTON METHODS", "abstract": "A classical algorithm for solving the system of nonlinear equations $F(x) = 0$ is Newton’s method \\[ x_{k + 1} = x_k + s_k ,\\quad {\\text{where }}F'(x_k )s_k = - F(x_k ),\\quad x_0 {\\text{ given}}.\\]...", "year": 1982, "venue": "", "authors": [ "R. Dembo", "S. Eisenstat", "T. Steihaug" ], "externalIds": { "MAG": "2107501462", "DOI": "10.1137/0719025", "CorpusId": 122513309 }, "url": "https://www.semanticscholar.org/paper/8e9ef67f33ee5d027cd29b317cb0ee96df2b6f7a", "referenceCount": 12, "citationCount": 1642, "influentialCitationCount": 140, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "The Convergence of a Class of Double-rank Minimization Algorithms 1. General Considerations", "abstract": "This paper presents a more detailed analysis of a class of minimization algorithms, which includes as a special case the DFP (Davidon-Fletcher-Powell) method, than has previously appeared. Only quadratic functions are considered but particular attention is paid to the magnitude of successive errors and their dependence upon the initial matrix. 
On the basis of this a possible explanation of some of the observed characteristics of the class is tentatively suggested. PROBABLY the best-known algorithm for determining the unconstrained minimum of a function of many variables, where explicit expressions are available for the first partial derivatives, is that of Davidon (1959) as modified by Fletcher & Powell (1963). This algorithm has many virtues. It is simple and does not require at any stage the solution of linear equations. It minimizes a quadratic function exactly in a finite number of steps and this property makes convergence of this algorithm rapid, when applied to more general functions, in the neighbourhood of the solution. It is, at least in theory, stable since the iteration matrix H_j, which transforms the jth gradient into the jth step direction, may be shown to be positive definite. In practice the algorithm has been generally successful, but it has exhibited some puzzling behaviour. Broyden (1967) noted that H_j does not always remain positive definite, and attributed this to rounding errors. Pearson (1968) found that for some problems the solution was obtained more efficiently if H_j was reset to a positive definite matrix, often the unit matrix, at intervals during the computation. Bard (1968) noted that H_j could become singular, attributed this to rounding error and suggested the use of suitably chosen scaling factors as a remedy. In this paper we analyse the more general algorithm given by Broyden (1967), of which the DFP algorithm is a special case, and determine how for quadratic functions the choice of an arbitrary parameter affects convergence. We investigate how the successive errors depend, again for quadratic functions, upon the initial choice of iteration matrix paying particular attention to the cases where this is either the unit matrix or a good approximation to the inverse Hessian. We finally give a tentative explanation of some of the observed experimental behaviour in the case where the function to be minimized is not quadratic.", "year": 1970, "venue": "", "authors": [ "C. G. Broyden" ], "externalIds": { "MAG": "2989128237", "DOI": "10.1093/IMAMAT/6.1.76", "CorpusId": 53868113 }, "url": "https://www.semanticscholar.org/paper/6583e799ce560a21d5fc8bf726a9fa4accbf036b", "referenceCount": 0, "citationCount": 2519, "influentialCitationCount": 55, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Optimal Convergence Rates for Distributed Nystroem Approximation", "abstract": "The distributed kernel ridge regression (DKRR) has shown great potential in processing complicated tasks. However, DKRR only made use of the local samples that failed to capture the global characteristics. Besides, the existing optimal learning guarantees were provided in expectation and only pertain to the attainable case that the target regression lies exactly in the kernel space. In this paper, we propose distributed learning with globally-shared Nyström centers (DNyström), which utilizes global information across the local clients. We also study the statistical properties of DNyström in expectation and in probability, respectively, and obtain several state-of-the-art results with the minimax optimal learning rates. Note that, the optimal convergence rates for DNyström pertain to the non-attainable case, while the statistical results allow more partitions and require fewer Nyström centers. 
Finally, we conduct experiments on several real-world datasets to validate the effectiveness of the proposed algorithm, and the empirical results coincide with our theoretical findings.", "year": 2023, "venue": "Journal of machine learning research", "authors": [ "Jian Li", "Yong Liu", "Weiping Wang" ], "externalIds": { "DBLP": "journals/jmlr/00400023", "CorpusId": 259149895 }, "url": "https://www.semanticscholar.org/paper/05a1727ae8c4eb2122cf5f05addc7c4d5be63696", "referenceCount": 51, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Learning for Non-IID Data: From Theory to Algorithm", "abstract": null, "year": 2021, "venue": "Pacific Rim International Conference on Artificial Intelligence", "authors": [ "Bojian Wei", "Jian Li", "Yong Liu", "Weiping Wang" ], "externalIds": { "DBLP": "conf/pricai/WeiLLW21", "DOI": "10.1007/978-3-030-89188-6_3", "CorpusId": 239838392 }, "url": "https://www.semanticscholar.org/paper/38c78e69f9b0865ca786c7b4e8ffc9e71530e81d", "referenceCount": 41, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A method for solving the convex programming problem with convergence rate O(1/k^2)", "abstract": null, "year": 1983, "venue": "", "authors": [ "Y. Nesterov" ], "externalIds": { "MAG": "2936995161", "CorpusId": 145918791 }, "url": "https://www.semanticscholar.org/paper/8d3a318b62d2e970122da35b2a2e70a5d12cc16f", "referenceCount": 0, "citationCount": 4031, "influentialCitationCount": 522, "isOpenAccess": false, "fieldsOfStudy": [ "Biology" ] } ] }, "Robust Federated Learning Over the Air: Combating Heavy-Tailed Noise with Median Anchored Clipping": { "paper_title": "Robust Federated Learning Over the Air: Combating Heavy-Tailed Noise with Median Anchored Clipping", "arxiv_id": "2409.15100v1", "keyword": "federate learning", "authors": [ "Jiaxing Li", "Zihan Chen", "Kai Fong Ernest Chong", "Bikramjit Das", "Tony Q. S. Quek", "Howard H. Yang" ], "references": [ { "title": "Over-The-Air Federated Learning: Status Quo, Open Challenges, and Future Directions", "abstract": "The development of applications based on artificial intelligence and implemented over wireless networks is increasingly rapidly and is expected to grow dramatically in the future. The resulting demand for the aggregation of large amounts of data has caused serious communication bottlenecks in wireless networks and particularly at the network edge. Over-the-air federated learning (OTA-FL), leveraging the superposition feature of multi-access channels (MACs), enables users at the network edge to share spectrum resources and achieves efficient and low-latency global model aggregation. This paper provides a holistic review of progress in OTA-FL and points to potential future research directions. Specifically, we classify OTA-FL from the perspective of system settings, including single-antenna OTA-FL, multi-antenna OTA-FL, and OTA-FL with the aid of the emerging reconfigurable intelligent surface (RIS) technology, and the contributions of existing works in these areas are summarized. Moreover, we discuss the trust, security and privacy aspects of OTA-FL, and highlight concerns arising from security and privacy. Finally, challenges and potential research directions are discussed to promote the future development of OTA-FL in terms of improving system performance, reliability, and trustworthiness. 
Specifical challenges to be addressed include model distortion under channel fading, the ineffective OTA aggregation of local models trained on substantially unbalanced data, and the limited accessibility and verifiability of individual local models.", "year": 2023, "venue": "Fundamental Research", "authors": [ "Bingnan Xiao", "Xichen Yu", "Wei Ni", "Xin Wang", "H. Poor" ], "externalIds": { "ArXiv": "2307.00974", "DBLP": "journals/corr/abs-2307-00974", "DOI": "10.48550/arXiv.2307.00974", "CorpusId": 259316855 }, "url": "https://www.semanticscholar.org/paper/23fdf74a3ff55279b814173bbe53030838ce29b9", "referenceCount": 122, "citationCount": 12, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "Edge Intelligence Over the Air: Two Faces of Interference in Federated Learning", "abstract": "Federated edge learning is envisioned as the bedrock of enabling intelligence in next-generation wireless networks, but the limited spectral resources often constrain its scalability. In light of this challenge, a line of recent research suggested integrating analog over-the-air computations into federated edge learning systems, to exploit the superposition property of electromagnetic waves for fast aggregation of intermediate parameters and achieve (almost) unlimited scalability. Over-the-air computations also benefit the system in other aspects, such as low hardware cost, reduced access latency, and enhanced privacy protection. Despite these advantages, the interference introduced by wireless communications also influences various aspects of the model training process, while its importance is not well recognized yet. This article provides a comprehensive overview of the positive and negative effects of interference on over-the-air computation-based edge learning systems. The potential open issues and research trends are also discussed.", "year": 2023, "venue": "IEEE Communications Magazine", "authors": [ "Zihan Chen", "Howard H. Yang", "Tony Q. S. Quek" ], "externalIds": { "DBLP": "journals/cm/ChenYQ23", "ArXiv": "2306.10299", "DOI": "10.1109/MCOM.004.2200913", "CorpusId": 259203417 }, "url": "https://www.semanticscholar.org/paper/f874b26130e50c324f6f91935c37436e73a9d688", "referenceCount": 22, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Over-the-Air Consensus for Distributed Vehicle Platooning Control", "abstract": "A distributed control of vehicle platooning is referred to as distributed consensus (DC) since many autonomous vehicles (AVs) reach a consensus to move as one body with the same velocity and inter-distance. For DC control to be stable, other AVs' real-time position information should be inputted to each AV's controller via vehicle-to-vehicle (V2V) communications. On the other hand, too many V2V links should be simultaneously established and frequently retrained, causing frequent packet loss and longer communication latency. We propose a novel DC algorithm called over-the-air consensus (AirCons), a joint communication-and-control design with two key features to overcome the above limitations. First, exploiting a wireless signal's superposition and broadcasting properties renders all AVs' signals to converge to a specific value proportional to participating AVs' average position without individual V2V channel information. Second, the estimated average position is used to control each AV's dynamics instead of each AV's individual position. 
Through analytic and numerical studies, the effectiveness of the proposed AirCons designed on the state-of-the-art New Radio architecture is verified by showing a 14.22% control gain compared to the benchmark without the average position.", "year": 2022, "venue": "ICC 2023 - IEEE International Conference on Communications", "authors": [ "Jihoon Lee", "Yonghoon Jang", "Hansol Kim", "Seong-Lyun Kim", "Seung-Woo Ko" ], "externalIds": { "ArXiv": "2211.06225", "DBLP": "conf/icc/LeeJKKK23", "DOI": "10.1109/ICC45041.2023.10279484", "CorpusId": 253499281 }, "url": "https://www.semanticscholar.org/paper/632f3d188755145398a5c2cac7fd3d05bbe2da1d", "referenceCount": 17, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "A Survey on Over-the-Air Computation", "abstract": "Communication and computation are often viewed as separate tasks. This approach is very effective from the perspective of engineering as isolated optimizations can be performed. However, for many computation-oriented applications, the main interest is a function of the local information at the devices, rather than the local information itself. In such scenarios, information theoretical results show that harnessing the interference in a multiple access channel for computation, i.e., over-the-air computation (OAC), can provide a significantly higher achievable computation rate than separating communication and computation tasks. Moreover, the gap between OAC and separation in terms of computation rate increases with more participating nodes. Given this motivation, in this study, we provide a comprehensive survey on practical OAC methods. After outlining fundamentals related to OAC, we discuss the available OAC schemes with their pros and cons. We provide an overview of the enabling mechanisms for achieving reliable computation in the wireless channel. Finally, we summarize the potential applications of OAC and point out some future directions.", "year": 2022, "venue": "IEEE Communications Surveys and Tutorials", "authors": [ "Alphan Șahin", "Rui Yang" ], "externalIds": { "DBLP": "journals/comsur/SahinY23", "ArXiv": "2210.11350", "DOI": "10.1109/COMST.2023.3264649", "CorpusId": 253018673 }, "url": "https://www.semanticscholar.org/paper/2677d97764051f005b55ad931cd15381038f3b3b", "referenceCount": 267, "citationCount": 52, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Taming Fat-Tailed (\"Heavier-Tailed\" with Potentially Infinite Variance) Noise in Federated Learning", "abstract": "A key assumption in most existing works on FL algorithms' convergence analysis is that the noise in stochastic first-order information has a finite variance. Although this assumption covers all light-tailed (i.e., sub-exponential) and some heavy-tailed noise distributions (e.g., log-normal, Weibull, and some Pareto distributions), it fails for many fat-tailed noise distributions (i.e., ``heavier-tailed'' with potentially infinite variance) that have been empirically observed in the FL literature. To date, it remains unclear whether one can design convergent algorithms for FL systems that experience fat-tailed noise. 
This motivates us to fill this gap in this paper by proposing an algorithmic framework called FAT-Clipping (\\ul{f}ederated \\ul{a}veraging with \\ul{t}wo-sided learning rates and \\ul{clipping}), which contains two variants: FAT-Clipping per-round (FAT-Clipping-PR) and FAT-Clipping per-iteration (FAT-Clipping-PI). Specifically, for the largest $\\alpha \\in (1,2]$ such that the fat-tailed noise in FL still has a bounded $\\alpha$-moment, we show that both variants achieve $\\mathcal{O}((mT)^{\\frac{2-\\alpha}{\\alpha}})$ and $\\mathcal{O}((mT)^{\\frac{1-\\alpha}{3\\alpha-2}})$ convergence rates in the strongly-convex and general non-convex settings, respectively, where $m$ and $T$ are the numbers of clients and communication rounds. Moreover, at the expense of more clipping operations compared to FAT-Clipping-PR, FAT-Clipping-PI further enjoys a linear speedup effect with respect to the number of local updates at each client and being lower-bound-matching (i.e., order-optimal). Collectively, our results advance the understanding of designing efficient algorithms for FL systems that exhibit fat-tailed first-order oracle information.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Haibo Yang", "Pei-Yuan Qiu", "Jia Liu" ], "externalIds": { "DBLP": "journals/corr/abs-2210-00690", "ArXiv": "2210.00690", "DOI": "10.48550/arXiv.2210.00690", "CorpusId": 252683656 }, "url": "https://www.semanticscholar.org/paper/3ec3e525a760708a691cff24047d3ef7c051bb7d", "referenceCount": 50, "citationCount": 5, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Bandwidth Expansion for Over-the-Air Computation with One-Sided CSI", "abstract": "We consider a distributed computation problem over a multiple access channel (MAC), with N devices. It is known that over-the-air computation (OAC) can provide significant gains for this problem, but existing works are limited to the scenario with matched source and channel bandwidths. We propose OAC schemes for block-fading MACs that modulate the source to fit the available channel bandwidth in a wideband channel, while having channel state information (CSI) only at the transmitter or the receiver. Our results show that the proposed OAC scheme outperforms even ideal capacity-achieving digital schemes when the CSI is available only at the transmitter, and the distortion does not scale with the number of participating devices. We demonstrate the effectiveness of our proposed scheme in federated edge learning (FEEL), where OAC is used to aggregate model updates from the participating devices.", "year": 2022, "venue": "International Symposium on Information Theory", "authors": [ "N. Mital", "Deniz Gündüz" ], "externalIds": { "DBLP": "conf/isit/MitalG22", "DOI": "10.1109/ISIT50566.2022.9834270", "CorpusId": 251324213 }, "url": "https://www.semanticscholar.org/paper/497633aa736d1896fb59a4538dbbfa0a0a8f8d55", "referenceCount": 18, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Resource Consumption for Supporting Federated Learning in Wireless Networks", "abstract": "Federated learning (FL) has recently become one of the hottest focuses in wireless edge networks with the ever-increasing computing capability of user equipment (UE). In FL, UEs train local machine learning models and transmit them to an aggregator, where a global model is formed and then sent back to UEs. 
In wireless networks, local training and model transmission can be unsuccessful due to constrained computing resources, wireless channel impairments, bandwidth limitations, etc., which degrades FL performance in model accuracy and/or training time. Moreover, we need to quantify the benefits and cost of deploying edge intelligence, as model training and transmission consume certain amount of resources. Therefore, it is imperative to deeply understand the relationship between FL performance and multiple-dimensional resources. In this paper, we construct an analytical model to investigate the relationship between the FL model accuracy and consumed resources in FL empowered wireless edge networks. Based on the analytical model, we explicitly quantify the model accuracy, available computing resources and communication resources. Numerical results validate the effectiveness of our theoretical modeling and analysis, and demonstrate the trade-off between the communication and computing resources for achieving a certain model accuracy.", "year": 2022, "venue": "IEEE Transactions on Wireless Communications", "authors": [ "Yijing Liu", "Shuang Qin", "Yao Sun", "G. Feng" ], "externalIds": { "DBLP": "journals/twc/LiuQSF22", "ArXiv": "2204.03850", "DOI": "10.1109/TWC.2022.3181611", "CorpusId": 248069263 }, "url": "https://www.semanticscholar.org/paper/5021969846185cb03ed3e742c7e9e36be6dc2fa5", "referenceCount": 39, "citationCount": 13, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Learning Based on Dynamic Regularization", "abstract": "We propose a novel federated learning method for distributively training neural network models, where the server orchestrates cooperation between a subset of randomly chosen devices in each round. We view Federated Learning problem primarily from a communication perspective and allow more device level computations to save transmission costs. We point out a fundamental dilemma, in that the minima of the local-device level empirical loss are inconsistent with those of the global empirical loss. Different from recent prior works, that either attempt inexact minimization or utilize devices for parallelizing gradient computation, we propose a dynamic regularizer for each device at each round, so that in the limit the global and device solutions are aligned. We demonstrate both through empirical results on real and synthetic data as well as analytical results that our scheme leads to efficient training, in both convex and non-convex settings, while being fully agnostic to device heterogeneity and robust to large number of devices, partial participation and unbalanced data.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "D. A. E. Acar", "Yue Zhao", "Ramon Matas Navarro", "Matthew Mattina", "P. Whatmough", "Venkatesh Saligrama" ], "externalIds": { "DBLP": "journals/corr/abs-2111-04263", "ArXiv": "2111.04263", "CorpusId": 235614315 }, "url": "https://www.semanticscholar.org/paper/5a3d70689925df014c46d1cd50dfc8a368cb4c86", "referenceCount": 42, "citationCount": 594, "influentialCitationCount": 110, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Revisiting Analog Over-the-Air Machine Learning: The Blessing and Curse of Interference", "abstract": "We study a distributed machine learning problem carried out by an edge server and multiple agents in a wireless network. 
The objective is to minimize a global function that is a sum of the agents’ local loss functions. And the optimization is conducted by analog over-the-air model training. Specifically, each agent modulates its local gradient onto a set of waveforms and transmits to the edge server simultaneously. From the received analog signal the edge server extracts a noisy aggregated gradient which is distorted by the channel fading and interference, and uses it to update the global model and feedbacks to all the agents for another round of local computing. Since the electromagnetic interference generally exhibits a heavy-tailed intrinsic, we use the $\\alpha$-stable distribution to model its statistic. In consequence, the global gradient has an infinite variance that hinders the use of conventional techniques for convergence analysis that rely on second-order moments’ existence. To circumvent this challenge, we take a new route to establish the analysis of convergence rate, as well as generalization error, of the algorithm. We also show that the training algorithm can be run in tandem with the momentum scheme to accelerate the convergence. Our analyses reveal a two-sided effect of the interference on the overall training procedure. On the negative side, heavy tail noise slows down the convergence rate of the model training: the heavier the tail in the distribution of interference, the slower the algorithm converges. On the positive side, heavy tail noise has the potential to increase the generalization power of the trained model: the heavier the tail, the better the model generalizes. This perhaps counterintuitive conclusion implies that the prevailing thinking on interference – that it is only detrimental to the edge learning system – is outdated and we shall seek new techniques that exploit, rather than simply mitigate, the interference for better machine learning in wireless networks.", "year": 2021, "venue": "IEEE Journal on Selected Topics in Signal Processing", "authors": [ "H. Yang", "Zihan Chen", "Tony Q. S. Quek", "H. Poor" ], "externalIds": { "ArXiv": "2107.11733", "DBLP": "journals/corr/abs-2107-11733", "DOI": "10.1109/jstsp.2021.3139231", "CorpusId": 236429017 }, "url": "https://www.semanticscholar.org/paper/7f295726a3ad2ed2c12db5037a241678c29d2793", "referenceCount": 37, "citationCount": 31, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Turning Channel Noise into an Accelerator for Over-the-Air Principal Component Analysis", "abstract": "The enormous data distributed at the network edge and ubiquitous connectivity have led to the emergence of the new paradigm of distributed machine learning and large-scale data analytics. Distributed principal component analysis (PCA) concerns finding a low-dimensional subspace that contains the most important information of high-dimensional data distributed over the network edge. The subspace is useful for distributed data compression and feature extraction. This work advocates the application of over-the-air federated learning to efficient implementation of distributed PCA in a wireless network under a data-privacy constraint, termed AirPCA. The design features the exploitation of the waveform-superposition property of a multi-access channel to realize over-the-air aggregation of local subspace updates computed and simultaneously transmitted by devices to a server, thereby reducing the multi-access latency. 
The original drawback of this class of techniques, namely channel-noise perturbation to uncoded analog modulated signals, is turned into a mechanism for escaping from saddle points during stochastic gradient descent (SGD) in the AirPCA algorithm. As a result, the convergence of the AirPCA algorithm is accelerated. To materialize the idea, descent speeds in different types of descent regions are analyzed mathematically using martingale theory by accounting for wireless propagation and techniques including broadband transmission, over-the-air aggregation, channel fading and noise. The results reveal the accelerating effect of noise in saddle regions and the opposite effect in other types of regions. The insight and results are applied to designing an online scheme for adapting receive signal power to the type of current descent region. Specifically, the scheme amplifies the noise effects in saddle regions by reducing signal power and applies the power savings to suppressing the effects in other regions. From experiments using real datasets, such power control is found to accelerate convergence while achieving the same convergence accuracy as in the ideal case of centralized PCA.", "year": 2021, "venue": "IEEE Transactions on Wireless Communications", "authors": [ "Zezhong Zhang", "Guangxu Zhu", "Rui Wang", "V. Lau", "Kaibin Huang" ], "externalIds": { "DBLP": "journals/corr/abs-2104-10095", "ArXiv": "2104.10095", "DOI": "10.1109/twc.2022.3162868", "CorpusId": 233306967 }, "url": "https://www.semanticscholar.org/paper/45fd79ab82656f6791533f6aa3f16031d92a2e3e", "referenceCount": 42, "citationCount": 27, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Analog Gradient Aggregation for Federated Learning Over Wireless Networks: Customized Design and Convergence Analysis", "abstract": "This article investigates the analog gradient aggregation (AGA) solution to overcome the communication bottleneck for wireless federated learning applications by exploiting the idea of analog over-the-air transmission. Despite the various advantages, this special transmission solution also brings new challenges to both transceiver design and learning algorithm design due to the nonstationary local gradients and the time-varying wireless channels in different communication rounds. To address these issues, we propose a novel design of both the transceiver and learning algorithm for the AGA solution. In particular, the parameters in the transceiver are optimized with the consideration of the nonstationarity in the local gradients based on a simple feedback variable. Moreover, a novel learning rate design is proposed for the stochastic gradient descent algorithm, which is adaptive to the quality of the gradient estimation. Theoretical analyses are provided on the convergence rate of the proposed AGA solution. Finally, the effectiveness of the proposed solution is confirmed by two separate experiments based on linear regression and the shallow neural network. The simulation results verify that the proposed solution outperforms various state-of-the-art baseline schemes with a much faster convergence speed.", "year": 2021, "venue": "IEEE Internet of Things Journal", "authors": [ "Huayan Guo", "An Liu", "V. 
Lau" ], "externalIds": { "DBLP": "journals/iotj/GuoLL21", "MAG": "3036822879", "DOI": "10.1109/JIOT.2020.3002925", "CorpusId": 226544825 }, "url": "https://www.semanticscholar.org/paper/50e6fe1747997c278ca18e5c26b5053a117a19bf", "referenceCount": 40, "citationCount": 90, "influentialCitationCount": 8, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Over-the-Air Computing for Wireless Data Aggregation in Massive IoT", "abstract": "Wireless data aggregation (WDA), referring to aggregating data distributed at devices (e.g., sensors and smartphones), is a common operation in 5G-and-beyond machine-type communications to support the Internet of Things (IoT), which lays the foundation for diversified applications such as distributed sensing, learning, and control. Conventional WDA techniques, which are designed in a task-agnostic manner separating communication and computation, encounter difficulty in accommodating the massive access under the limited radio resource and stringent latency constraints imposed by emerging applications or tasks (e.g, auto-driving). To address this issue, over-the-air computation (AirComp) is being developed as a new task-oriented solution for WDA by seamlessly integrating communication and computation. By exploiting the waveform superposition property of multiple access channels, AirComp turns the air into a computer for computing and communicating functions of distributed data at many devices, thereby allowing low-latency WDA over massive IoT networks. In view of growing interest in Air-Comp, this article provides a timely overview of the technology by introducing basic principles, discussing advanced techniques and applications, and identifying promising research opportunities.", "year": 2020, "venue": "IEEE wireless communications", "authors": [ "Guangxu Zhu", "Jie Xu", "Kaibin Huang", "Shuguang Cui" ], "externalIds": { "DBLP": "journals/wc/ZhuXHC21", "MAG": "3103139084", "DOI": "10.1109/MWC.011.2000467", "CorpusId": 226976531 }, "url": "https://www.semanticscholar.org/paper/2c75d11ef5798b68979469ccbc8685b0414fdec4", "referenceCount": 15, "citationCount": 143, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Over-the-Air Computation in Correlated Channels", "abstract": "Over-the-Air (OTA) computation is the problem of computing functions of distributed data without transmitting the entirety of the data to a central point. By avoiding such costly transmissions, OTA computation schemes can achieve a better-than-linear (depending on the function, often logarithmic or even constant) scaling of the communication cost as the number of transmitters grows. In this work, we propose and analyze an analog OTA computation scheme for a class of functions that contains linear functions as well as some nonlinear functions such as $p$-norms of vectors. We prove error bounds that are valid for fast-fading channels and all distributions of fading and noise in the class of sub-Gaussian distributions. This class includes Gaussian distributions, but also many other practically relevant cases such as Class A Middleton noise and fading with dominant line-of-sight components. Moreover, there can be correlations in the fading and noise so that the presented results also apply to, for example, block fading channels and channels with bursty interference. 
There is no assumption that the distributed function arguments follow a particular probability law; in particular, they do not need to be independent or identically distributed. Our analysis is nonasymptotic and therefore provides error bounds that are valid for a finite number of channel uses. OTA computation has a huge potential for reducing communication cost in applications such as Machine Learning (ML)-based distributed anomaly detection in large wireless sensor networks. We illustrate this potential through extensive numerical simulations.", "year": 2020, "venue": "IEEE Transactions on Signal Processing", "authors": [ "M. Frey", "I. Bjelakovic", "S. Stańczak" ], "externalIds": { "DBLP": "journals/tsp/FreyBS21", "MAG": "3195068536", "ArXiv": "2101.04690", "DOI": "10.1109/TSP.2021.3106115", "CorpusId": 227049227 }, "url": "https://www.semanticscholar.org/paper/c59c256b16b02532de3e2c8586d0cfa2bce0b415", "referenceCount": 62, "citationCount": 21, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Experimental Evidence for Heavy Tailed Interference in the IoT", "abstract": "5G and beyond sees an ever increasing density of connected things. As not all devices are coordinated, there are limited opportunities to mitigate interference. As such, it is crucial to characterize the interference in order to understand its impact on coding, waveform and receiver design. While a number of theoretical models have been developed for the interference statistics in communications for the IoT, there is very little experimental validation. In this letter, we address this key gap in understanding by performing statistical analysis on recent measurements in the unlicensed 863 MHz to 870 MHz band in different regions of Aalborg, Denmark. In particular, we show that the measurement data suggests the distribution of the interference power is heavy tailed, confirming predictions from theoretical models.", "year": 2020, "venue": "IEEE Communications Letters", "authors": [ "L. Clavier", "T. Pedersen", "I. Larrad", "Mads Lauridsen", "Malcolm Egan" ], "externalIds": { "MAG": "3047717353", "DBLP": "journals/icl/ClavierPLLE21", "DOI": "10.1109/LCOMM.2020.3034430", "CorpusId": 216123537 }, "url": "https://www.semanticscholar.org/paper/27d42a0c605ab0681c7e77437ed6caae9519273d", "referenceCount": 17, "citationCount": 26, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Threats to Federated Learning: A Survey", "abstract": "With the emergence of data silos and popular privacy awareness, the traditional centralized approach of training artificial intelligence (AI) models is facing strong challenges. Federated learning (FL) has recently emerged as a promising solution under this new reality. Existing FL protocol design has been shown to exhibit vulnerabilities which can be exploited by adversaries both within and without the system to compromise data privacy. It is thus of paramount importance to make FL system designers to be aware of the implications of future FL algorithm design on privacy-preservation. Currently, there is no survey on this topic. In this paper, we bridge this important gap in FL literature. By providing a concise introduction to the concept of FL, and a unique taxonomy covering threat models and two major attacks on FL: 1) poisoning attacks and 2) inference attacks, this paper provides an accessible review of this important topic. 
We highlight the intuitions, key techniques as well as fundamental assumptions adopted by various attacks, and discuss promising future research directions towards more robust privacy preservation in FL.", "year": 2020, "venue": "arXiv.org", "authors": [ "Lingjuan Lyu", "Han Yu", "Qiang Yang" ], "externalIds": { "ArXiv": "2003.02133", "MAG": "3010262580", "DBLP": "journals/corr/abs-2003-02133", "CorpusId": 211990905 }, "url": "https://www.semanticscholar.org/paper/f3b684f3d2ddd29134c842f6d31664157703a089", "referenceCount": 46, "citationCount": 393, "influentialCitationCount": 21, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Measuring the Effects of Non-Identical Data Distribution for Federated Visual Classification", "abstract": "Federated Learning enables visual models to be trained in a privacy-preserving way using real-world data from mobile devices. Given their distributed nature, the statistics of the data across these devices is likely to differ significantly. In this work, we look at the effect such non-identical data distributions has on visual classification via Federated Learning. We propose a way to synthesize datasets with a continuous range of identicalness and provide performance measures for the Federated Averaging algorithm. We show that performance degrades as distributions differ more, and propose a mitigation strategy via server momentum. Experiments on CIFAR-10 demonstrate improved classification performance over a range of non-identicalness, with classification accuracy improved from 30.1% to 76.9% in the most skewed settings.", "year": 2019, "venue": "arXiv.org", "authors": [ "T. Hsu", "Qi", "Matthew Brown" ], "externalIds": { "ArXiv": "1909.06335", "MAG": "2972570881", "DBLP": "journals/corr/abs-1909-06335", "CorpusId": 202572978 }, "url": "https://www.semanticscholar.org/paper/46d8c9e2dc9c12615eb5f6813d18f967d61c7e0d", "referenceCount": 13, "citationCount": 909, "influentialCitationCount": 112, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Federated Learning: Challenges, Methods, and Future Directions", "abstract": "Federated learning involves training statistical models over remote devices or siloed data centers, such as mobile phones or hospitals, while keeping data localized. Training in heterogeneous and potentially massive networks introduces novel challenges that require a fundamental departure from standard approaches for large-scale machine learning, distributed optimization, and privacy-preserving data analysis. 
In this article, we discuss the unique characteristics and challenges of federated learning, provide a broad overview of current approaches, and outline several directions of future work that are relevant to a wide range of research communities.", "year": 2019, "venue": "IEEE Signal Processing Magazine", "authors": [ "Tian Li", "Anit Kumar Sahu", "Ameet Talwalkar", "Virginia Smith" ], "externalIds": { "DBLP": "journals/corr/abs-1908-07873", "ArXiv": "1908.07873", "MAG": "3103802018", "DOI": "10.1109/MSP.2020.2975749", "CorpusId": 201126242 }, "url": "https://www.semanticscholar.org/paper/49bdeb07b045dd77f0bfe2b44436608770235a23", "referenceCount": 156, "citationCount": 3706, "influentialCitationCount": 215, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "On Analog Gradient Descent Learning Over Multiple Access Fading Channels", "abstract": "We consider a distributed learning problem over multiple access channel (MAC) using a large wireless network. The computation is made by the network edge and is based on received data from a large number of distributed nodes which transmit over a noisy fading MAC. The objective function is a sum of the nodes’ local loss functions. This problem has attracted a growing interest in distributed sensing systems, and more recently in federated learning. We develop a novel Gradient-Based Multiple Access (GBMA) algorithm to solve the distributed learning problem over MAC. Specifically, the nodes transmit an analog function of the local gradient using common shaping waveforms and the network edge receives a superposition of the analog transmitted signals used for updating the estimate. GBMA does not require power control or beamforming to cancel the fading effect as in other algorithms, and operates directly with noisy distorted gradients. We analyze the performance of GBMA theoretically, and prove that it can approach the convergence rate of the centralized gradient descent (GD) algorithm in large networks. Specifically, we establish a finite-sample bound of the error for both convex and strongly convex loss functions with Lipschitz gradient. Furthermore, we provide energy scaling laws for approaching the centralized convergence rate as the number of nodes increases. Finally, experimental results support the theoretical findings, and demonstrate strong performance of GBMA using synthetic and real data.", "year": 2019, "venue": "IEEE Transactions on Signal Processing", "authors": [ "Tomer Sery", "Kobi Cohen" ], "externalIds": { "MAG": "3018183923", "DBLP": "journals/tsp/SeryC20", "ArXiv": "1908.07463", "DOI": "10.1109/TSP.2020.2989580", "CorpusId": 201106136 }, "url": "https://www.semanticscholar.org/paper/1dba0a0091768dd79fb91605053445da2f4c6246", "referenceCount": 61, "citationCount": 142, "influentialCitationCount": 20, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics", "Psychology" ] }, { "title": "Machine Learning at the Wireless Edge: Distributed Stochastic Gradient Descent Over-the-Air", "abstract": "We study collaborative machine learning at the wireless edge, where power and bandwidth-limited devices (workers), with limited local datasets, implement distributed stochastic gradient descent (DSGD) over-the-air with the help of a remote parameter server (PS). We consider a wireless multiple access channel (MAC) from the workers to the PS for communicating the local gradient estimates. 
We first introduce a digital DSGD (D-DSGD) scheme, assuming that the workers operate on the boundary of the MAC capacity region at each iteration of the DSGD algorithm, and digitize their estimates within the bit budget allowed by the employed power allocation. We then introduce an analog scheme, called A-DSGD, motivated by the additive nature of the wireless MAC, where the workers send their gradient estimates over the MAC through the available channel bandwidth without employing any digital code. Numerical results show that A-DSGD converges much faster than D-DSGD. The improvement is particularly compelling at low power and low bandwidth regimes. We also observe that the performance of A-DSGD improves with the number of workers, while D-DSGD deteriorates, limiting the ability of the latter in harnessing the computation power of many edge devices.", "year": 2019, "venue": "International Symposium on Information Theory", "authors": [ "M. Amiri", "Deniz Gündüz" ], "externalIds": { "MAG": "2975064142", "DBLP": "journals/corr/abs-1901-00844", "DOI": "10.1109/ISIT.2019.8849334", "CorpusId": 57375790 }, "url": "https://www.semanticscholar.org/paper/eeaedfddff1e377e4eabaeb0e45cab4e9e5d6f65", "referenceCount": 45, "citationCount": 469, "influentialCitationCount": 54, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Federated Learning via Over-the-Air Computation", "abstract": "The stringent requirements for low-latency and privacy of the emerging high-stake applications with intelligent devices such as drones and smart vehicles make the cloud computing inapplicable in these scenarios. Instead, edge machine learning becomes increasingly attractive for performing training and inference directly at network edges without sending data to a centralized data center. This stimulates a nascent field termed as federated learning for training a machine learning model on computation, storage, energy and bandwidth limited mobile devices in a distributed manner. To preserve data privacy and address the issues of unbalanced and non-IID data points across different devices, the federated averaging algorithm has been proposed for global model aggregation by computing the weighted average of locally updated model at each selected device. However, the limited communication bandwidth becomes the main bottleneck for aggregating the locally computed updates. We thus propose a novel over-the-air computation based approach for fast global model aggregation via exploring the superposition property of a wireless multiple-access channel. This is achieved by joint device selection and beamforming design, which is modeled as a sparse and low-rank optimization problem to support efficient algorithms design. To achieve this goal, we provide a difference-of-convex-functions (DC) representation for the sparse and low-rank function to enhance sparsity and accurately detect the fixed-rank constraint in the procedure of device selection. A DC algorithm is further developed to solve the resulting DC program with global convergence guarantees. The algorithmic advantages and admirable performance of the proposed methodologies are demonstrated through extensive numerical results.", "year": 2018, "venue": "IEEE Transactions on Wireless Communications", "authors": [ "Kai Yang", "Tao Jiang", "Yuanming Shi", "Z. 
Ding" ], "externalIds": { "ArXiv": "1812.11750", "MAG": "2908240230", "DBLP": "journals/corr/abs-1812-11750", "DOI": "10.1109/TWC.2019.2961673", "CorpusId": 57189445 }, "url": "https://www.semanticscholar.org/paper/957309ccfdb9d1263cd778c046c807ebee7aab1b", "referenceCount": 59, "citationCount": 788, "influentialCitationCount": 81, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Broadband Analog Aggregation for Low-Latency Federated Edge Learning", "abstract": "To leverage rich data distributed at the network edge, a new machine-learning paradigm, called edge learning, has emerged where learning algorithms are deployed at the edge for providing intelligent services to mobile users. While computing speeds are advancing rapidly, the communication latency is becoming the bottleneck of fast edge learning. To address this issue, this work is focused on designing a low-latency multi-access scheme for edge learning. To this end, we consider a popular privacy-preserving framework, federated edge learning (FEEL), where a global AI-model at an edge-server is updated by aggregating (averaging) local models trained at edge devices. It is proposed that the updates simultaneously transmitted by devices over broadband channels should be analog aggregated “over-the-air” by exploiting the waveform-superposition property of a multi-access channel. Such broadband analog aggregation (BAA) results in dramatical communication-latency reduction compared with the conventional orthogonal access (i.e., OFDMA). In this work, the effects of BAA on learning performance are quantified targeting a single-cell random network. First, we derive two tradeoffs between communication-and-learning metrics, which are useful for network planning and optimization. The power control (“truncated channel inversion”) required for BAA results in a tradeoff between the update-reliability [as measured by the receive signal-to-noise ratio (SNR)] and the expected update-truncation ratio. Consider the scheduling of cell-interior devices to constrain path loss. This gives rise to the other tradeoff between the receive SNR and fraction of data exploited in learning. Next, the latency-reduction ratio of the proposed BAA with respect to the traditional OFDMA scheme is proved to scale almost linearly with the device population. Experiments based on a neural network and a real dataset are conducted for corroborating the theoretical results.", "year": 2018, "venue": "IEEE Transactions on Wireless Communications", "authors": [ "Guangxu Zhu", "Yong Wang", "Kaibin Huang" ], "externalIds": { "MAG": "2944099577", "DBLP": "journals/twc/ZhuWH20", "ArXiv": "1812.11494", "DOI": "10.1109/TWC.2019.2946245", "CorpusId": 58004591 }, "url": "https://www.semanticscholar.org/paper/43572a7cc087e388f7f312a0f2e17915682ff27c", "referenceCount": 45, "citationCount": 583, "influentialCitationCount": 86, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Wireless Network Intelligence at the Edge", "abstract": "Fueled by the availability of more data and computing power, recent breakthroughs in cloud-based machine learning (ML) have transformed every aspect of our lives from face recognition and medical diagnosis to natural language processing. However, classical ML exerts severe demands in terms of energy, memory, and computing resources, limiting their adoption for resource-constrained edge devices. 
The new breed of intelligent devices and high-stake applications (drones, augmented/virtual reality, autonomous systems, and so on) requires a novel paradigm change calling for distributed, low-latency and reliable ML at the wireless network edge (referred to as edge ML). In edge ML, training data are unevenly distributed over a large number of edge nodes, which have access to a tiny fraction of the data. Moreover, training and inference are carried out collectively over wireless links, where edge devices communicate and exchange their learned models (not their private data). In a first of its kind, this article explores the key building blocks of edge ML, different neural network architectural splits and their inherent tradeoffs, as well as theoretical and technical enablers stemming from a wide range of mathematical disciplines. Finally, several case studies pertaining to various high-stake applications are presented to demonstrate the effectiveness of edge ML in unlocking the full potential of 5G and beyond.", "year": 2018, "venue": "Proceedings of the IEEE", "authors": [ "Jihong Park", "S. Samarakoon", "M. Bennis", "M. Debbah" ], "externalIds": { "MAG": "2904648777", "DBLP": "journals/pieee/ParkSBD19", "ArXiv": "1812.02858", "DOI": "10.1109/JPROC.2019.2941458", "CorpusId": 54457410 }, "url": "https://www.semanticscholar.org/paper/7f7bb204806ed819b323953cf6ca04cc65a27698", "referenceCount": 212, "citationCount": 493, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "LEAF: A Benchmark for Federated Settings", "abstract": "Modern federated networks, such as those comprised of wearable devices, mobile phones, or autonomous vehicles, generate massive amounts of data each day. This wealth of data can help to learn models that can improve the user experience on each device. However, the scale and heterogeneity of federated data presents new challenges in research areas such as federated learning, meta-learning, and multi-task learning. As the machine learning community begins to tackle these challenges, we are at a critical time to ensure that developments made in these areas are grounded with realistic benchmarks. To this end, we propose LEAF, a modular benchmarking framework for learning in federated settings. LEAF includes a suite of open-source federated datasets, a rigorous evaluation framework, and a set of reference implementations, all geared towards capturing the obstacles and intricacies of practical federated environments.", "year": 2018, "venue": "arXiv.org", "authors": [ "S. Caldas", "Peter Wu", "Tian Li", "Jakub Konecný", "H. B. McMahan", "Virginia Smith", "Ameet Talwalkar" ], "externalIds": { "ArXiv": "1812.01097", "DBLP": "journals/corr/abs-1812-01097", "MAG": "2902113386", "CorpusId": 53701546 }, "url": "https://www.semanticscholar.org/paper/8dcbcaaf337d7bd22e580f1bb7a795ed4bb604fd", "referenceCount": 41, "citationCount": 1207, "influentialCitationCount": 289, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Communication-Efficient Learning of Deep Networks from Decentralized Data", "abstract": "Modern mobile devices have access to a wealth of data suitable for learning models, which in turn can greatly improve the user experience on the device. For example, language models can improve speech recognition and text entry, and image models can automatically select good photos. 
However, this rich data is often privacy sensitive, large in quantity, or both, which may preclude logging to the data center and training there using conventional approaches. We advocate an alternative that leaves the training data distributed on the mobile devices, and learns a shared model by aggregating locally-computed updates. We term this decentralized approach Federated Learning. \nWe present a practical method for the federated learning of deep networks based on iterative model averaging, and conduct an extensive empirical evaluation, considering five different model architectures and four datasets. These experiments demonstrate the approach is robust to the unbalanced and non-IID data distributions that are a defining characteristic of this setting. Communication costs are the principal constraint, and we show a reduction in required communication rounds by 10-100x as compared to synchronized stochastic gradient descent.", "year": 2016, "venue": "International Conference on Artificial Intelligence and Statistics", "authors": [ "H. B. McMahan", "Eider Moore", "Daniel Ramage", "S. Hampson", "B. A. Y. Arcas" ], "externalIds": { "MAG": "2950745363", "DBLP": "conf/aistats/McMahanMRHA17", "ArXiv": "1602.05629", "CorpusId": 14955348 }, "url": "https://www.semanticscholar.org/paper/d1dbf643447405984eeef098b1b320dee0b3b8a7", "referenceCount": 50, "citationCount": 13806, "influentialCitationCount": 3346, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Residual Learning for Image Recognition", "abstract": "Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers - 8× deeper than VGG nets [40] but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions1, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation.", "year": 2015, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Kaiming He", "X. 
Zhang", "Shaoqing Ren", "Jian Sun" ], "externalIds": { "DBLP": "conf/cvpr/HeZRS16", "MAG": "2949650786", "ArXiv": "1512.03385", "DOI": "10.1109/cvpr.2016.90", "CorpusId": 206594692 }, "url": "https://www.semanticscholar.org/paper/2c03df8b48bf3fa39054345bafabfeff15bfd11d", "referenceCount": 53, "citationCount": 172713, "influentialCitationCount": 28229, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On the difficulty of training recurrent neural networks", "abstract": "There are two widely known issues with properly training recurrent neural networks, the vanishing and the exploding gradient problems detailed in Bengio et al. (1994). In this paper we attempt to improve the understanding of the underlying issues by exploring these problems from an analytical, a geometric and a dynamical systems perspective. Our analysis is used to justify a simple yet effective solution. We propose a gradient norm clipping strategy to deal with exploding gradients and a soft constraint for the vanishing gradients problem. We validate empirically our hypothesis and proposed solutions in the experimental section.", "year": 2012, "venue": "International Conference on Machine Learning", "authors": [ "Razvan Pascanu", "Tomas Mikolov", "Yoshua Bengio" ], "externalIds": { "ArXiv": "1211.5063", "DBLP": "conf/icml/PascanuMB13", "MAG": "2949190276", "CorpusId": 14650762 }, "url": "https://www.semanticscholar.org/paper/84069287da0a6b488b8c933f3cb5be759cb6237e", "referenceCount": 36, "citationCount": 5046, "influentialCitationCount": 334, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Computing the geometric mean over multiple-access channels: Error analysis and comparisons", "abstract": "The paper deals with the problem of data transmission and function computation of the sensed data in wireless sensor networks, in which multiple sensor nodes transmit their data to one sink node over a wireless multiple-access channel. We focus on the problem of computing the geometric mean at the sink node by merging the data transmission and function computation into one step via an explicit utilization of channel collisions caused by simultaneous transmissions of sensor nodes. The paper provides the analysis of the estimation error and compares the scheme with traditional time division multiple-access based schemes to indicate potential for significant performance gains.", "year": 2010, "venue": "2010 Conference Record of the Forty Fourth Asilomar Conference on Signals, Systems and Computers", "authors": [ "Mario Goldenbaum", "S. Stańczak" ], "externalIds": { "MAG": "2548443295", "DOI": "10.1109/ACSSC.2010.5757936", "CorpusId": 45493600 }, "url": "https://www.semanticscholar.org/paper/d4a9b2a60ff1c2624f744c5800c97630a0be90f1", "referenceCount": 8, "citationCount": 14, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Computation Over Multiple-Access Channels", "abstract": "The problem of reliably reconstructing a function of sources over a multiple-access channel (MAC) is considered. It is shown that there is no source-channel separation theorem even when the individual sources are independent. Joint source-channel strategies are developed that are optimal when the structure of the channel probability transition matrix and the function are appropriately matched. Even when the channel and function are mismatched, these computation codes often outperform separation-based strategies. 
Achievable distortions are given for the distributed refinement of the sum of Gaussian sources over a Gaussian multiple-access channel with a joint source-channel lattice code. Finally, computation codes are used to determine the multicast capacity of finite-field multiple-access networks, thus linking them to network coding.", "year": 2007, "venue": "IEEE Transactions on Information Theory", "authors": [ "B. Nazer", "M. Gastpar" ], "externalIds": { "MAG": "2140227342", "DBLP": "journals/tit/NazerG07", "DOI": "10.1109/TIT.2007.904785", "CorpusId": 14570768 }, "url": "https://www.semanticscholar.org/paper/63c97a47dec8f96a1d0b3a4d2342a6212456aa47", "referenceCount": 47, "citationCount": 662, "influentialCitationCount": 51, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Stable Non-Gaussian Random Processes : Stochastic Models with Infinite Variance", "abstract": "Stable random variables on the real line Multivariate stable distributions Stable stochastic integrals Dependence structures of multivariate stable distributions Non-linear regression Complex stable stochastic integrals and harmonizable processes Self-similar processes Chentsov random fields Introduction to sample path properties Boundedness, continuity and oscillations Measurability, integrability and absolute continuity Boundedness and continuity via metric entropy Integral representation Historical notes and extensions.", "year": 1995, "venue": "", "authors": [ "G. Samorodnitsky", "M. Taqqu" ], "externalIds": { "MAG": "2022894179", "DOI": "10.1201/9780203738818", "CorpusId": 6903581 }, "url": "https://www.semanticscholar.org/paper/f095037659c85285e845c165ab5251e2086707bf", "referenceCount": 0, "citationCount": 2565, "influentialCitationCount": 322, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Statistical-Physical Models of Electromagnetic Interference", "abstract": "Most man-made and natural electromagnetic interference, or \"noise,\" are highly non-Gaussian random processes, whose degrading effects on system performance can be severe, particularly on most conventional systems, which are designed for optimal or near optimal performance against normal noise. In addition, the nature, origins, measurement, and prediction of the general EM interference environment are a major concern of any adequate spectral management program. Accordingly, this study is devoted to the development of analytically tractable, experimentally verifiable, statistical-physical models of such electromagnetic interference. Here, classification into three major types of noise is made: Class A (narrow band vis-á-vis the receiver), Class B (broad band vis-á-vis the receiver), and Class C (= Class A + Class B). First-order statistical models are constructed for the Class A and Class B cases. In particular, the APD (a posteriori probability distribution) or exceedance probability, PD, vis;P1 (ℰ > ℰo)A,B, (and the associated probability densities, pdf's w1(ℰ)A,B,[1]) of the envelope are obtained; (the phase is shown to be uniformly distributed in (0, 2π). These results are canonical, i.e., their analytic forms are invariant of the particular noise source and its quantifying parameter values, levels, etc. Class A interference is described by a 3-parameter model, Class B noise by a 6-parameter model.", "year": 1977, "venue": "IEEE transactions on electromagnetic compatibility (Print)", "authors": [ "D. 
Middleton" ], "externalIds": { "MAG": "3014449343", "DOI": "10.1109/TEMC.1977.303527", "CorpusId": 24855014 }, "url": "https://www.semanticscholar.org/paper/6bcc3c26b563783f949fff6cb3eddd78e64e2eab", "referenceCount": 11, "citationCount": 876, "influentialCitationCount": 73, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Learning Multiple Layers of Features from Tiny Images", "abstract": "Groups at MIT and NYU have collected a dataset of millions of tiny colour images from the web. It is, in principle, an excellent dataset for unsupervised training of deep generative models, but previous researchers who have tried this have found it dicult to learn a good set of lters from the images. We show how to train a multi-layer generative model that learns to extract meaningful features which resemble those found in the human visual cortex. Using a novel parallelization algorithm to distribute the work among multiple machines connected on a network, we show how training such a model can be done in reasonable time. A second problematic aspect of the tiny images dataset is that there are no reliable class labels which makes it hard to use for object recognition experiments. We created two sets of reliable labels. The CIFAR-10 set has 6000 examples of each of 10 classes and the CIFAR-100 set has 600 examples of each of 100 non-overlapping classes. Using these labels, we show that object recognition is signicantly improved by pre-training a layer of features on a large set of unlabeled tiny images.", "year": 2009, "venue": "", "authors": [ "A. Krizhevsky" ], "externalIds": { "MAG": "2945315962", "CorpusId": 18268744 }, "url": "https://www.semanticscholar.org/paper/5d90f06bb70a0a3dced62413346235c02b1aa086", "referenceCount": 15, "citationCount": 31347, "influentialCitationCount": 7621, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] } ] }, "Challenges of Generating Structurally Diverse Graphs": { "paper_title": "Challenges of Generating Structurally Diverse Graphs", "arxiv_id": "2409.18859v1", "keyword": "generative model", "authors": [ "Fedor Velikonivtsev", "Mikhail Mironov", "Liudmila Prokhorenkova" ], "references": [ { "title": "The Vendi Score: A Diversity Evaluation Metric for Machine Learning", "abstract": "Diversity is an important criterion for many areas of machine learning (ML), including generative modeling and dataset curation. However, existing metrics for measuring diversity are often domain-specific and limited in flexibility. In this paper, we address the diversity evaluation problem by proposing the Vendi Score, which connects and extends ideas from ecology and quantum statistical mechanics to ML. The Vendi Score is defined as the exponential of the Shannon entropy of the eigenvalues of a similarity matrix. This matrix is induced by a user-defined similarity function applied to the sample to be evaluated for diversity. In taking a similarity function as input, the Vendi Score enables its user to specify any desired form of diversity. Importantly, unlike many existing metrics in ML, the Vendi Score does not require a reference dataset or distribution over samples or labels, it is therefore general and applicable to any generative model, decoding algorithm, and dataset from any domain where similarity can be defined. We showcase the Vendi Score on molecular generative modeling where we found it addresses shortcomings of the current diversity metric of choice in that domain. 
We also applied the Vendi Score to generative models of images and decoding algorithms of text where we found it confirms known results about diversity in those domains. Furthermore, we used the Vendi Score to measure mode collapse, a known shortcoming of generative adversarial networks (GANs). In particular, the Vendi Score revealed that even GANs that capture all the modes of a labeled dataset can be less diverse than the original dataset. Finally, the interpretability of the Vendi Score allowed us to diagnose several benchmark ML datasets for diversity, opening the door for diversity-informed data augmentation.", "year": 2022, "venue": "Trans. Mach. Learn. Res.", "authors": [ "Dan Friedman", "Adji B. Dieng" ], "externalIds": { "DBLP": "journals/corr/abs-2210-02410", "ArXiv": "2210.02410", "DOI": "10.48550/arXiv.2210.02410", "CorpusId": 252715476 }, "url": "https://www.semanticscholar.org/paper/b03c078303326ff022f525fccdf028b73ccb1cb4", "referenceCount": 68, "citationCount": 60, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Physics", "Mathematics" ] }, { "title": "DiGress: Discrete Denoising diffusion for graph generation", "abstract": "This work introduces DiGress, a discrete denoising diffusion model for generating graphs with categorical node and edge attributes. Our model utilizes a discrete diffusion process that progressively edits graphs with noise, through the process of adding or removing edges and changing the categories. A graph transformer network is trained to revert this process, simplifying the problem of distribution learning over graphs into a sequence of node and edge classification tasks. We further improve sample quality by introducing a Markovian noise model that preserves the marginal distribution of node and edge types during diffusion, and by incorporating auxiliary graph-theoretic features. A procedure for conditioning the generation on graph-level features is also proposed. DiGress achieves state-of-the-art performance on molecular and non-molecular datasets, with up to 3x validity improvement on a planar graph dataset. It is also the first model to scale to the large GuacaMol dataset containing 1.3M drug-like molecules without the use of molecule-specific representations.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Clément Vignac", "Igor Krawczuk", "Antoine Siraudin", "Bohan Wang", "V. Cevher", "P. Frossard" ], "externalIds": { "DBLP": "conf/iclr/VignacKSWCF23", "ArXiv": "2209.14734", "DOI": "10.48550/arXiv.2209.14734", "CorpusId": 252595881 }, "url": "https://www.semanticscholar.org/paper/c65c4b68ac153176354a4c33c37f0ba1d86772c0", "referenceCount": 62, "citationCount": 211, "influentialCitationCount": 59, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SPECTRE : Spectral Conditioning Helps to Overcome the Expressivity Limits of One-shot Graph Generators", "abstract": "We approach the graph generation problem from a spectral perspective by first generating the dominant parts of the graph Laplacian spectrum and then building a graph matching these eigenvalues and eigenvectors. Spectral conditioning allows for direct modeling of the global and local graph structure and helps to overcome the expressivity and mode collapse issues of one-shot graph generators. Our novel GAN, called SPECTRE, enables the one-shot generation of much larger graphs than previously possible with one-shot models. 
SPECTRE outperforms state-of-the-art deep autoregressive generators in terms of modeling fidelity, while also avoiding expensive sequential generation and dependence on node ordering. A case in point, in sizable synthetic and real-world graphs SPECTRE achieves a 4-to-170 fold improvement over the best competitor that does not overfit and is 23-to-30 times faster than autoregressive generators.", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "Karolis Martinkus", "Andreas Loukas", "Nathanael Perraudin", "R. Wattenhofer" ], "externalIds": { "ArXiv": "2204.01613", "DBLP": "journals/corr/abs-2204-01613", "DOI": "10.48550/arXiv.2204.01613", "CorpusId": 247939990 }, "url": "https://www.semanticscholar.org/paper/a5889e0d2dea37f5fccf74bd4b74ece183604c2a", "referenceCount": 83, "citationCount": 52, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "GraphWorld: Fake Graphs Bring Real Insights for GNNs", "abstract": "Despite advances in the field of Graph Neural Networks (GNNs), only a small number (~5) of datasets are currently used to evaluate new models. This continued reliance on a handful of datasets provides minimal insight into the performance differences between models, and is especially challenging for industrial practitioners who are likely to have datasets which are very different from academic benchmarks. In the course of our work on GNN infrastructure and open-source software at Google, we have sought to develop benchmarks that are robust, tunable, scalable, and generalizable. In this work we introduce GraphWorld, a novel methodology and system for benchmarking GNN models on an arbitrarily-large population ofsynthetic graphs for any conceivable GNN task. GraphWorld allows a user to efficiently generate a world with millions of statistically diverse datasets. It is accessible, scalable, and easy to use. GraphWorld can be run on a single machine without specialized hardware, or it can be easily scaled up to run on arbitrary clusters or cloud frameworks. Using GraphWorld, a user has fine-grained control over graph generator parameters, and can benchmark arbitrary GNN models with built-in hyperparameter tuning. We present insights from GraphWorld experiments regarding the performance characteristics of thirteen GNN models and baselines over millions of benchmark datasets. We further show that GraphWorld efficiently explores regions of benchmark dataset space uncovered by standard benchmarks, revealing comparisons between models that have not been historically obtainable. Using GraphWorld, we also are able to study in-detail the relationship between graph properties and task performance metrics, which is nearly impossible with the classic collection of real-world benchmarks.", "year": 2022, "venue": "Knowledge Discovery and Data Mining", "authors": [ "John Palowitch", "Anton Tsitsulin", "Brandon Mayer", "Bryan Perozzi" ], "externalIds": { "DBLP": "journals/corr/abs-2203-00112", "ArXiv": "2203.00112", "DOI": "10.1145/3534678.3539203", "CorpusId": 247188065 }, "url": "https://www.semanticscholar.org/paper/cd991591640fd5cf1cfb58027a9872184450b685", "referenceCount": 55, "citationCount": 60, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On Evaluation Metrics for Graph Generative Models", "abstract": "In image generation, generative models can be evaluated naturally by visually inspecting model outputs. 
However, this is not always the case for graph generative models (GGMs), making their evaluation challenging. Currently, the standard process for evaluating GGMs suffers from three critical limitations: i) it does not produce a single score which makes model selection challenging, ii) in many cases it fails to consider underlying edge and node features, and iii) it is prohibitively slow to perform. In this work, we mitigate these issues by searching for scalar, domain-agnostic, and scalable metrics for evaluating and ranking GGMs. To this end, we study existing GGM metrics and neural-network-based metrics emerging from generative models of images that use embeddings extracted from a task-specific network. Motivated by the power of certain Graph Neural Networks (GNNs) to extract meaningful graph representations without any training, we introduce several metrics based on the features extracted by an untrained random GNN. We design experiments to thoroughly test metrics on their ability to measure the diversity and fidelity of generated graphs, as well as their sample and computational efficiency. Depending on the quantity of samples, we recommend one of two random-GNN-based metrics that we show to be more expressive than pre-existing metrics. While we focus on applying these metrics to GGM evaluation, in practice this enables the ability to easily compute the dissimilarity between any two sets of graphs regardless of domain. Our code is released at: https://github.com/uoguelph-mlrg/GGM-metrics.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Rylee Thompson", "Boris Knyazev", "Elahe Ghalebi", "Jungtaek Kim", "Graham W. Taylor" ], "externalIds": { "ArXiv": "2201.09871", "DBLP": "conf/iclr/Thompson0G0T22", "CorpusId": 246240044 }, "url": "https://www.semanticscholar.org/paper/410f0a0a2311c50e6dd2338f2708286ea8c87f23", "referenceCount": 55, "citationCount": 33, "influentialCitationCount": 8, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "How Much Space Has Been Explored? Measuring the Chemical Space Covered by Databases and Machine-Generated Molecules", "abstract": "Forming a molecular candidate set that contains a wide range of potentially effective compounds is crucial to the success of drug discovery. While most databases and machine-learning-based generation models aim to optimize particular chemical properties, there is limited literature on how to properly measure the coverage of the chemical space by those candidates included or generated. This problem is challenging due to the lack of formal criteria to select good measures of the chemical space. In this paper, we propose a novel evaluation framework for measures of the chemical space based on two analyses: an axiomatic analysis with three intuitive axioms that a good measure should obey, and an empirical analysis on the correlation between a measure and a proxy gold standard. Using this framework, we are able to identify #Circles, a new measure of chemical space coverage, which is superior to existing measures both analytically and empirically. We further evaluate how well the existing databases and generation models cover the chemical space in terms of #Circles. 
The results suggest that many generation models fail to explore a larger space over existing databases, which leads to new opportunities for improving generation models by encouraging exploration.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Yutong Xie", "Ziqiao Xu", "Jiaqi Ma", "Qiaozhu Mei" ], "externalIds": { "DBLP": "conf/iclr/XieX0M23", "ArXiv": "2112.12542", "CorpusId": 252780973 }, "url": "https://www.semanticscholar.org/paper/71b4adf2c937c54e1e1ac57e3ecfea94c05b8a1c", "referenceCount": 67, "citationCount": 12, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Neural algorithmic reasoning", "abstract": null, "year": 2021, "venue": "Patterns", "authors": [ "Petar Velickovic", "C. Blundell" ], "externalIds": { "DBLP": "journals/corr/abs-2105-02761", "PubMedCentral": "8276006", "ArXiv": "2105.02761", "DOI": "10.1016/j.patter.2021.100273", "CorpusId": 233864602, "PubMed": "34286298" }, "url": "https://www.semanticscholar.org/paper/438a91dae6c0c7be7457055258699c0ccc40f43b", "referenceCount": 15, "citationCount": 77, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine", "Mathematics" ] }, { "title": "Network comparison and the within-ensemble graph distance", "abstract": "Quantifying the differences between networks is a challenging and ever-present problem in network science. In recent years, a multitude of diverse, ad hoc solutions to this problem have been introduced. Here, we propose that simple and well-understood ensembles of random networks—such as Erdős–Rényi graphs, random geometric graphs, Watts–Strogatz graphs, the configuration model and preferential attachment networks—are natural benchmarks for network comparison methods. Moreover, we show that the expected distance between two networks independently sampled from a generative model is a useful property that encapsulates many key features of that model. To illustrate our results, we calculate this within-ensemble graph distance and related quantities for classic network models (and several parameterizations thereof) using 20 distance measures commonly used to compare graphs. The within-ensemble graph distance provides a new framework for developers of graph distances to better understand their creations and for practitioners to better choose an appropriate tool for their particular task.", "year": 2020, "venue": "Proceedings of the Royal Society A", "authors": [ "Harrison Hartle", "Brennan Klein", "S. McCabe", "Alexander Daniels", "G. St‐Onge", "Charles Murphy", "Laurent Hébert-Dufresne" ], "externalIds": { "MAG": "3106179373", "DBLP": "journals/corr/abs-2008-02415", "ArXiv": "2008.02415", "PubMedCentral": "7735290", "DOI": "10.1098/rspa.2019.0744", "CorpusId": 221006416, "PubMed": "33363435" }, "url": "https://www.semanticscholar.org/paper/359a46294cee5c437ba42cbb388402c76e75fe76", "referenceCount": 98, "citationCount": 36, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine", "Physics" ] }, { "title": "Comparing methods for comparing networks", "abstract": null, "year": 2019, "venue": "Scientific Reports", "authors": [ "Mattia Tantardini", "F. Ieva", "L. Tajoli", "C. 
Piccardi" ], "externalIds": { "PubMedCentral": "6879644", "MAG": "2991388718", "DOI": "10.1038/s41598-019-53708-y", "CorpusId": 208278792, "PubMed": "31772246" }, "url": "https://www.semanticscholar.org/paper/d2dc5081236a5e64260666e916f41c53879fc24b", "referenceCount": 69, "citationCount": 173, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "DEGREE-BASED GINI INDEX FOR GRAPHS", "abstract": "Abstract In Balaji and Mahmoud [1], the authors introduced a distance-based Gini index for rooted trees. In this paper, we introduce a degree-based Gini index (or just simply degree Gini index) for graphs. The latter index is a topological measure on a graph capturing the proximity to regular graphs. When applied across the random members of a class of graphs, we can identify an average measure of regularity for the class. Whence, we can compare the classes of graphs from the vantage point of closeness to regularity. We develop a simplified computational formula for the degree Gini index and study its extreme values. We show that the degree Gini index falls in the interval [0, 1). The main focus in our study is the degree Gini index for the class of binary trees. Via a left-packing transformation, we show that, for an arbitrary sequence of binary trees, the Gini index has inferior and superior limits in the interval [0, 1/4]. We also show, via the degree Gini index, that uniform rooted binary trees are more regular than binary search trees grown from random permutations.", "year": 2019, "venue": "Probability in the engineering and informational sciences (Print)", "authors": [ "Carly Domicolo", "Hosam M. Mahmoud" ], "externalIds": { "MAG": "2921680251", "DOI": "10.1017/S0269964819000044", "CorpusId": 127486153 }, "url": "https://www.semanticscholar.org/paper/1d24bb6b8266335b05ed80bf9e7cee3086f7c0a1", "referenceCount": 23, "citationCount": 10, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "NetLSD: Hearing the Shape of a Graph", "abstract": "Comparison among graphs is ubiquitous in graph analytics. However, it is a hard task in terms of the expressiveness of the employed similarity measure and the efficiency of its computation. Ideally, graph comparison should be invariant to the order of nodes and the sizes of compared graphs, adaptive to the scale of graph patterns, and scalable. Unfortunately, these properties have not been addressed together. Graph comparisons still rely on direct approaches, graph kernels, or representation-based methods, which are all inefficient and impractical for large graph collections. In this paper, we propose the Network Laplacian Spectral Descriptor (NetLSD): the first, to our knowledge, permutation- and size-invariant, scale-adaptive, and efficiently computable graph representation method that allows for straightforward comparisons of large graphs. NetLSD extracts a compact signature that inherits the formal properties of the Laplacian spectrum, specifically its heat or wave kernel; thus, it \\em hears the shape of a graph. Our evaluation on a variety of real-world graphs demonstrates that it outperforms previous works in both expressiveness and efficiency.", "year": 2018, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Anton Tsitsulin", "D. Mottin", "Panagiotis Karras", "A. 
Bronstein", "Emmanuel Müller" ], "externalIds": { "MAG": "3106039696", "ArXiv": "1805.10712", "DBLP": "conf/kdd/TsitsulinMKBM18", "DOI": "10.1145/3219819.3219991", "CorpusId": 44061224 }, "url": "https://www.semanticscholar.org/paper/20658628d56ffd106e1d36f9f0b3b3f4de4d2cac", "referenceCount": 56, "citationCount": 152, "influentialCitationCount": 22, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An information-theoretic, all-scales approach to comparing networks", "abstract": null, "year": 2018, "venue": "Applied Network Science", "authors": [ "James P. Bagrow", "E. Bollt" ], "externalIds": { "DBLP": "journals/ans/BagrowB19", "MAG": "2962979788", "ArXiv": "1804.03665", "DOI": "10.1007/s41109-019-0156-x", "CorpusId": 4796224 }, "url": "https://www.semanticscholar.org/paper/d3e30618a8878c1d98045253d83651884ac59ba7", "referenceCount": 51, "citationCount": 73, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics", "Physics" ] }, { "title": "GraphRNN: Generating Realistic Graphs with Deep Auto-regressive Models", "abstract": "Modeling and generating graphs is fundamental for studying networks in biology, engineering, and social sciences. However, modeling complex distributions over graphs and then efficiently sampling from these distributions is challenging due to the non-unique, high-dimensional nature of graphs and the complex, non-local dependencies that exist between edges in a given graph. Here we propose GraphRNN, a deep autoregressive model that addresses the above challenges and approximates any distribution of graphs with minimal assumptions about their structure. GraphRNN learns to generate graphs by training on a representative set of graphs and decomposes the graph generation process into a sequence of node and edge formations, conditioned on the graph structure generated so far. \nIn order to quantitatively evaluate the performance of GraphRNN, we introduce a benchmark suite of datasets, baselines and novel evaluation metrics based on Maximum Mean Discrepancy, which measure distances between sets of graphs. Our experiments show that GraphRNN significantly outperforms all baselines, learning to generate diverse graphs that match the structural characteristics of a target set, while also scaling to graphs 50 times larger than previous deep models.", "year": 2018, "venue": "International Conference on Machine Learning", "authors": [ "Jiaxuan You", "Rex Ying", "Xiang Ren", "William L. Hamilton", "J. Leskovec" ], "externalIds": { "MAG": "2806115886", "DBLP": "conf/icml/YouYRHL18", "ArXiv": "1802.08773", "CorpusId": 46937309 }, "url": "https://www.semanticscholar.org/paper/e1cef464322243feb12ac3f81873c912e071a1a6", "referenceCount": 36, "citationCount": 736, "influentialCitationCount": 154, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Revealing the Hidden Language of Complex Networks", "abstract": null, "year": 2014, "venue": "Scientific Reports", "authors": [ "Ömer Nebil Yaveroğlu", "N. Malod-Dognin", "Darren R. Davis", "Zoran Levnajic", "Vuk Janjic", "R. Karapandža", "A. 
Stojmirović", "Natasa Przulj" ], "externalIds": { "PubMedCentral": "3971399", "MAG": "2135303340", "DOI": "10.1038/srep04547", "CorpusId": 262594437, "PubMed": "24686408" }, "url": "https://www.semanticscholar.org/paper/91f7c92e12f4b73e9b58bd9717a5ffe3233f1929", "referenceCount": 48, "citationCount": 198, "influentialCitationCount": 17, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Generalized Preferential Attachment: Tunable Power-Law Degree Distribution and Clustering Coefficient", "abstract": null, "year": 2012, "venue": "Workshop on Algorithms and Models for the Web-Graph", "authors": [ "L. Ostroumova", "A. Ryabchenko", "E. Samosvat" ], "externalIds": { "ArXiv": "1205.3015", "MAG": "2741029933", "DBLP": "journals/corr/abs-1205-3015", "DOI": "10.1007/978-3-319-03536-9_15", "CorpusId": 18751683 }, "url": "https://www.semanticscholar.org/paper/6237b66b2be1f04fe3ba591ba9a0e5e84d005490", "referenceCount": 37, "citationCount": 50, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Stochastic blockmodels and community structure in networks", "abstract": "Stochastic blockmodels have been proposed as a tool for detecting community structure in networks as well as for generating synthetic networks for use as benchmarks. Most blockmodels, however, ignore variation in vertex degree, making them unsuitable for applications to real-world networks, which typically display broad degree distributions that can significantly affect the results. Here we demonstrate how the generalization of blockmodels to incorporate this missing element leads to an improved objective function for community detection in complex networks. We also propose a heuristic algorithm for community detection using this objective function or its non-degree-corrected counterpart and show that the degree-corrected version dramatically outperforms the uncorrected one in both real-world and synthetic networks.", "year": 2010, "venue": "Physical review. E, Statistical, nonlinear, and soft matter physics", "authors": [ "B. Karrer", "M. Newman" ], "externalIds": { "ArXiv": "1008.3926", "DBLP": "journals/corr/abs-1008-3926", "MAG": "2119998616", "DOI": "10.1103/PhysRevE.83.016107", "CorpusId": 9068097, "PubMed": "21405744" }, "url": "https://www.semanticscholar.org/paper/5625bbaf7dfdf5b675c5213c917939870b5aa0a2", "referenceCount": 31, "citationCount": 1852, "influentialCitationCount": 298, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science", "Physics", "Medicine" ] }, { "title": "Portraits of complex networks", "abstract": "We propose a method for characterizing large complex networks by introducing a new matrix structure, unique for a given network, which encodes structural information; provides useful visualization, even for very large networks; and allows for rigorous statistical comparison between networks. Dynamic processes such as percolation can be visualized using animation.", "year": 2007, "venue": "", "authors": [ "James P. Bagrow", "E. Bollt", "J. Skufca", "D. 
ben-Avraham" ], "externalIds": { "MAG": "2069788892", "ArXiv": "cond-mat/0703470", "DOI": "10.1209/0295-5075/81/68004", "CorpusId": 17598749 }, "url": "https://www.semanticscholar.org/paper/3bafe657fb060ad4bcbcc237f50b6baae566df77", "referenceCount": 24, "citationCount": 60, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Computer Science" ] }, { "title": "Connected Components in Random Graphs with Given Expected Degree Sequences", "abstract": null, "year": 2002, "venue": "", "authors": [ "Fan Chung", "Linyuan Lu" ], "externalIds": { "MAG": "2112976607", "DOI": "10.1007/PL00012580", "CorpusId": 267924436 }, "url": "https://www.semanticscholar.org/paper/ebb5423c30246eeb2f7df7970c9c025255b7349a", "referenceCount": 38, "citationCount": 246, "influentialCitationCount": 18, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Evolutionary reconstruction of networks.", "abstract": "Can a graph specifying the pattern of connections of a dynamical network be reconstructed from statistical properties of a signal generated by such a system? In this model study, we present a Metropolis algorithm for reconstruction of graphs from their Laplacian spectra. Through a stochastic process of mutations and selection, evolving test networks converge to a reference graph. Applying the method to several examples of random graphs, clustered graphs, and small-world networks, we show that the proposed stochastic evolution allows exact reconstruction of relatively small networks and yields good approximations in the case of large sizes.", "year": 2001, "venue": "Physical review. E, Statistical, nonlinear, and soft matter physics", "authors": [ "M. Ipsen", "A. Mikhailov" ], "externalIds": { "ArXiv": "nlin/0111023", "MAG": "2045575714", "DOI": "10.1103/PhysRevE.66.046109", "CorpusId": 46327750, "PubMed": "12443261" }, "url": "https://www.semanticscholar.org/paper/c2fbed220489de81372d51082950fa39d39dc3fc", "referenceCount": 22, "citationCount": 94, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Medicine", "Mathematics" ] }, { "title": "Growing scale-free networks with tunable clustering.", "abstract": "We extend the standard scale-free network model to include a \"triad formation step.\" We analyze the geometric properties of networks generated by this algorithm both analytically and by numerical calculations, and find that our model possesses the same characteristics as the standard scale-free networks such as the power-law degree distribution and the small average geodesic length, but with the high clustering at the same time. In our model, the clustering coefficient is also shown to be tunable simply by changing a control parameter---the average number of triad formation trials per time step.", "year": 2001, "venue": "Physical review. E, Statistical, nonlinear, and soft matter physics", "authors": [ "Petter Holme", "Beom Jun Kim" ], "externalIds": { "ArXiv": "cond-mat/0110452", "MAG": "1983275606", "DOI": "10.1103/PhysRevE.65.026107", "CorpusId": 4643442, "PubMed": "11863587" }, "url": "https://www.semanticscholar.org/paper/b3c081e9c6f21ec9867d70b8a382c44f6e760276", "referenceCount": 9, "citationCount": 975, "influentialCitationCount": 74, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Mathematics", "Physics" ] }, { "title": "Stochastic blockmodels: First steps", "abstract": null, "year": 1983, "venue": "", "authors": [ "P. Holland", "Kathryn B. Laskey", "S. 
Leinhardt" ], "externalIds": { "MAG": "2102907934", "DOI": "10.1016/0378-8733(83)90021-7", "CorpusId": 34098453 }, "url": "https://www.semanticscholar.org/paper/996263c3ddbb50f0198354827445abd214f83030", "referenceCount": 6, "citationCount": 2669, "influentialCitationCount": 291, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Complex Networks: Structure and Dynamics", "abstract": null, "year": 2014, "venue": "", "authors": [ "Shilpa Chakravartula" ], "externalIds": { "MAG": "600525351", "CorpusId": 60311427 }, "url": "https://www.semanticscholar.org/paper/4aa76afb4df52271947b17f88dbd6afca43ddb9b", "referenceCount": 0, "citationCount": 3100, "influentialCitationCount": 170, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Random geometric graphs , volume 5", "abstract": null, "year": 2003, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Emergence of Scaling in Random Networks", "abstract": "level of Co and a maximum of inverse TMR is expected when the Fermi level of LSMO is approximately at the maximum of the spin2 DOS of Co. This is consistent with the maximum of inverse TMR observed at 20.4 V for Co/STO/LSMO junctions (Fig. 3A). For a positive bias, the TMR is expected to change sign and become normal above 1 V when the Fermi level of LSMO goes down into the energy range of the majority spin d-band of Co. This is also observed in Fig. 3A. For ALO and ALO/STO barriers, a predominant tunneling of s-character electrons (see arrow in Fig. 2B) is the usual explanation of the positive polarization (6–8). The rapid drop with bias (Fig. 3B) is similar to what has been observed in most junctions with ALO barriers, and completely different from what is obtained when the tunneling is predominantly by d-character electrons (Fig. 3A). The origin of this rapid decrease of the TMR at relatively small bias has never been clearly explained. This is roughly consistent with the energy dependence of the DOS induced by sp-d bonding effects on the first atomic layer of ALO in the calculation of Nguyen-Mahn et al. (8) for the Co-ALO interface. But Zhang et al. (13) have also shown that a large part of the TMR drop can be attributed to the excitation of spin waves. The experiments reported here and in several recent publications (3, 4) demonstrate the important role of the electronic structure of the metal-oxide interface in determining the spin polarization of the tunneling electrons. The negative polarization for the Co-STO interface has been ascribed to d-d bonding effects between Al and Ti (4). This interpretation is similar to that proposed to explain, in terms of sp-d bonding, the positive polarization at the Co-ALO interface (8). However, there is no general theory predicting the trend of the experimental results for Co—that is, a negative polarization with oxides of d elements (STO, CLO, Ta2O5) and a positive one when there are only s and p states (ALO). It is likely that the spin polarization should also depend on the position of the Fermi level with respect to the electronic levels of each character above and below the gap of the insulator. In addition, as an evanescent wave in an insulator is a Bloch wave with an imaginary wave vector, one can expect different decay lengths for Bloch waves of different character. 
This means that the final polarization could also depend on the thickness of the barrier, as illustrated by the calculations of MacLaren et al. for Fe/ZnSe/Fe junctions (14). The influence of the barrier on the spin polarization opens new ways to shape and optimize the TMR. Interesting bias dependencies can be obtained with barriers selecting the d electrons and probing the fine structure of the d-DOS, as in Fig. 3A. The DOS of a d-band can also be easily tailored by alloying (for example, by introduction of virtual bound states) to produce specific bias dependencies. Although here we concentrated on the problem of the spin polarization of the Co electrode and regarded the strongly spin-polarized LSMO only as a useful spin analyzer, the large TMR ratios obtained by combining Co and LSMO electrodes (50% with a STO barrier) are also an interesting result. The drawback arising from the low Curie temperature of LSMO (~350 K) is the reduction of the TMR at room temperature, down to about 5% at 300 K in Co/STO/LSMO (4). However, other types of oxides of the double-perovskite family (for example, Sr2FeMoO6) combine electronic properties similar to those of manganites with a definitely higher Curie temperature (15). Their use in magnetic tunnel junctions is promising for a new generation of tunnel junctions with very high magnetoresistance for room-temperature applications.", "year": 1999, "venue": "", "authors": [ "B. McInnes", "Jannene S. McBride", "N. Evans", "David D. Lambert", "A. Andrew" ], "externalIds": { "MAG": "2908355692", "DOI": "10.1515/9781400841356.349", "CorpusId": 524106 }, "url": "https://www.semanticscholar.org/paper/a828fd17399d0ec9f59801e21230e7f6391757f4", "referenceCount": 11, "citationCount": 31015, "influentialCitationCount": 2030, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "On the evolution of random graphs", "abstract": "$\\binom{n}{2} - k$ edges have equal probabilities to be chosen as the next one. We shall study the \"evolution\" of such a random graph if N is increased. In this investigation we endeavour to find what is the \"typical\" structure at a given stage of evolution (i.e. if N is equal, or asymptotically equal, to a given function N(n) of n). By a \"typical\" structure we mean such a structure the probability of which tends to 1 if $n \\to +\\infty$ when N = N(n). If A is such a property that $\\lim_{n \\to \\infty} P_{n,N(n)}(A) = 1$, we shall say that \"almost all\" graphs $G_{n,N(n)}$ possess this property.", "year": 1984, "venue": "", "authors": [ "P. Erdos", "A. 
Rényi" ], "externalIds": { "MAG": "2908457301", "DOI": "10.1090/s0002-9947-1984-0756039-5", "CorpusId": 6829589 }, "url": "https://www.semanticscholar.org/paper/a5aad5abb32f6b15f31b92312bb3b0f7b6470977", "referenceCount": 19, "citationCount": 7893, "influentialCitationCount": 711, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Beyond Erd˝os-Rényi: Gener-alization in algorithmic reasoning on graphs", "abstract": null, "year": null, "venue": "The Second Learning on Graphs Conference", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Schrödinger bridge based deep conditional generative learning": { "paper_title": "Schrödinger bridge based deep conditional generative learning", "arxiv_id": "2409.17294v1", "keyword": "generative model", "authors": [ "Hanwen Huang" ], "references": [ { "title": "Deep conditional distribution learning via conditional F\\\"ollmer flow", "abstract": "We introduce an ordinary differential equation (ODE) based deep generative method for learning conditional distributions, named Conditional F\\\"ollmer Flow. Starting from a standard Gaussian distribution, the proposed flow could approximate the target conditional distribution very well when the time is close to 1. For effective implementation, we discretize the flow with Euler's method where we estimate the velocity field nonparametrically using a deep neural network. Furthermore, we also establish the convergence result for the Wasserstein-2 distance between the distribution of the learned samples and the target conditional distribution, providing the first comprehensive end-to-end error analysis for conditional distribution learning via ODE flow. Our numerical experiments showcase its effectiveness across a range of scenarios, from standard nonparametric conditional density estimation problems to more intricate challenges involving image data, illustrating its superiority over various existing conditional density estimation methods.", "year": 2024, "venue": "", "authors": [ "Jinyuan Chang", "Zhao Ding", "Yuling Jiao", "Ruoxuan Li", "Jerry Zhijian Yang" ], "externalIds": { "ArXiv": "2402.01460", "CorpusId": 267406176 }, "url": "https://www.semanticscholar.org/paper/2bfd0623815b193ab5a411868fe91fbe6e02f6ac", "referenceCount": 0, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Diffusion Bridge Mixture Transports, Schrödinger Bridge Problems and Generative Modeling", "abstract": "The dynamic Schr\\\"odinger bridge problem seeks a stochastic process that defines a transport between two target probability measures, while optimally satisfying the criteria of being closest, in terms of Kullback-Leibler divergence, to a reference process. We propose a novel sampling-based iterative algorithm, the iterated diffusion bridge mixture (IDBM) procedure, aimed at solving the dynamic Schr\\\"odinger bridge problem. The IDBM procedure exhibits the attractive property of realizing a valid transport between the target probability measures at each iteration. We perform an initial theoretical investigation of the IDBM procedure, establishing its convergence properties. The theoretical findings are complemented by numerical experiments illustrating the competitive performance of the IDBM procedure. 
Recent advancements in generative modeling employ the time-reversal of a diffusion process to define a generative process that approximately transports a simple distribution to the data distribution. As an alternative, we propose utilizing the first iteration of the IDBM procedure as an approximation-free method for realizing this transport. This approach offers greater flexibility in selecting the generative process dynamics and exhibits accelerated training and superior sample quality over larger discretization intervals. In terms of implementation, the necessary modifications are minimally intrusive, being limited to the training loss definition.", "year": 2023, "venue": "Journal of machine learning research", "authors": [ "Stefano Peluchetti" ], "externalIds": { "DBLP": "journals/jmlr/Peluchetti23", "ArXiv": "2304.00917", "DOI": "10.48550/arXiv.2304.00917", "CorpusId": 257912618 }, "url": "https://www.semanticscholar.org/paper/753d21e35f91097211147e8bf9efadea3f291dfb", "referenceCount": 53, "citationCount": 29, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "I2SB: Image-to-Image Schrödinger Bridge", "abstract": "We propose Image-to-Image Schr\\\"odinger Bridge (I$^2$SB), a new class of conditional diffusion models that directly learn the nonlinear diffusion processes between two given distributions. These diffusion bridges are particularly useful for image restoration, as the degraded images are structurally informative priors for reconstructing the clean images. I$^2$SB belongs to a tractable class of Schr\\\"odinger bridge, the nonlinear extension to score-based models, whose marginal distributions can be computed analytically given boundary pairs. This results in a simulation-free framework for nonlinear diffusions, where the I$^2$SB training becomes scalable by adopting practical techniques used in standard diffusion models. We validate I$^2$SB in solving various image restoration tasks, including inpainting, super-resolution, deblurring, and JPEG restoration on ImageNet 256x256 and show that I$^2$SB surpasses standard conditional diffusion models with more interpretable generative processes. Moreover, I$^2$SB matches the performance of inverse methods that additionally require the knowledge of the corruption operators. Our work opens up new algorithmic opportunities for developing efficient nonlinear diffusion models on a large scale. scale. Project page and codes: https://i2sb.github.io/", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Guan-Horng Liu", "Arash Vahdat", "De-An Huang", "Evangelos A. Theodorou", "Weili Nie", "Anima Anandkumar" ], "externalIds": { "DBLP": "journals/corr/abs-2302-05872", "ArXiv": "2302.05872", "DOI": "10.48550/arXiv.2302.05872", "CorpusId": 257022338 }, "url": "https://www.semanticscholar.org/paper/26756d4327a62b1d5aef8031cc4071e243f050a3", "referenceCount": 78, "citationCount": 84, "influentialCitationCount": 17, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Sampling is as easy as learning the score: theory for diffusion models with minimal data assumptions", "abstract": "We provide theoretical convergence guarantees for score-based generative models (SGMs) such as denoising diffusion probabilistic models (DDPMs), which constitute the backbone of large-scale real-world generative models such as DALL$\\cdot$E 2. 
Our main result is that, assuming accurate score estimates, such SGMs can efficiently sample from essentially any realistic data distribution. In contrast to prior works, our results (1) hold for an $L^2$-accurate score estimate (rather than $L^\\infty$-accurate); (2) do not require restrictive functional inequality conditions that preclude substantial non-log-concavity; (3) scale polynomially in all relevant problem parameters; and (4) match state-of-the-art complexity guarantees for discretization of the Langevin diffusion, provided that the score error is sufficiently small. We view this as strong theoretical justification for the empirical success of SGMs. We also examine SGMs based on the critically damped Langevin diffusion (CLD). Contrary to conventional wisdom, we provide evidence that the use of the CLD does not reduce the complexity of SGMs.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Sitan Chen", "Sinho Chewi", "Jungshian Li", "Yuanzhi Li", "A. Salim", "Anru R. Zhang" ], "externalIds": { "ArXiv": "2209.11215", "DBLP": "journals/corr/abs-2209-11215", "DOI": "10.48550/arXiv.2209.11215", "CorpusId": 252438904 }, "url": "https://www.semanticscholar.org/paper/7309bf7607f4b4339f4ae288f3ad4fc36d139b5a", "referenceCount": 54, "citationCount": 169, "influentialCitationCount": 42, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "ProDiff: Progressive Fast Diffusion Model for High-Quality Text-to-Speech", "abstract": "Denoising diffusion probabilistic models (DDPMs) have recently achieved leading performances in many generative tasks. However, the inherited iterative sampling process costs hinder their applications to text-to-speech deployment. Through the preliminary study on diffusion model parameterization, we find that previous gradient-based TTS models require hundreds or thousands of iterations to guarantee high sample quality, which poses a challenge for accelerating sampling. In this work, we propose ProDiff, on progressive fast diffusion model for high-quality text-to-speech. Unlike previous work estimating the gradient for data density, ProDiff parameterizes the denoising model by directly predicting clean data to avoid distinct quality degradation in accelerating sampling. To tackle the model convergence challenge with decreased diffusion iterations, ProDiff reduces the data variance in the target site via knowledge distillation. Specifically, the denoising model uses the generated mel-spectrogram from an N-step DDIM teacher as the training target and distills the behavior into a new model with N/2 steps. As such, it allows the TTS model to make sharp predictions and further reduces the sampling time by orders of magnitude. Our evaluation demonstrates that ProDiff needs only 2 iterations to synthesize high-fidelity mel-spectrograms, while it maintains sample quality and diversity competitive with state-of-the-art models using hundreds of steps. ProDiff enables a sampling speed of 24x faster than real-time on a single NVIDIA 2080Ti GPU, making diffusion models practically applicable to text-to-speech synthesis deployment for the first time. 
Our extensive ablation studies demonstrate that each design in ProDiff is effective, and we further show that ProDiff can be easily extended to the multi-speaker setting.", "year": 2022, "venue": "ACM Multimedia", "authors": [ "Rongjie Huang", "Zhou Zhao", "Huadai Liu", "Jinglin Liu", "Chenye Cui", "Yi Ren" ], "externalIds": { "ArXiv": "2207.06389", "DBLP": "journals/corr/abs-2207-06389", "DOI": "10.1145/3503161.3547855", "CorpusId": 250492984 }, "url": "https://www.semanticscholar.org/paper/6fc1c18418a74b4ed46e00ee5f70e46c8ddc8460", "referenceCount": 60, "citationCount": 146, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Conditional Simulation Using Diffusion Schrödinger Bridges", "abstract": "Denoising diffusion models have recently emerged as a powerful class of generative models. They provide state-of-the-art results, not only for unconditional simulation, but also when used to solve conditional simulation problems arising in a wide range of inverse problems. A limitation of these models is that they are computationally intensive at generation time as they require simulating a diffusion process over a long time horizon. When performing unconditional simulation, a Schr\\\"odinger bridge formulation of generative modeling leads to a theoretically grounded algorithm shortening generation time which is complementary to other proposed acceleration techniques. We extend the Schr\\\"odinger bridge framework to conditional simulation. We demonstrate this novel methodology on various applications including image super-resolution, optimal filtering for state-space models and the refinement of pre-trained networks. Our code can be found at https://github.com/vdeborto/cdsb.", "year": 2022, "venue": "Conference on Uncertainty in Artificial Intelligence", "authors": [ "Yuyang Shi", "Valentin De Bortoli", "George Deligiannidis", "A. Doucet" ], "externalIds": { "DBLP": "conf/uai/0002BDD22", "ArXiv": "2202.13460", "CorpusId": 247158357 }, "url": "https://www.semanticscholar.org/paper/ab7c0f8d69f81c44716a7bc33fa1e49e2981fc04", "referenceCount": 60, "citationCount": 43, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Learning Fast Samplers for Diffusion Models by Differentiating Through Sample Quality", "abstract": "Diffusion models have emerged as an expressive family of generative models rivaling GANs in sample quality and autoregressive models in likelihood scores. Standard diffusion models typically require hundreds of forward passes through the model to generate a single high-fidelity sample. We introduce Differentiable Diffusion Sampler Search (DDSS): a method that optimizes fast samplers for any pre-trained diffusion model by differentiating through sample quality scores. We also present Generalized Gaussian Diffusion Models (GGDM), a family of flexible non-Markovian samplers for diffusion models. We show that optimizing the degrees of freedom of GGDM samplers by maximizing sample quality scores via gradient descent leads to improved sample quality. Our optimization procedure backpropagates through the sampling process using the reparametrization trick and gradient rematerialization. DDSS achieves strong results on unconditional image generation across various datasets (e.g., FID scores on LSUN church 128x128 of 11.6 with only 10 inference steps, and 4.82 with 20 steps, compared to 51.1 and 14.9 with strongest DDPM/DDIM baselines). 
Our method is compatible with any pre-trained diffusion model without fine-tuning or re-training required.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Daniel Watson", "William Chan", "Jonathan Ho", "Mohammad Norouzi" ], "externalIds": { "ArXiv": "2202.05830", "DBLP": "journals/corr/abs-2202-05830", "CorpusId": 246823323 }, "url": "https://www.semanticscholar.org/paper/7e839c2667479d91e21e84583c27257dc7dc1a36", "referenceCount": 41, "citationCount": 147, "influentialCitationCount": 9, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Conditional Image Generation with Score-Based Diffusion Models", "abstract": "Score-based diffusion models have emerged as one of the most promising frameworks for deep generative modelling. In this work we conduct a systematic comparison and theoretical analysis of different approaches to learning conditional probability distributions with score-based diffusion models. In particular, we prove results which provide a theoretical justification for one of the most successful estimators of the conditional score. Moreover, we introduce a multi-speed diffusion framework, which leads to a new estimator for the conditional score, performing on par with previous state-of-the-art approaches. Our theoretical and experimental findings are accompanied by an open source library MSDiff which allows for application and further research of multi-speed diffusion models.", "year": 2021, "venue": "arXiv.org", "authors": [ "Georgios Batzolis", "Jan Stanczuk", "C. Schonlieb", "Christian Etmann" ], "externalIds": { "DBLP": "journals/corr/abs-2111-13606", "ArXiv": "2111.13606", "CorpusId": 244709128 }, "url": "https://www.semanticscholar.org/paper/35356feaaf1a739a7db2b76f32e3e5a71ec74eb5", "referenceCount": 30, "citationCount": 131, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Bayesian learning via neural Schrödinger–Föllmer flows", "abstract": null, "year": 2021, "venue": "Statistics and computing", "authors": [ "Francisco Vargas", "Andrius Ovsianas", "David Fernandes", "M. Girolami", "Neil D. Lawrence", "Nikolas Nusken" ], "externalIds": { "DBLP": "journals/sac/VargasOFGLN23", "ArXiv": "2111.10510", "DOI": "10.1007/s11222-022-10172-5", "CorpusId": 244477794 }, "url": "https://www.semanticscholar.org/paper/c930632c662231f768338c7424300b8ba5390c25", "referenceCount": 93, "citationCount": 34, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Likelihood Training of Schrödinger Bridge using Forward-Backward SDEs Theory", "abstract": "Schr\\\"odinger Bridge (SB) is an entropy-regularized optimal transport problem that has received increasing attention in deep generative modeling for its mathematical flexibility compared to the Scored-based Generative Model (SGM). However, it remains unclear whether the optimization principle of SB relates to the modern training of deep generative models, which often rely on constructing log-likelihood objectives.This raises questions on the suitability of SB models as a principled alternative for generative applications. In this work, we present a novel computational framework for likelihood training of SB models grounded on Forward-Backward Stochastic Differential Equations Theory - a mathematical methodology appeared in stochastic optimal control that transforms the optimality condition of SB into a set of SDEs. 
Crucially, these SDEs can be used to construct the likelihood objectives for SB that, surprisingly, generalizes the ones for SGM as special cases. This leads to a new optimization principle that inherits the same SB optimality yet without losing applications of modern generative training techniques, and we show that the resulting training algorithm achieves comparable results on generating realistic images on MNIST, CelebA, and CIFAR10. Our code is available at https://github.com/ghliu/SB-FBSDE.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "T. Chen", "Guan-Horng Liu", "Evangelos A. Theodorou" ], "externalIds": { "DBLP": "conf/iclr/ChenLT22", "ArXiv": "2110.11291", "CorpusId": 239050236 }, "url": "https://www.semanticscholar.org/paper/509e166d5e66df10675a0e15063daad518dcc5ad", "referenceCount": 73, "citationCount": 124, "influentialCitationCount": 31, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Deep Generative Approach to Conditional Sampling", "abstract": "Abstract We propose a deep generative approach to sampling from a conditional distribution based on a unified formulation of conditional distribution and generalized nonparametric regression function using the noise-outsourcing lemma. The proposed approach aims at learning a conditional generator, so that a random sample from the target conditional distribution can be obtained by transforming a sample drawn from a reference distribution. The conditional generator is estimated nonparametrically with neural networks by matching appropriate joint distributions using the Kullback-Liebler divergence. An appealing aspect of our method is that it allows either of or both the predictor and the response to be high-dimensional and can handle both continuous and discrete type predictors and responses. We show that the proposed method is consistent in the sense that the conditional generator converges in distribution to the underlying conditional distribution under mild conditions. Our numerical experiments with simulated and benchmark image data validate the proposed method and demonstrate that it outperforms several existing conditional density estimation methods. Supplementary materials for this article are available online.", "year": 2021, "venue": "Journal of the American Statistical Association", "authors": [ "Xingyu Zhou", "Yuling Jiao", "Jin Liu", "Jian Huang" ], "externalIds": { "ArXiv": "2110.10277", "DOI": "10.1080/01621459.2021.2016424", "CorpusId": 239049380 }, "url": "https://www.semanticscholar.org/paper/36db576bf67b8211896d230126040a25f1ead134", "referenceCount": 58, "citationCount": 32, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "CSDI: Conditional Score-based Diffusion Models for Probabilistic Time Series Imputation", "abstract": "The imputation of missing values in time series has many applications in healthcare and finance. While autoregressive models are natural candidates for time series imputation, score-based diffusion models have recently outperformed existing counterparts including autoregressive models in many tasks such as image generation and audio synthesis, and would be promising for time series imputation. In this paper, we propose Conditional Score-based Diffusion models for Imputation (CSDI), a novel time series imputation method that utilizes score-based diffusion models conditioned on observed data. 
Unlike existing score-based approaches, the conditional diffusion model is explicitly trained for imputation and can exploit correlations between observed values. On healthcare and environmental data, CSDI improves by 40-65% over existing probabilistic imputation methods on popular performance metrics. In addition, deterministic imputation by CSDI reduces the error by 5-20% compared to the state-of-the-art deterministic imputation methods. Furthermore, CSDI can also be applied to time series interpolation and probabilistic forecasting, and is competitive with existing baselines. The code is available at https://github.com/ermongroup/CSDI.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Y. Tashiro", "Jiaming Song", "Yang Song", "Stefano Ermon" ], "externalIds": { "ArXiv": "2107.03502", "DBLP": "journals/corr/abs-2107-03502", "CorpusId": 235765577 }, "url": "https://www.semanticscholar.org/paper/8982bb695dcebdacbfd079c62cd7acca8a8b48dc", "referenceCount": 45, "citationCount": 317, "influentialCitationCount": 55, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Variational Diffusion Models", "abstract": "Diffusion-based generative models have demonstrated a capacity for perceptually impressive synthesis, but can they also be great likelihood-based models? We answer this in the affirmative, and introduce a family of diffusion-based generative models that obtain state-of-the-art likelihoods on standard image density estimation benchmarks. Unlike other diffusion-based models, our method allows for efficient optimization of the noise schedule jointly with the rest of the model. We show that the variational lower bound (VLB) simplifies to a remarkably short expression in terms of the signal-to-noise ratio of the diffused data, thereby improving our theoretical understanding of this model class. Using this insight, we prove an equivalence between several models proposed in the literature. In addition, we show that the continuous-time VLB is invariant to the noise schedule, except for the signal-to-noise ratio at its endpoints. This enables us to learn a noise schedule that minimizes the variance of the resulting VLB estimator, leading to faster optimization. Combining these advances with architectural improvements, we obtain state-of-the-art likelihoods on image density estimation benchmarks, outperforming autoregressive models that have dominated these benchmarks for many years, with often significantly faster optimization. In addition, we show how to use the model as part of a bits-back compression scheme, and demonstrate lossless compression rates close to the theoretical optimum. Code is available at https://github.com/google-research/vdm .", "year": 2021, "venue": "arXiv.org", "authors": [ "Diederik P. Kingma", "Tim Salimans", "Ben Poole", "Jonathan Ho" ], "externalIds": { "DBLP": "journals/corr/abs-2107-00630", "ArXiv": "2107.00630", "CorpusId": 235694314 }, "url": "https://www.semanticscholar.org/paper/94bcd712aed610b8eaeccc57136d65ec988356f2", "referenceCount": 54, "citationCount": 803, "influentialCitationCount": 96, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Diffusion Schrödinger Bridge with Applications to Score-Based Generative Modeling", "abstract": "Progressively applying Gaussian noise transforms complex data distributions to approximately Gaussian. Reversing this dynamic defines a generative model. 
When the forward noising process is given by a Stochastic Differential Equation (SDE), Song et al. (2021) demonstrate how the time inhomogeneous drift of the associated reverse-time SDE may be estimated using score-matching. A limitation of this approach is that the forward-time SDE must be run for a sufficiently long time for the final distribution to be approximately Gaussian. In contrast, solving the Schr\\\"odinger Bridge problem (SB), i.e. an entropy-regularized optimal transport problem on path spaces, yields diffusions which generate samples from the data distribution in finite time. We present Diffusion SB (DSB), an original approximation of the Iterative Proportional Fitting (IPF) procedure to solve the SB problem, and provide theoretical analysis along with generative modeling experiments. The first DSB iteration recovers the methodology proposed by Song et al. (2021), with the flexibility of using shorter time intervals, as subsequent DSB iterations reduce the discrepancy between the final-time marginal of the forward (resp. backward) SDE with respect to the prior (resp. data) distribution. Beyond generative modeling, DSB offers a widely applicable computational optimal transport tool as the continuous state-space analogue of the popular Sinkhorn algorithm (Cuturi, 2013).", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Valentin De Bortoli", "James Thornton", "J. Heng", "A. Doucet" ], "externalIds": { "ArXiv": "2106.01357", "DBLP": "conf/nips/BortoliTHD21", "CorpusId": 235294278 }, "url": "https://www.semanticscholar.org/paper/fad8bd00bca79005f89a0b0e2aa13fddc864fe22", "referenceCount": 95, "citationCount": 327, "influentialCitationCount": 56, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Diffusion Models Beat GANs on Image Synthesis", "abstract": "We show that diffusion models can achieve image sample quality superior to the current state-of-the-art generative models. We achieve this on unconditional image synthesis by finding a better architecture through a series of ablations. For conditional image synthesis, we further improve sample quality with classifier guidance: a simple, compute-efficient method for trading off diversity for fidelity using gradients from a classifier. We achieve an FID of 2.97 on ImageNet 128$\\times$128, 4.59 on ImageNet 256$\\times$256, and 7.72 on ImageNet 512$\\times$512, and we match BigGAN-deep even with as few as 25 forward passes per sample, all while maintaining better coverage of the distribution. Finally, we find that classifier guidance combines well with upsampling diffusion models, further improving FID to 3.94 on ImageNet 256$\\times$256 and 3.85 on ImageNet 512$\\times$512. 
We release our code at https://github.com/openai/guided-diffusion", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Prafulla Dhariwal", "Alex Nichol" ], "externalIds": { "ArXiv": "2105.05233", "DBLP": "journals/corr/abs-2105-05233", "CorpusId": 234357997 }, "url": "https://www.semanticscholar.org/paper/64ea8f180d0682e6c18d1eb688afdb2027c02794", "referenceCount": 81, "citationCount": 5177, "influentialCitationCount": 661, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Optimal Transport in Systems and Control", "abstract": "Optimal transport began as the problem of how to efficiently redistribute goods between production and consumers and evolved into a far-reaching geometric variational framework for studying flows of distributions on metric spaces. This theory enables a class of stochastic control problems to regulate dynamical systems so as to limit uncertainty to within specified limits. Representative control examples include the landing of a spacecraft aimed probabilistically toward a target and the suppression of undesirable effects of thermal noise on resonators; in both of these examples, the goal is to regulate the flow of the distribution of the random state. A most unlikely link turned up between transport of probability distributions and a maximum entropy inference problem posed by Erwin Schrödinger, where the latter is seen as an entropy-regularized version of the former. These intertwined topics of optimal transport, stochastic control, and inference are the subject of this review, which aims to highlight connections, insights, and computational tools while touching on quadratic regulator theory and probabilistic flows in discrete spaces and networks.", "year": 2021, "venue": "Annu. Rev. Control. Robotics Auton. Syst.", "authors": [ "Yongxin Chen", "T. Georgiou", "M. Pavon" ], "externalIds": { "MAG": "3111153674", "DBLP": "journals/arcras/ChenGP21", "DOI": "10.1146/annurev-control-070220-100858", "CorpusId": 230574043 }, "url": "https://www.semanticscholar.org/paper/27bf8d2e5f024a42b7e4d4432bdf7f2a7fe8b0b9", "referenceCount": 0, "citationCount": 76, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Image Super-Resolution via Iterative Refinement", "abstract": "We present SR3, an approach to image Super-Resolution via Repeated Refinement. SR3 adapts denoising diffusion probabilistic models (Ho et al. 2020), (Sohl-Dickstein et al. 2015) to image-to-image translation, and performs super-resolution through a stochastic iterative denoising process. Output images are initialized with pure Gaussian noise and iteratively refined using a U-Net architecture that is trained on denoising at various noise levels, conditioned on a low-resolution input image. SR3 exhibits strong performance on super-resolution tasks at different magnification factors, on faces and natural images. We conduct human evaluation on a standard 8× face super-resolution task on CelebA-HQ for which SR3 achieves a fool rate close to 50%, suggesting photo-realistic outputs, while GAN baselines do not exceed a fool rate of 34%. We evaluate SR3 on a 4× super-resolution task on ImageNet, where SR3 outperforms baselines in human evaluation and classification accuracy of a ResNet-50 classifier trained on high-resolution images. 
We further show the effectiveness of SR3 in cascaded image generation, where a generative model is chained with super-resolution models to synthesize high-resolution images with competitive FID scores on the class-conditional 256×256 ImageNet generation challenge.", "year": 2021, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Chitwan Saharia", "Jonathan Ho", "William Chan", "Tim Salimans", "David J. Fleet", "Mohammad Norouzi" ], "externalIds": { "DBLP": "journals/corr/abs-2104-07636", "ArXiv": "2104.07636", "DOI": "10.1109/TPAMI.2022.3204461", "CorpusId": 233241040, "PubMed": "36094974" }, "url": "https://www.semanticscholar.org/paper/8a1ea7b6e7e834d146ad782be5d63f57f806a9cc", "referenceCount": 74, "citationCount": 1305, "influentialCitationCount": 130, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Medicine" ] }, { "title": "Improved Denoising Diffusion Probabilistic Models", "abstract": "Denoising diffusion probabilistic models (DDPM) are a class of generative models which have recently been shown to produce excellent samples. We show that with a few simple modifications, DDPMs can also achieve competitive log-likelihoods while maintaining high sample quality. Additionally, we find that learning variances of the reverse diffusion process allows sampling with an order of magnitude fewer forward passes with a negligible difference in sample quality, which is important for the practical deployment of these models. We additionally use precision and recall to compare how well DDPMs and GANs cover the target distribution. Finally, we show that the sample quality and likelihood of these models scale smoothly with model capacity and training compute, making them easily scalable. We release our code at https://github.com/openai/improved-diffusion", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Alex Nichol", "Prafulla Dhariwal" ], "externalIds": { "ArXiv": "2102.09672", "DBLP": "conf/icml/NicholD21", "CorpusId": 231979499 }, "url": "https://www.semanticscholar.org/paper/de18baa4964804cf471d85a5a090498242d2e79f", "referenceCount": 47, "citationCount": 2547, "influentialCitationCount": 282, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Maximum Likelihood Training of Score-Based Diffusion Models", "abstract": "Score-based diffusion models synthesize samples by reversing a stochastic process that diffuses data to noise, and are trained by minimizing a weighted combination of score matching losses. The log-likelihood of score-based diffusion models can be tractably computed through a connection to continuous normalizing flows, but log-likelihood is not directly optimized by the weighted combination of score matching losses. We show that for a specific weighting scheme, the objective upper bounds the negative log-likelihood, thus enabling approximate maximum likelihood training of score-based diffusion models. We empirically observe that maximum likelihood training consistently improves the likelihood of score-based diffusion models across multiple datasets, stochastic processes, and model architectures. 
Our best models achieve negative log-likelihoods of 2.83 and 3.76 bits/dim on CIFAR-10 and ImageNet 32x32 without any data augmentation, on a par with state-of-the-art autoregressive models on these tasks.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Yang Song", "Conor Durkan", "Iain Murray", "Stefano Ermon" ], "externalIds": { "ArXiv": "2101.09258", "DBLP": "conf/nips/SongDME21", "CorpusId": 235352469 }, "url": "https://www.semanticscholar.org/paper/9cf6f42806a35fd1d410dbc34d8e8df73a29d094", "referenceCount": 64, "citationCount": 472, "influentialCitationCount": 82, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Knowledge Distillation in Iterative Generative Models for Improved Sampling Speed", "abstract": "Iterative generative models, such as noise conditional score networks and denoising diffusion probabilistic models, produce high quality samples by gradually denoising an initial noise vector. However, their denoising process has many steps, making them 2-3 orders of magnitude slower than other generative models such as GANs and VAEs. In this paper, we establish a novel connection between knowledge distillation and image generation with a technique that distills a multi-step denoising process into a single step, resulting in a sampling speed similar to other single-step generative models. Our Denoising Student generates high quality samples comparable to GANs on the CIFAR-10 and CelebA datasets, without adversarial training. We demonstrate that our method scales to higher resolutions through experiments on 256 x 256 LSUN. Code and checkpoints are available at https://github.com/tcl9876/Denoising_Student", "year": 2021, "venue": "arXiv.org", "authors": [ "Eric Luhman", "Troy Luhman" ], "externalIds": { "ArXiv": "2101.02388", "DBLP": "journals/corr/abs-2101-02388", "CorpusId": 230799531 }, "url": "https://www.semanticscholar.org/paper/ba2f73db4e38324f751fbf30f7dde0bf4e7fa520", "referenceCount": 47, "citationCount": 187, "influentialCitationCount": 11, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Score-Based Generative Modeling through Stochastic Differential Equations", "abstract": "Creating noise from data is easy; creating data from noise is generative modeling. We present a stochastic differential equation (SDE) that smoothly transforms a complex data distribution to a known prior distribution by slowly injecting noise, and a corresponding reverse-time SDE that transforms the prior distribution back into the data distribution by slowly removing the noise. Crucially, the reverse-time SDE depends only on the time-dependent gradient field (\\aka, score) of the perturbed data distribution. By leveraging advances in score-based generative modeling, we can accurately estimate these scores with neural networks, and use numerical SDE solvers to generate samples. We show that this framework encapsulates previous approaches in score-based generative modeling and diffusion probabilistic modeling, allowing for new sampling procedures and new modeling capabilities. In particular, we introduce a predictor-corrector framework to correct errors in the evolution of the discretized reverse-time SDE. We also derive an equivalent neural ODE that samples from the same distribution as the SDE, but additionally enables exact likelihood computation, and improved sampling efficiency. 
In addition, we provide a new way to solve inverse problems with score-based models, as demonstrated with experiments on class-conditional generation, image inpainting, and colorization. Combined with multiple architectural improvements, we achieve record-breaking performance for unconditional image generation on CIFAR-10 with an Inception score of 9.89 and FID of 2.20, a competitive likelihood of 2.99 bits/dim, and demonstrate high fidelity generation of 1024 x 1024 images for the first time from a score-based generative model.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Yang Song", "Jascha Narain Sohl-Dickstein", "Diederik P. Kingma", "Abhishek Kumar", "Stefano Ermon", "Ben Poole" ], "externalIds": { "DBLP": "journals/corr/abs-2011-13456", "ArXiv": "2011.13456", "MAG": "3110257065", "CorpusId": 227209335 }, "url": "https://www.semanticscholar.org/paper/633e2fbfc0b21e959a244100937c5853afca4853", "referenceCount": 66, "citationCount": 4108, "influentialCitationCount": 956, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Denoising Diffusion Implicit Models", "abstract": "Denoising diffusion probabilistic models (DDPMs) have achieved high quality image generation without adversarial training, yet they require simulating a Markov chain for many steps to produce a sample. To accelerate sampling, we present denoising diffusion implicit models (DDIMs), a more efficient class of iterative implicit probabilistic models with the same training procedure as DDPMs. In DDPMs, the generative process is defined as the reverse of a Markovian diffusion process. We construct a class of non-Markovian diffusion processes that lead to the same training objective, but whose reverse process can be much faster to sample from. We empirically demonstrate that DDIMs can produce high quality samples $10 \\times$ to $50 \\times$ faster in terms of wall-clock time compared to DDPMs, allow us to trade off computation for sample quality, and can perform semantically meaningful image interpolation directly in the latent space.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Jiaming Song", "Chenlin Meng", "Stefano Ermon" ], "externalIds": { "ArXiv": "2010.02502", "DBLP": "journals/corr/abs-2010-02502", "MAG": "3092442149", "CorpusId": 222140788 }, "url": "https://www.semanticscholar.org/paper/014576b866078524286802b1d0e18628520aa886", "referenceCount": 47, "citationCount": 4422, "influentialCitationCount": 877, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Improved Techniques for Training Score-Based Generative Models", "abstract": "Score-based generative models can produce high quality image samples comparable to GANs, without requiring adversarial optimization. However, existing training procedures are limited to images of low resolution (typically below 32x32), and can be unstable under some settings. We provide a new theoretical analysis of learning and sampling from score models in high dimensional spaces, explaining existing failure modes and motivating new solutions that generalize across datasets. To enhance stability, we also propose to maintain an exponential moving average of model weights. With these improvements, we can effortlessly scale score-based generative models to images with unprecedented resolutions ranging from 64x64 to 256x256. 
Our score-based models can generate high-fidelity samples that rival best-in-class GANs on various image datasets, including CelebA, FFHQ, and multiple LSUN categories.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Yang Song", "Stefano Ermon" ], "externalIds": { "DBLP": "conf/nips/0011E20", "ArXiv": "2006.09011", "MAG": "3035384201", "CorpusId": 219708245 }, "url": "https://www.semanticscholar.org/paper/1156e277fa7ec195b043161d3c5c97715da17658", "referenceCount": 32, "citationCount": 904, "influentialCitationCount": 112, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Conditional Sampling with Monotone GANs: From Generative Models to Likelihood-Free Inference", "abstract": "We present a novel framework for conditional sampling of probability measures, using block triangular transport maps. We develop the theoretical foundations of block triangular transport in a Banach space setting, establishing general conditions under which conditional sampling can be achieved and drawing connections between monotone block triangular maps and optimal transport. Based on this theory, we then introduce a computational approach, called monotone generative adversarial networks (M-GANs), to learn suitable block triangular maps. Our algorithm uses only samples from the underlying joint probability measure and is hence likelihood-free. Numerical experiments with M-GAN demonstrate accurate sampling of conditional measures in synthetic examples, Bayesian inverse problems involving ordinary and partial differential equations, and probabilistic image in-painting.", "year": 2020, "venue": "SIAM/ASA J. Uncertain. Quantification", "authors": [ "R. Baptista", "Bamdad Hosseini", "Nikola B. Kovachki", "Y. Marzouk" ], "externalIds": { "DBLP": "journals/juq/BaptistaHKM24", "ArXiv": "2006.06755", "DOI": "10.1137/23m1581546", "CorpusId": 259088624 }, "url": "https://www.semanticscholar.org/paper/377960f553583c2f73e8866dbab50cda834939a5", "referenceCount": 113, "citationCount": 17, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Learning Likelihoods with Conditional Normalizing Flows", "abstract": "Normalizing Flows (NFs) are able to model complicated distributions p(y) with strong inter-dimensional correlations and high multimodality by transforming a simple base density p(z) through an invertible neural network under the change of variables formula. Such behavior is desirable in multivariate structured prediction tasks, where handcrafted per-pixel loss-based methods inadequately capture strong correlations between output dimensions. We present a study of conditional normalizing flows (CNFs), a class of NFs where the base density to output space mapping is conditioned on an input x, to model conditional densities p(y|x). CNFs are efficient in sampling and inference, they can be trained with a likelihood-based objective, and CNFs, being generative flows, do not suffer from mode collapse or training instabilities. We provide an effective method to train continuous CNFs for binary problems and in particular, we apply these CNFs to super-resolution and vessel segmentation tasks demonstrating competitive performance on standard benchmark datasets in terms of likelihood and conventional metrics.", "year": 2019, "venue": "arXiv.org", "authors": [ "Christina Winkler", "Daniel E. Worrall", "Emiel Hoogeboom", "M. 
Welling" ], "externalIds": { "ArXiv": "1912.00042", "DBLP": "journals/corr/abs-1912-00042", "MAG": "2991001610", "CorpusId": 208527755 }, "url": "https://www.semanticscholar.org/paper/1efc6f54d8ce68fb38382bb2e33d747c4af9badf", "referenceCount": 52, "citationCount": 197, "influentialCitationCount": 12, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Conditional density estimation tools in python and R with applications to photometric redshifts and likelihood-free cosmological inference", "abstract": null, "year": 2019, "venue": "Astronomy and Computing", "authors": [ "Niccolò Dalmasso", "T. Pospisil", "Ann B. Lee", "Rafael Izbicki", "P. Freeman", "A. Malz" ], "externalIds": { "ArXiv": "1908.11523", "MAG": "2999839472", "DBLP": "journals/ascom/DalmassoPLIFM20", "DOI": "10.1016/j.ascom.2019.100362", "CorpusId": 201698434 }, "url": "https://www.semanticscholar.org/paper/24e2c4a198a7d08522211aa939afa83130f139c9", "referenceCount": 84, "citationCount": 38, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Physics", "Mathematics" ] }, { "title": "Applied Stochastic Differential Equations", "abstract": "The topic of this book is stochastic differential equations (SDEs). As their name suggests, they really are differential equations that produce a different “answer” or solution trajectory each time they are solved. This peculiar behaviour gives them properties that are useful in modeling of uncertainties in a wide range of applications, but at the same time it complicates the rigorous mathematical treatment of SDEs. The emphasis of the book is on applied rather than theoretical aspects of SDEs and, therefore, we have chosen to structure the book in a way that we believe supports learning SDEs from an applied point of view. In the following, we briefly outline the purposes of each of the remaining chapters and explain how the chapters are connected to each other. In the chapters, we have attempted to provide a wide selection of examples of the practical application of theoretical and methodological results. Each chapter (except for the Introduction and Epilogue) also contains a representative set of analytic and hands-on exercises that can be used for testing and deepening understanding of the topics. Chapter 2 is a brief outline of concepts and solutions methods for deterministic ordinary differential equations (ODEs). We especially emphasize solution methods for linear ODEs, because the methods translate quite easily to SDEs. We also examine commonly used numerical methods such as the Euler method and Runge–Kutta methods, which we extend to SDEs in the later chapters. Chapter 3 starts with a number of motivating examples of SDEs found in physics, engineering, finance, and other applications. It turns out that in a modeling sense, SDEs can be regarded as noise-driven ODEs, but this notion should not be taken too far. The aim of the rest of the chapter is to show where things start to go wrong. Roughly speaking, with linear SDEs we are quite safe with this kind of thinking, but anything beyond them will not work.", "year": 2019, "venue": "", "authors": [ "Simo Särkkä", "A. 
Solin" ], "externalIds": { "MAG": "2940934113", "DOI": "10.1017/9781108186735", "CorpusId": 17740616 }, "url": "https://www.semanticscholar.org/paper/c54b0ed93d2b3b3b00e07cdc94dbf19df53d5972", "referenceCount": 179, "citationCount": 374, "influentialCitationCount": 66, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Style-Based Generator Architecture for Generative Adversarial Networks", "abstract": "We propose an alternative generator architecture for generative adversarial networks, borrowing from style transfer literature. The new architecture leads to an automatically learned, unsupervised separation of high-level attributes (e.g., pose and identity when trained on human faces) and stochastic variation in the generated images (e.g., freckles, hair), and it enables intuitive, scale-specific control of the synthesis. The new generator improves the state-of-the-art in terms of traditional distribution quality metrics, leads to demonstrably better interpolation properties, and also better disentangles the latent factors of variation. To quantify interpolation quality and disentanglement, we propose two new, automated methods that are applicable to any generator architecture. Finally, we introduce a new, highly varied and high-quality dataset of human faces.", "year": 2018, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Tero Karras", "S. Laine", "Timo Aila" ], "externalIds": { "DBLP": "journals/corr/abs-1812-04948", "MAG": "2904367110", "ArXiv": "1812.04948", "DOI": "10.1109/CVPR.2019.00453", "CorpusId": 54482423 }, "url": "https://www.semanticscholar.org/paper/ceb2ebef0b41e31c1a21b28c2734123900c005e2", "referenceCount": 65, "citationCount": 8967, "influentialCitationCount": 1761, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Converting High-Dimensional Regression to High-Dimensional Conditional Density Estimation", "abstract": "There is a growing demand for nonparametric conditional density estimators (CDEs) in fields such as astronomy and economics. In astronomy, for example, one can dramatically improve estimates of the parameters that dictate the evolution of the Universe by working with full conditional densities instead of regression (i.e., conditional mean) estimates. More generally, standard regression falls short in any prediction problem where the distribution of the response is more complex with multi-modality, asymmetry or heteroscedastic noise. Nevertheless, much of the work on high-dimensional inference concerns regression and classification only, whereas research on density estimation has lagged behind. Here we propose FlexCode, a fully nonparametric approach to conditional density estimation that reformulates CDE as a non-parametric orthogonal series problem where the expansion coefficients are estimated by regression. By taking such an approach, one can efficiently estimate conditional densities and not just expectations in high dimensions by drawing upon the success in high-dimensional regression. Depending on the choice of regression procedure, our method can adapt to a variety of challenging high-dimensional settings with different structures in the data (e.g., a large number of irrelevant components and nonlinear manifold structure) as well as different data types (e.g., functional data, mixed data types and sample sets). 
We study the theoretical and empirical performance of our proposed method, and we compare our approach with traditional conditional density estimators on simulated as well as real-world data, such as photometric galaxy data, Twitter data, and line-of-sight velocities in a galaxy cluster.", "year": 2017, "venue": "", "authors": [ "Rafael Izbicki", "Ann B. Lee" ], "externalIds": { "MAG": "2609566741", "ArXiv": "1704.08095", "DOI": "10.1214/17-EJS1302", "CorpusId": 51767129 }, "url": "https://www.semanticscholar.org/paper/c215beb6ce1740c5a641c1d9a1ce4a0ca7937750", "referenceCount": 66, "citationCount": 69, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Nonparametric Conditional Density Estimation in a High-Dimensional Regression Setting", "abstract": "In some applications (e.g., in cosmology and economics), the regression is not adequate to represent the association between a predictor x and a response Z because of multi-modality and asymmetry of f(z|x); using the full density instead of a single-point estimate can then lead to less bias in subsequent analysis. As of now, there are no effective ways of estimating f(z|x) when x represents high-dimensional, complex data. In this article, we propose a new nonparametric estimator of f(z|x) that adapts to sparse (low-dimensional) structure in x. By directly expanding f(z|x) in the eigenfunctions of a kernel-based operator, we avoid tensor products in high dimensions as well as ratios of estimated densities. Our basis functions are orthogonal with respect to the underlying data distribution, allowing fast implementation and tuning of parameters. We derive rates of convergence and show that the method adapts to the intrinsic dimension of the data. We also demonstrate the effectiveness of the series method on images, spectra, and an application to photometric redshift estimation of galaxies. Supplementary materials for this article are available online.", "year": 2016, "venue": "", "authors": [ "Rafael Izbicki", "Ann B. Lee" ], "externalIds": { "MAG": "2190216678", "ArXiv": "1604.00540", "DOI": "10.1080/10618600.2015.1094393", "CorpusId": 2886739 }, "url": "https://www.semanticscholar.org/paper/7084fbc8252bc7e5abbd74c5bfe78cce749cf839", "referenceCount": 59, "citationCount": 55, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Learning Structured Output Representation using Deep Conditional Generative Models", "abstract": "Supervised deep learning has been successfully applied to many recognition problems. Although it can approximate a complex many-to-one function well when a large amount of training data is provided, it is still challenging to model complex structured output representations that effectively perform probabilistic inference and make diverse predictions. In this work, we develop a deep conditional generative model for structured output prediction using Gaussian latent variables. The model is trained efficiently in the framework of stochastic gradient variational Bayes, and allows for fast prediction using stochastic feed-forward inference. In addition, we provide novel strategies to build robust structured prediction algorithms, such as input noise-injection and multi-scale prediction objective at training. 
In experiments, we demonstrate the effectiveness of our proposed algorithm in comparison to the deterministic deep neural network counterparts in generating diverse but realistic structured output predictions using stochastic inference. Furthermore, the proposed training methods are complimentary, which leads to strong pixel-level object segmentation and semantic labeling performance on Caltech-UCSD Birds 200 and the subset of Labeled Faces in the Wild dataset.", "year": 2015, "venue": "Neural Information Processing Systems", "authors": [ "Kihyuk Sohn", "Honglak Lee", "Xinchen Yan" ], "externalIds": { "MAG": "2188365844", "DBLP": "conf/nips/SohnLY15", "CorpusId": 13936837 }, "url": "https://www.semanticscholar.org/paper/3f25e17eb717e5894e0404ea634451332f85d287", "referenceCount": 37, "citationCount": 2837, "influentialCitationCount": 478, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Unsupervised Learning using Nonequilibrium Thermodynamics", "abstract": "A central problem in machine learning involves modeling complex data-sets using highly flexible families of probability distributions in which learning, sampling, inference, and evaluation are still analytically or computationally tractable. Here, we develop an approach that simultaneously achieves both flexibility and tractability. The essential idea, inspired by non-equilibrium statistical physics, is to systematically and slowly destroy structure in a data distribution through an iterative forward diffusion process. We then learn a reverse diffusion process that restores structure in data, yielding a highly flexible and tractable generative model of the data. This approach allows us to rapidly learn, sample from, and evaluate probabilities in deep generative models with thousands of layers or time steps, as well as to compute conditional and posterior probabilities under the learned model. We additionally release an open source reference implementation of the algorithm.", "year": 2015, "venue": "International Conference on Machine Learning", "authors": [ "Jascha Narain Sohl-Dickstein", "Eric A. Weiss", "Niru Maheswaranathan", "S. Ganguli" ], "externalIds": { "MAG": "2129069237", "DBLP": "journals/corr/Sohl-DicksteinW15", "ArXiv": "1503.03585", "CorpusId": 14888175 }, "url": "https://www.semanticscholar.org/paper/2dcef55a07f8607a819c21fe84131ea269cc2e3c", "referenceCount": 60, "citationCount": 4757, "influentialCitationCount": 356, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Generative adversarial networks", "abstract": "Generative adversarial networks are a kind of artificial intelligence algorithm designed to solve the generative modeling problem. The goal of a generative model is to study a collection of training examples and learn the probability distribution that generated them. Generative Adversarial Networks (GANs) are then able to generate more examples from the estimated probability distribution. Generative models based on deep learning are common, but GANs are among the most successful generative models (especially in terms of their ability to generate realistic high-resolution images). GANs have been successfully applied to a wide variety of tasks (mostly in research settings) but continue to present unique challenges and research opportunities because they are based on game theory while most other approaches to generative modeling are based on optimization.", "year": 2014, "venue": "Communications of the ACM", "authors": [ "I. 
Goodfellow", "Jean Pouget-Abadie", "Mehdi Mirza", "Bing Xu", "David Warde-Farley", "Sherjil Ozair", "Aaron C. Courville", "Yoshua Bengio" ], "externalIds": { "DBLP": "journals/corr/GoodfellowPMXWOCB14", "MAG": "3096831136", "DOI": "10.1145/3422622", "CorpusId": 1033682 }, "url": "https://www.semanticscholar.org/paper/13bc4e683075bdd6a3f0155241c276a772d4aa06", "referenceCount": 157, "citationCount": 28293, "influentialCitationCount": 3061, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A survey of the Schr\\\"odinger problem and some of its connections with optimal transport", "abstract": "This article is aimed at presenting the Schr\\\"odinger problem and some of its connections with optimal transport. We hope that it can be used as a basic user's guide to Schr\\\"odinger problem. We also give a survey of the related literature. In addition, some new results are proved.", "year": 2013, "venue": "", "authors": [ "Christian L'eonard" ], "externalIds": { "ArXiv": "1308.0215", "MAG": "2028981829", "DOI": "10.3934/DCDS.2014.34.1533", "CorpusId": 14387241 }, "url": "https://www.semanticscholar.org/paper/6609f2a04229c01f939151e6398c28c86b9ca989", "referenceCount": 86, "citationCount": 465, "influentialCitationCount": 69, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Approximating conditional distribution functions using dimension reduction", "abstract": "Motivated by applications to prediction and forecasting, we sug- gest methods for approximating the conditional distribution function of a random variable Y given a dependent random d-vector X. The idea is to estimate not the distribution of Y |X, but that of Y |� T X, where the unit vectoris selected so that the approximation is opti- mal under a least-squares criterion. We show thatmay be estimated root-n consistently. Furthermore, estimation of the conditional distri- bution function of Y , givenT X, has the same first-order asymptotic properties that it would enjoy ifwere known. The proposed method is illustrated using both simulated and real-data examples, showing its effectiveness for both independent datasets and data from time series. Numerical work corroborates the theoretical result thatcan be estimated particularly accurately.", "year": 2005, "venue": "", "authors": [ "P. Hall", "Q. Yao" ], "externalIds": { "ArXiv": "math/0507432", "MAG": "2016351318", "DOI": "10.1214/009053604000001282", "CorpusId": 15484971 }, "url": "https://www.semanticscholar.org/paper/2dd67a0ffc9bd67f687195f8a9dcbaac49537d66", "referenceCount": 36, "citationCount": 64, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "A crossvalidation method for estimating conditional densities", "abstract": "We extend the idea of crossvalidation to choose the smoothing parameters of the 'double-kernel' local linear regression for estimating a conditional density. Our selection rule optimises the estimated conditional density function by minimising the integrated squared error. We also discuss three other bandwidth selection rules, an ad hoc method used by Fan et al. (1996), a bootstrap method of Hall et al. (1999) for bandwidth selection in the estimation of conditional distribution functions, modified by Bashtannyk & Hyndman (2001) to cover conditional density functions, and finally a simple approach proposed by Hyndman & Yao (2002). 
The performance of the new approach is compared with these three methods by simulation studies, and our method performs outstandingly well. The method is illustrated by an application to estimating the transition density and the Value-at-Risk of treasury-bill data. Copyright 2004, Oxford University Press.", "year": 2004, "venue": "", "authors": [ "Jianqing Fan", "Tsz Ho Yim" ], "externalIds": { "MAG": "2005453967", "DOI": "10.1093/BIOMET/91.4.819", "CorpusId": 6037178 }, "url": "https://www.semanticscholar.org/paper/f78989af4bef999aaf96f1c6bcc43f79e2885208", "referenceCount": 33, "citationCount": 151, "influentialCitationCount": 14, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Probability Densities with Given Marginals", "abstract": null, "year": 1968, "venue": "", "authors": [ "S. Kullback" ], "externalIds": { "MAG": "1964838569", "DOI": "10.1214/AOMS/1177698249", "CorpusId": 120056206 }, "url": "https://www.semanticscholar.org/paper/3f3b26104af6b1556f72739bab213ce68e8299ae", "referenceCount": 6, "citationCount": 128, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "A Relationship Between Arbitrary Positive Matrices and Doubly Stochastic Matrices", "abstract": null, "year": 1964, "venue": "", "authors": [ "Richard Sinkhorn" ], "externalIds": { "MAG": "1990283121", "DOI": "10.1214/AOMS/1177703591", "CorpusId": 120846714 }, "url": "https://www.semanticscholar.org/paper/441f33fab0614fa0696be54a046cbc692b7e70a2", "referenceCount": 0, "citationCount": 1061, "influentialCitationCount": 105, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Diffusion Schrödinger bridge matching", "abstract": null, "year": 2023, "venue": "arXiv", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "AUTO-ENCODING VARIATIONAL BAYES", "abstract": "To make decisions based on a model fit by Auto-Encoding Variational Bayes (AEVB), practitioners typically use importance sampling to estimate a functional of the posterior distribution. The variational distribution found by AEVB serves as the proposal distribution for importance sampling. However, this proposal distribution may give unreliable (high variance) importance sampling estimates, thus leading to poor decisions. We explore how changing the objective function for learning the variational distribution, while continuing to learn the generative model based on the ELBO, affects the quality of downstream decisions. For a particular model, we characterize the error of importance sampling as a function of posterior variance and show that proposal distributions learned with evidence upper bounds are better. Motivated by these theoretical results, we propose a novel variant of the VAE. In addition to experimenting with MNIST, we present a full-fledged application of the proposed method to single-cell RNA sequencing. In this challenging instance of multiple hypothesis testing, the proposed method surpasses the current state of the art.", "year": 2020, "venue": "", "authors": [ "Romain Lopez", "Pierre Boyeau", "N. Yosef", "Michael I. Jordan", "J. 
Regier" ], "externalIds": { "CorpusId": 211146177 }, "url": "https://www.semanticscholar.org/paper/ef4f5a50837a7c1b3e87b9300ffc7ba00d461a0f", "referenceCount": 53, "citationCount": 11952, "influentialCitationCount": 1706, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Scalable Reversible Generative Models with Free-form Continuous Dynamics", "abstract": "A promising class of generative models maps points from a simple distribution to a complex distribution through an invertible neural network. Likelihood-based training of these models requires restricting their architectures to allow cheap computation of Jacobian determinants. Alternatively, the Jacobian trace can be used if the transformation is specified by an ordinary differential equation. In this paper, we use Hutchinson’s trace estimator to give a scalable unbiased estimate of the log-density. The result is a continuous-time invertible generative model with unbiased density estimation and one-pass sampling, while allowing unrestricted neural network architectures. We demonstrate our approach on highdimensional density estimation, image generation, and variational inference, achieving the state-of-the-art among exact likelihood methods with efficient sampling.", "year": 2018, "venue": "", "authors": [ "Will Grathwohl" ], "externalIds": { "CorpusId": 211221927 }, "url": "https://www.semanticscholar.org/paper/1ed1b8fa1a57b860701071300dc6a9c441db6866", "referenceCount": 20, "citationCount": 107, "influentialCitationCount": 22, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "UCI machine learning repository", "abstract": null, "year": 2017, "venue": "http://archive.ics.uci", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Copula theory and its applications : proceedings of the workshop held in Warsaw, 25-26 September 2009", "abstract": "Surveys.- Copula Theory: An Introduction.- Dynamic Modeling of Dependence in Finance via Copulae Between Stochastic Processes.- Copula Estimation.- Pair-Copula Constructions of Multivariate Copulas.- Risk Aggregation.- Extreme-Value Copulas.- Construction and Sampling of Nested Archimedean Copulas.- Tail Behaviour of Copulas.- Copulae in Reliability Theory (Order Statistics, Coherent Systems).- Copula-Based Measures of Multivariate Association.- Semi-copulas and Interpretations of Coincidences Between Stochastic Dependence and Ageing.- Contributed Papers.- A Copula-Based Model for Spatial and Temporal Dependence of Equity Markets.- Nonparametric and Semiparametric Bivariate Modeling of Petrophysical Porosity-Permeability Dependence from Well Log Data.- Testing Under the Extended Koziol-Green Model.- Parameter Estimation and Application of the Multivariate Skew t-Copula.- On Analytical Similarities of Archimedean and Exchangeable Marshall-Olkin Copulas.- Relationships Between Archimedean Copulas and Morgenstern Utility Functions.", "year": 2010, "venue": "", "authors": [ "A. Jaworski", "F. Durante", "W. Härdle", "T. 
Rychlik" ], "externalIds": { "MAG": "631499667", "CorpusId": 118005739 }, "url": "https://www.semanticscholar.org/paper/c3e77235e9b4a3da3f4a36f7fe0fe38e90862fbe", "referenceCount": 0, "citationCount": 34, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "MNIST handwritten digit database", "abstract": null, "year": 2010, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "A stochastic control approach to reciprocal diffusion processes", "abstract": null, "year": 1991, "venue": "", "authors": [ "P. Pra" ], "externalIds": { "MAG": "2023369695", "DOI": "10.1007/BF01442404", "CorpusId": 120438911 }, "url": "https://www.semanticscholar.org/paper/f5fdc3e66379c0320cf9cc5493dd97cd59673f82", "referenceCount": 24, "citationCount": 186, "influentialCitationCount": 30, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Stochastic control and nonequilibrium thermodynamical systems", "abstract": null, "year": 1989, "venue": "", "authors": [ "M. Pavon" ], "externalIds": { "MAG": "2058425167", "DOI": "10.1007/BF01448198", "CorpusId": 121824945 }, "url": "https://www.semanticscholar.org/paper/7ab21702cdebc5ac592a4a5558e62dd720ffa26f", "referenceCount": 35, "citationCount": 45, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] } ] }, "An Effective, Robust and Fairness-aware Hate Speech Detection Framework": { "paper_title": "An Effective, Robust and Fairness-aware Hate Speech Detection Framework", "arxiv_id": "2409.17191v1", "keyword": "generative model", "authors": [ "Guanyi Mou", "Kyumin Lee" ], "references": [ { "title": "Reweighting Augmented Samples by Minimizing the Maximal Expected Loss", "abstract": "Data augmentation is an effective technique to improve the generalization of deep neural networks. However, previous data augmentation methods usually treat the augmented samples equally without considering their individual impacts on the model. To address this, for the augmented samples from the same training example, we propose to assign different weights to them. We construct the maximal expected loss which is the supremum over any reweighted loss on augmented samples. Inspired by adversarial training, we minimize this maximal expected loss (MMEL) and obtain a simple and interpretable closed-form solution: more attention should be paid to augmented samples with large loss values (i.e., harder examples). Minimizing this maximal expected loss enables the model to perform well under any reweighting strategy. The proposed method can generally be applied on top of any data augmentation methods. Experiments are conducted on both natural language understanding tasks with token-level data augmentation, and image classification tasks with commonly-used image augmentation techniques like random crop and horizontal flip. 
Empirical results show that the proposed method improves the generalization performance of the model.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Mingyang Yi", "Lu Hou", "Lifeng Shang", "Xin Jiang", "Qun Liu", "Zhi-Ming Ma" ], "externalIds": { "DBLP": "journals/corr/abs-2103-08933", "ArXiv": "2103.08933", "CorpusId": 232240244 }, "url": "https://www.semanticscholar.org/paper/601e490881aa6baac15640b0dfcc0e7ad2731475", "referenceCount": 50, "citationCount": 18, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "HateGAN: Adversarial Generative-Based Data Augmentation for Hate Speech Detection", "abstract": "Academia and industry have developed machine learning and natural language processing models to detect online hate speech automatically. However, most of these existing methods adopt a supervised approach that heavily depends on labeled datasets for training. This results in the methods’ poor detection performance of the hate speech class as the training datasets are highly imbalanced. In this paper, we propose HateGAN, a deep generative reinforcement learning model, which addresses the challenge of imbalance class by augmenting the dataset with hateful tweets. We conduct extensive experiments to augment two commonly-used hate speech detection datasets with the HateGAN generated tweets. Our experiment results show that HateGAN improves the detection performance of the hate speech class regardless of the classifiers and datasets used in the detection task. Specifically, we observe an average 5% improvement for the hate class F1 scores across all state-of-the-art hate speech classifiers. We also conduct case studies to empirically examine the HateGAN generated hate speeches and show that the generated tweets are diverse, coherent, and relevant to hate speech detection.", "year": 2020, "venue": "International Conference on Computational Linguistics", "authors": [ "Rui Cao", "R. Lee" ], "externalIds": { "MAG": "3118142489", "ACL": "2020.coling-main.557", "DBLP": "conf/coling/CaoL20", "DOI": "10.18653/V1/2020.COLING-MAIN.557", "CorpusId": 227230383 }, "url": "https://www.semanticscholar.org/paper/7d96eaaa71a9556ab3b0c04c691af0b27b769d03", "referenceCount": 44, "citationCount": 32, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fairness in Uncertainty: Some Limits and Misinterpretations of Actuarial Fairness", "abstract": null, "year": 2020, "venue": "Journal of Business Ethics", "authors": [ "Sylvestre Frezal", "Laurence Barry" ], "externalIds": { "MAG": "2943909615", "DOI": "10.1007/S10551-019-04171-2", "CorpusId": 165046908 }, "url": "https://www.semanticscholar.org/paper/ddd81ff1501b9f968418ca4d643ba4a4c4ee3711", "referenceCount": 52, "citationCount": 28, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Economics" ] }, { "title": "SWE2: SubWord Enriched and Significant Word Emphasized Framework for Hate Speech Detection", "abstract": "Hate speech detection on online social networks has become one of the emerging hot topics in recent years. With the broad spread and fast propagation speed across online social networks, hate speech makes significant impacts on society by increasing prejudice and hurting people. Therefore, there are aroused attention and concern from both industry and academia. 
In this paper, we address the hate speech problem and propose a novel hate speech detection framework called SWE2, which only relies on the content of messages and automatically identifies hate speech. In particular, our framework exploits both word-level semantic information and sub-word knowledge. It is intuitively persuasive and also practically performs well under a situation with/without character-level adversarial attack. Experimental results show that our proposed model achieves 0.975 accuracy and 0.953 macro F1, outperforming 7 state-of-the-art baselines under no adversarial attack. Our model robustly and significantly performed well under extreme adversarial attack (manipulation of 50% messages), achieving 0.967 accuracy and 0.934 macro F1.", "year": 2020, "venue": "International Conference on Information and Knowledge Management", "authors": [ "Guanyi Mou", "Pengyi Ye", "Kyumin Lee" ], "externalIds": { "MAG": "3094411615", "DBLP": "conf/cikm/MouYL20", "ArXiv": "2409.16673", "DOI": "10.1145/3340531.3411990", "CorpusId": 221855145 }, "url": "https://www.semanticscholar.org/paper/8edd3fd2e7a1d8be6d030cad59be9ec6eade1a83", "referenceCount": 50, "citationCount": 13, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "HABERTOR: An Efficient and Effective Deep Hatespeech Detector", "abstract": "We present our HABERTOR model for detecting hatespeech in large scale user-generated content. Inspired by the recent success of the BERT model, we propose several modifications to BERT to enhance the performance on the downstream hatespeech classification task. HABERTOR inherits BERT's architecture, but is different in four aspects: (i) it generates its own vocabularies and is pre-trained from the scratch using the largest scale hatespeech dataset; (ii) it consists of Quaternion-based factorized components, resulting in a much smaller number of parameters, faster training and inferencing, as well as less memory usage; (iii) it uses our proposed multi-source ensemble heads with a pooling layer for separate input sources, to further enhance its effectiveness; and (iv) it uses a regularized adversarial training with our proposed fine-grained and adaptive noise magnitude to enhance its robustness. Through experiments on the large-scale real-world hatespeech dataset with 1.4M annotated comments, we show that HABERTOR works better than 15 state-of-the-art hatespeech detection methods, including fine-tuning Language Models. In particular, comparing with BERT, our HABERTOR is 4~5 times faster in the training/inferencing phase, uses less than 1/3 of the memory, and has better performance, even though we pre-train it by using less than 1% of the number of words. Our generalizability analysis shows that HABERTOR transfers well to other unseen hatespeech datasets and is a more efficient and effective alternative to BERT for the hatespeech classification.", "year": 2020, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "T. 
Tran", "Yifan Hu", "Changwei Hu", "Kevin Yen", "Fei Tan", "Kyumin Lee", "Serim Park" ], "externalIds": { "DBLP": "journals/corr/abs-2010-08865", "ArXiv": "2010.08865", "MAG": "3092747808", "ACL": "2020.emnlp-main.606", "DOI": "10.18653/v1/2020.emnlp-main.606", "CorpusId": 224511993 }, "url": "https://www.semanticscholar.org/paper/3ae6450d4d1181b72d1a93aab832afec1b258c03", "referenceCount": 65, "citationCount": 29, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fairness and Diversity for Rankings in Two-Sided Markets", "abstract": "Ranking items by their probability of relevance has long been the goal of conventional ranking systems. While this maximizes traditional criteria of ranking performance, there is a growing understanding that it is an oversimplification in online platforms that serve not only a diverse user population, but also the producers of the items. In particular, ranking algorithms are expected to be fair in how they serve all groups of users -- not just the majority group -- and they also need to be fair in how they divide exposure among the items. These fairness considerations can partially be met by adding diversity to the rankings, as done in several recent works, but we show in this paper that user fairness, item fairness and diversity are fundamentally different concepts. In particular, we find that algorithms that consider only one of the three desiderata can fail to satisfy and even harm the other two. To overcome this shortcoming, we present the first ranking algorithm that explicitly enforces all three desiderata. The algorithm optimizes user and item fairness as a convex optimization problem which can be solved optimally. From its solution, a ranking policy can be derived via a new Birkhoff-von Neumann decomposition algorithm that optimizes diversity. Beyond the theoretical analysis, we provide a comprehensive empirical evaluation on a new benchmark dataset to show the effectiveness of the proposed ranking algorithm on controlling the three desiderata and the interplay between them.", "year": 2020, "venue": "arXiv.org", "authors": [ "Lequn Wang", "T. Joachims" ], "externalIds": { "MAG": "3089906377", "DBLP": "journals/corr/abs-2010-01470", "CorpusId": 222134124 }, "url": "https://www.semanticscholar.org/paper/2563be4545252c73354c8970d82ec41d3149a1d0", "referenceCount": 70, "citationCount": 11, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Simple but Tough-to-Beat Data Augmentation Approach for Natural Language Understanding and Generation", "abstract": "Adversarial training has been shown effective at endowing the learned representations with stronger generalization ability. However, it typically requires expensive computation to determine the direction of the injected perturbations. In this paper, we introduce a set of simple yet effective data augmentation strategies dubbed cutoff, where part of the information within an input sentence is erased to yield its restricted views (during the fine-tuning stage). Notably, this process relies merely on stochastic sampling and thus adds little computational overhead. A Jensen-Shannon Divergence consistency loss is further utilized to incorporate these augmented samples into the training objective in a principled manner. To verify the effectiveness of the proposed strategies, we apply cutoff to both natural language understanding and generation problems. 
On the GLUE benchmark, it is demonstrated that cutoff, in spite of its simplicity, performs on par or better than several competitive adversarial-based approaches. We further extend cutoff to machine translation and observe significant gains in BLEU scores (based upon the Transformer Base model). Moreover, cutoff consistently outperforms adversarial training and achieves state-of-the-art results on the IWSLT2014 German-English dataset.", "year": 2020, "venue": "arXiv.org", "authors": [ "Dinghan Shen", "Ming Zheng", "Yelong Shen", "Yanru Qu", "Weizhu Chen" ], "externalIds": { "MAG": "3089659770", "DBLP": "journals/corr/abs-2009-13818", "ArXiv": "2009.13818", "CorpusId": 221995719 }, "url": "https://www.semanticscholar.org/paper/5a11bd4e678fcb05cb8f5d30c45877fb58bdd3b3", "referenceCount": 46, "citationCount": 121, "influentialCitationCount": 18, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Contextualized Perturbation for Textual Adversarial Attack", "abstract": "Adversarial examples expose the vulnerabilities of natural language processing (NLP) models, and can be used to evaluate and improve their robustness. Existing techniques of generating such examples are typically driven by local heuristic rules that are agnostic to the context, often resulting in unnatural and ungrammatical outputs. This paper presents CLARE, a ContextuaLized AdversaRial Example generation model that produces fluent and grammatical outputs through a mask-then-infill procedure. CLARE builds on a pre-trained masked language model and modifies the inputs in a context-aware manner. We propose three contextualized perturbations, Replace, Insert and Merge, that allow for generating outputs of varied lengths. CLARE can flexibly combine these perturbations and apply them at any position in the inputs, and is thus able to attack the victim model more effectively with fewer edits. Extensive experiments and human evaluation demonstrate that CLARE outperforms the baselines in terms of attack success rate, textual similarity, fluency and grammaticality.", "year": 2020, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Dianqi Li", "Yizhe Zhang", "Hao Peng", "Liqun Chen", "Chris Brockett", "Ming-Ting Sun", "Bill Dolan" ], "externalIds": { "MAG": "3169948074", "ACL": "2021.naacl-main.400", "DBLP": "journals/corr/abs-2009-07502", "ArXiv": "2009.07502", "DOI": "10.18653/V1/2021.NAACL-MAIN.400", "CorpusId": 221739314 }, "url": "https://www.semanticscholar.org/paper/472cd41fa2ba2e520706f232cae12db4a7b5e60a", "referenceCount": 67, "citationCount": 212, "influentialCitationCount": 38, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Quaternion-Based Self-Attentive Long Short-term User Preference Encoding for Recommendation", "abstract": "Quaternion space has brought several benefits over the traditional Euclidean space: Quaternions (i) consist of a real and three imaginary components, encouraging richer representations; (ii) utilize Hamilton product which better encodes the inter-latent interactions across multiple Quaternion components; and (iii) result in a model with smaller degrees of freedom and less prone to overfitting. Unfortunately, most of the current recommender systems rely on real-valued representations in Euclidean space to model either user's long-term or short-term interests. In this paper, we fully utilize Quaternion space to model both user's long-term and short-term preferences. 
We first propose a QUaternion-based self-Attentive Long term user Encoding (QUALE) to study the user's long-term intents. Then, we propose a QUaternion-based self-Attentive Short term user Encoding (QUASE) to learn the user's short-term interests. To enhance our models' capability, we propose to fuse QUALE and QUASE into one model, namely QUALSE, by using a Quaternion-based gating mechanism. We further develop Quaternion-based Adversarial learning along with the Bayesian Personalized Ranking (QABPR) to improve our model's robustness. Extensive experiments on six real-world datasets show that our fused QUALSE model outperformed 11 state-of-the-art baselines, improving 8.43% at HIT@1 and 10.27% at NDCG@1 on average compared with the best baseline.", "year": 2020, "venue": "International Conference on Information and Knowledge Management", "authors": [ "T. Tran", "Di You", "Kyumin Lee" ], "externalIds": { "DBLP": "journals/corr/abs-2008-13335", "ArXiv": "2008.13335", "MAG": "3082034020", "DOI": "10.1145/3340531.3411926", "CorpusId": 221376469 }, "url": "https://www.semanticscholar.org/paper/51560f469df9018c1aadc323cb989abe08d62970", "referenceCount": 49, "citationCount": 12, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Evaluation of Neural Architectures Trained with Square Loss vs Cross-Entropy in Classification Tasks", "abstract": "Modern neural architectures for classification tasks are trained using the cross-entropy loss, which is widely believed to be empirically superior to the square loss. In this work we provide evidence indicating that this belief may not be well-founded. We explore several major neural architectures and a range of standard benchmark datasets for NLP, automatic speech recognition (ASR) and computer vision tasks to show that these architectures, with the same hyper-parameter settings as reported in the literature, perform comparably or better when trained with the square loss, even after equalizing computational resources. Indeed, we observe that the square loss produces better results in the dominant majority of NLP and ASR experiments. Cross-entropy appears to have a slight edge on computer vision tasks. \nWe argue that there is little compelling empirical or theoretical evidence indicating a clear-cut advantage to the cross-entropy loss. Indeed, in our experiments, performance on nearly all non-vision tasks can be improved, sometimes significantly, by switching to the square loss. Furthermore, training with square loss appears to be less sensitive to the randomness in initialization. We posit that training using the square loss for classification needs to be a part of best practices of modern deep learning on equal footing with cross-entropy.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Like Hui", "M. Belkin" ], "externalIds": { "MAG": "3034893234", "DBLP": "journals/corr/abs-2006-07322", "ArXiv": "2006.07322", "CorpusId": 219635787 }, "url": "https://www.semanticscholar.org/paper/bcc48c5f68a387c89c75cd80d52ef52284db3c3a", "referenceCount": 49, "citationCount": 145, "influentialCitationCount": 14, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Towards Hate Speech Detection at Large via Deep Generative Modeling", "abstract": "Hate speech detection is a critical problem in social media, being often accused for enabling the spread of hatred and igniting violence. 
Hate speech detection requires overwhelming computing resources for online monitoring as well as thousands of human experts for daily screening of suspected posts or tweets. Recently, deep learning (DL)-based solutions have been proposed for hate speech detection, using modest-sized datasets of few thousands of sequences. While these methods perform well on the specific datasets, their ability to generalize to new hate speech sequences is limited. Being a data-driven approach, it is known that DL surpasses other methods whenever scale-up in trainset size and diversity is achieved. Therefore, we first present a dataset of 1 million hate and nonhate sequences, produced by a deep generative model. We further utilize the generated data to train a well-studied DL detector, demonstrating significant performance improvements across five hate speech datasets.", "year": 2020, "venue": "IEEE Internet Computing", "authors": [ "Tomer Wullach", "A. Adler", "Einat Minkov" ], "externalIds": { "MAG": "3093890791", "ArXiv": "2005.06370", "DBLP": "journals/internet/WullachAM21", "DOI": "10.1109/MIC.2020.3033161", "CorpusId": 218614055 }, "url": "https://www.semanticscholar.org/paper/d2aef43feecedb79b3cf367a5c0abbd64c4458f9", "referenceCount": 42, "citationCount": 34, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Beyond Accuracy: Behavioral Testing of NLP Models with CheckList", "abstract": "Although measuring held-out accuracy has been the primary approach to evaluate generalization, it often overestimates the performance of NLP models, while alternative approaches for evaluating models either focus on individual tasks or on specific behaviors. Inspired by principles of behavioral testing in software engineering, we introduce CheckList, a task-agnostic methodology for testing NLP models. CheckList includes a matrix of general linguistic capabilities and test types that facilitate comprehensive test ideation, as well as a software tool to generate a large and diverse number of test cases quickly. We illustrate the utility of CheckList with tests for three tasks, identifying critical failures in both commercial and state-of-art models. In a user study, a team responsible for a commercial sentiment analysis model found new and actionable bugs in an extensively tested model. In another user study, NLP practitioners with CheckList created twice as many tests, and found almost three times as many bugs as users without it.", "year": 2020, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Marco Tulio Ribeiro", "Tongshuang Sherry Wu", "Carlos Guestrin", "Sameer Singh" ], "externalIds": { "DBLP": "journals/corr/abs-2005-04118", "ACL": "2020.acl-main.442", "MAG": "3035507081", "ArXiv": "2005.04118", "DOI": "10.18653/v1/2020.acl-main.442", "CorpusId": 218551201 }, "url": "https://www.semanticscholar.org/paper/33ec7eb2168e37e3007d1059aa96b9a63254b4da", "referenceCount": 33, "citationCount": 962, "influentialCitationCount": 150, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "TextAttack: A Framework for Adversarial Attacks, Data Augmentation, and Adversarial Training in NLP", "abstract": "While there has been substantial research using adversarial attacks to analyze NLP models, each attack is implemented in its own code repository. It remains challenging to develop NLP attacks and utilize them to improve model performance. 
This paper introduces TextAttack, a Python framework for adversarial attacks, data augmentation, and adversarial training in NLP. TextAttack builds attacks from four components: a goal function, a set of constraints, a transformation, and a search method. TextAttack’s modular design enables researchers to easily construct attacks from combinations of novel and existing components. TextAttack provides implementations of 16 adversarial attacks from the literature and supports a variety of models and datasets, including BERT and other transformers, and all GLUE tasks. TextAttack also includes data augmentation and adversarial training modules for using components of adversarial attacks to improve model accuracy and robustness.TextAttack is democratizing NLP: anyone can try data augmentation and adversarial training on any model or dataset, with just a few lines of code. Code and tutorials are available at https://github.com/QData/TextAttack.", "year": 2020, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "John X. Morris", "Eli Lifland", "Jin Yong Yoo", "J. Grigsby", "Di Jin", "Yanjun Qi" ], "externalIds": { "MAG": "3105604018", "DBLP": "conf/emnlp/MorrisLYGJQ20", "ACL": "2020.emnlp-demos.16", "DOI": "10.18653/v1/2020.emnlp-demos.16", "CorpusId": 220714040 }, "url": "https://www.semanticscholar.org/paper/c9b56cb026a38e39bb0228faac57accd6f65e6f7", "referenceCount": 43, "citationCount": 632, "influentialCitationCount": 89, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "BAE: BERT-based Adversarial Examples for Text Classification", "abstract": "Modern text classification models are susceptible to adversarial examples, perturbed versions of the original text indiscernible by humans but which get misclassified by the model. We present BAE, a powerful black box attack for generating grammatically correct and semantically coherent adversarial examples. BAE replaces and inserts tokens in the original text by masking a portion of the text and leveraging a language model to generate alternatives for the masked tokens. Compared to prior work, we show that BAE performs a stronger attack on three widely used models for seven text classification datasets.", "year": 2020, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Siddhant Garg", "Goutham Ramakrishnan" ], "externalIds": { "MAG": "3104423855", "ACL": "2020.emnlp-main.498", "DBLP": "conf/emnlp/GargR20", "ArXiv": "2004.01970", "DOI": "10.18653/v1/2020.emnlp-main.498", "CorpusId": 214802269 }, "url": "https://www.semanticscholar.org/paper/06a427e1688f92053a38c73cb4e0da25177c89e7", "referenceCount": 35, "citationCount": 474, "influentialCitationCount": 69, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "BERT-ATTACK: Adversarial Attack against BERT Using BERT", "abstract": "Adversarial attacks for discrete data (such as text) has been proved significantly more challenging than continuous data (such as image), since it is difficult to generate adversarial samples with gradient-based methods. Currently, the successful attack methods for text usually adopt heuristic replacement strategies on character or word level, which remains challenging to find the optimal solution in the massive space of possible combination of replacements, while preserving semantic consistency and language fluency. 
In this paper, we propose \\textbf{BERT-Attack}, a high-quality and effective method to generate adversarial samples using pre-trained masked language models exemplified by BERT. We turn BERT against its fine-tuned models and other deep neural models for downstream tasks. Our method successfully misleads the target models to predict incorrectly, outperforming state-of-the-art attack strategies in both success rate and perturb percentage, while the generated adversarial samples are fluent and semantically preserved. Also, the cost of calculation is low, thus possible for large-scale generations.", "year": 2020, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Linyang Li", "Ruotian Ma", "Qipeng Guo", "X. Xue", "Xipeng Qiu" ], "externalIds": { "MAG": "3101449015", "DBLP": "conf/emnlp/LiMGXQ20", "ACL": "2020.emnlp-main.500", "ArXiv": "2004.09984", "DOI": "10.18653/v1/2020.emnlp-main.500", "CorpusId": 216036179 }, "url": "https://www.semanticscholar.org/paper/dc0ce66f5ab4c5173cdef951649044e4c4c05076", "referenceCount": 26, "citationCount": 595, "influentialCitationCount": 126, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Data Augmentation using Pre-trained Transformer Models", "abstract": "Language model based pre-trained models such as BERT have provided significant gains across different NLP tasks. In this paper, we study different types of transformer based pre-trained models such as auto-regressive models (GPT-2), auto-encoder models (BERT), and seq2seq models (BART) for conditional data augmentation. We show that prepending the class labels to text sequences provides a simple yet effective way to condition the pre-trained models for data augmentation. Additionally, on three classification benchmarks, pre-trained Seq2Seq model outperforms other data augmentation methods in a low-resource setting. Further, we explore how different pre-trained model based data augmentation differs in-terms of data diversity, and how well such methods preserve the class-label information.", "year": 2020, "venue": "LIFELONGNLP", "authors": [ "Varun Kumar", "Ashutosh Choudhary", "Eunah Cho" ], "externalIds": { "DBLP": "journals/corr/abs-2003-02245", "ArXiv": "2003.02245", "ACL": "2020.lifelongnlp-1.3", "MAG": "3010293452", "CorpusId": 211987786 }, "url": "https://www.semanticscholar.org/paper/33496cb3a5623925267528fa6b726f015e4dcda2", "referenceCount": 35, "citationCount": 316, "influentialCitationCount": 33, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Adv-BERT: BERT is not robust on misspellings! Generating nature adversarial samples on BERT", "abstract": "There is an increasing amount of literature that claims the brittleness of deep neural networks in dealing with adversarial examples that are created maliciously. It is unclear, however, how the models will perform in realistic scenarios where \\textit{natural rather than malicious} adversarial instances often exist. This work systematically explores the robustness of BERT, the state-of-the-art Transformer-style model in NLP, in dealing with noisy data, particularly mistakes in typing the keyboard, that occur inadvertently. Intensive experiments on sentiment analysis and question answering benchmarks indicate that: (i) Typos in various words of a sentence do not influence equally. 
The typos in informative words make severer damages; (ii) Mistype is the most damaging factor, compared with inserting, deleting, etc.; (iii) Humans and machines have different focuses on recognizing adversarial attacks.", "year": 2020, "venue": "arXiv.org", "authors": [ "Lichao Sun", "Kazuma Hashimoto", "Wenpeng Yin", "Akari Asai", "Jia Li", "Philip S. Yu", "Caiming Xiong" ], "externalIds": { "DBLP": "journals/corr/abs-2003-04985", "ArXiv": "2003.04985", "MAG": "3011279327", "CorpusId": 212657400 }, "url": "https://www.semanticscholar.org/paper/efe638a32c6bd9ad24a233784008bfe5b33cfc83", "referenceCount": 23, "citationCount": 100, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Do Not Have Enough Data? Deep Learning to the Rescue!", "abstract": "Based on recent advances in natural language modeling and those in text generation capabilities, we propose a novel data augmentation method for text classification tasks. We use a powerful pre-trained neural network model to artificially synthesize new labeled data for supervised learning. We mainly focus on cases with scarce labeled data. Our method, referred to as language-model-based data augmentation (LAMBADA), involves fine-tuning a state-of-the-art language generator to a specific task through an initial training phase on the existing (usually small) labeled data. Using the fine-tuned model and given a class label, new sentences for the class are generated. Our process then filters these new sentences by using a classifier trained on the original data. In a series of experiments, we show that LAMBADA improves classifiers' performance on a variety of datasets. Moreover, LAMBADA significantly improves upon the state-of-the-art techniques for data augmentation, specifically those applicable to text classification tasks with little data.", "year": 2019, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Ateret Anaby-Tavor", "Boaz Carmeli", "Esther Goldbraich", "Amir Kantor", "George Kour", "Segev Shlomov", "N. Tepper", "Naama Zwerdling" ], "externalIds": { "ArXiv": "1911.03118", "MAG": "2998184481", "DBLP": "conf/aaai/Anaby-TavorCGKK20", "DOI": "10.1609/AAAI.V34I05.6233", "CorpusId": 212821571 }, "url": "https://www.semanticscholar.org/paper/7eba731a7fd8de712b7b79b5af41a6e2d4dbd191", "referenceCount": 45, "citationCount": 316, "influentialCitationCount": 27, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Augment to Prevent: Short-Text Data Augmentation in Deep Learning for Hate-Speech Classification", "abstract": "In this paper, we address the issue of augmenting text data in supervised Natural Language Processing problems, exemplified by deep online hate speech classification. A great challenge in this domain is that although the presence of hate speech can be deleterious to the quality of service provided by social platforms, it still comprises only a tiny fraction of the content that can be found online, which can lead to performance deterioration due to majority class overfitting. To this end, we perform a thorough study on the application of deep learning to the hate speech detection problem: a) we propose three text-based data augmentation techniques aimed at reducing the degree of class imbalance and to maximise the amount of information we can extract from our limited resources and b) we apply them on a selection of top-performing deep architectures and hate speech databases in order to showcase their generalisation properties. 
The data augmentation techniques are based on a) synonym replacement based on word embedding vector closeness, b) warping of the word tokens along the padded sequence or c) class-conditional, recurrent neural language generation. Our proposed framework yields a significant increase in multi-class hate speech detection, outperforming the baseline in the largest online hate speech database by an absolute 5.7% increase in Macro-F1 score and 30% in hate speech class recall.", "year": 2019, "venue": "International Conference on Information and Knowledge Management", "authors": [ "Georgios Rizos", "Konstantin Hemker", "Björn Schuller" ], "externalIds": { "DBLP": "conf/cikm/RizosHS19", "MAG": "2983149555", "DOI": "10.1145/3357384.3358040", "CorpusId": 207757246 }, "url": "https://www.semanticscholar.org/paper/b46194ac5696379cfa920ff08cda8d7c4cb6579c", "referenceCount": 50, "citationCount": 98, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Automatic Hate Speech Detection on Social Media: A Brief Survey", "abstract": "Due to the advancement in technology and the explosion of the information age, people communicate with each other indirectly via using the online social networks (OSNs), such as Facebook Snapchat, Instagram, and Twitter. Users of OSNs can post anything without any control or constraint of the content, which leads to increase in spreading of hateful and offensive speech among users, thus resulting in an increase in crimes, murder, and terrorism. Hence, this paper provides a survey and state of the art natural language processing (NLP) technique that is used in automatic detection of the hate speech on OSNs, such as dictionaries, bag-of-words, N-gram etc.", "year": 2019, "venue": "ACS/IEEE International Conference on Computer Systems and Applications", "authors": [ "Ahlam Alrehili" ], "externalIds": { "DBLP": "conf/aiccsa/Alrehili19", "MAG": "3011369415", "DOI": "10.1109/AICCSA47632.2019.9035228", "CorpusId": 213183048 }, "url": "https://www.semanticscholar.org/paper/8dcb2d7fe9ba9f0944dcc033d4e86fcd266fc9f7", "referenceCount": 35, "citationCount": 26, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Atalaya at TASS 2019: Data Augmentation and Robust Embeddings for Sentiment Analysis", "abstract": "In this article we describe our participation in TASS 2019, a shared task aimed at the detection of sentiment polarity of Spanish tweets. We combined different representations such as bag-of-words, bag-of-characters, and tweet embeddings. In particular, we trained robust subword-aware word embeddings and computed tweet representations using a weighted-averaging strategy. We also used two data augmentation techniques to deal with data scarcity: two-way translation augmentation, and instance crossover augmentation, a novel technique that generates new instances by combining halves of tweets. In experiments, we trained linear classifiers and ensemble models, obtaining highly competitive results despite the simplicity of our approaches.", "year": 2019, "venue": "IberLEF@SEPLN", "authors": [ "F. 
Luque" ], "externalIds": { "MAG": "2971202292", "DBLP": "journals/corr/abs-1909-11241", "ArXiv": "1909.11241", "CorpusId": 199448337 }, "url": "https://www.semanticscholar.org/paper/bb6e5a4e25243b274d5c9e8db46ce5c6402a31a3", "referenceCount": 10, "citationCount": 25, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "TinyBERT: Distilling BERT for Natural Language Understanding", "abstract": "Language model pre-training, such as BERT, has significantly improved the performances of many natural language processing tasks. However, pre-trained language models are usually computationally expensive, so it is difficult to efficiently execute them on resource-restricted devices. To accelerate inference and reduce model size while maintaining accuracy, we first propose a novel Transformer distillation method that is specially designed for knowledge distillation (KD) of the Transformer-based models. By leveraging this new KD method, the plenty of knowledge encoded in a large “teacher” BERT can be effectively transferred to a small “student” TinyBERT. Then, we introduce a new two-stage learning framework for TinyBERT, which performs Transformer distillation at both the pre-training and task-specific learning stages. This framework ensures that TinyBERT can capture the general-domain as well as the task-specific knowledge in BERT. TinyBERT4 with 4 layers is empirically effective and achieves more than 96.8% the performance of its teacher BERT-Base on GLUE benchmark, while being 7.5x smaller and 9.4x faster on inference. TinyBERT4 is also significantly better than 4-layer state-of-the-art baselines on BERT distillation, with only ~28% parameters and ~31% inference time of them. Moreover, TinyBERT6 with 6 layers performs on-par with its teacher BERT-Base.", "year": 2019, "venue": "Findings", "authors": [ "Xiaoqi Jiao", "Yichun Yin", "Lifeng Shang", "Xin Jiang", "Xiao Chen", "Linlin Li", "F. Wang", "Qun Liu" ], "externalIds": { "MAG": "3105966348", "DBLP": "conf/emnlp/JiaoYSJCL0L20", "ArXiv": "1909.10351", "ACL": "2020.findings-emnlp.372", "DOI": "10.18653/v1/2020.findings-emnlp.372", "CorpusId": 202719327 }, "url": "https://www.semanticscholar.org/paper/0cbf97173391b0430140117027edcaf1a37968c7", "referenceCount": 57, "citationCount": 1595, "influentialCitationCount": 256, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multilingual and Multi-Aspect Hate Speech Analysis", "abstract": "Current research on hate speech analysis is typically oriented towards monolingual and single classification tasks. In this paper, we present a new multilingual multi-aspect hate speech analysis dataset and use it to test the current state-of-the-art multilingual multitask learning approaches. We evaluate our dataset in various classification settings, then we discuss how to leverage our annotations in order to improve hate speech detection and classification in general.", "year": 2019, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "N. Ousidhoum", "Zizheng Lin", "Hongming Zhang", "Yangqiu Song", "D. 
Yeung" ], "externalIds": { "MAG": "2970269073", "ACL": "D19-1474", "ArXiv": "1908.11049", "DBLP": "conf/emnlp/OusidhoumLZSY19", "DOI": "10.18653/v1/D19-1474", "CorpusId": 201669180 }, "url": "https://www.semanticscholar.org/paper/3b4cc4386893b787f95709ca29015d4e5ef10d64", "referenceCount": 32, "citationCount": 249, "influentialCitationCount": 28, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Hate Speech Detection is Not as Easy as You May Think: A Closer Look at Model Validation", "abstract": "Hate speech is an important problem that is seriously affecting the dynamics and usefulness of online social communities. Large scale social platforms are currently investing important resources into automatically detecting and classifying hateful content, without much success. On the other hand, the results reported by state-of-the-art systems indicate that supervised approaches achieve almost perfect performance but only within specific datasets. In this work, we analyze this apparent contradiction between existing literature and actual applications. We study closely the experimental methodology used in prior work and their generalizability to other datasets. Our findings evidence methodological issues, as well as an important dataset bias. As a consequence, performance claims of the current state-of-the-art have become significantly overestimated. The problems that we have found are mostly related to data overfitting and sampling issues. We discuss the implications for current research and re-conduct experiments to give a more accurate picture of the current state-of-the art methods.", "year": 2019, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "Aymé Arango", "Jorge Pérez", "Bárbara Poblete" ], "externalIds": { "DBLP": "conf/sigir/ArangoPP19", "MAG": "2954479967", "DOI": "10.1145/3331184.3331262", "CorpusId": 197680325 }, "url": "https://www.semanticscholar.org/paper/f865d782448f9bf1cf3a57c65fb9300bcd86bda4", "referenceCount": 35, "citationCount": 152, "influentialCitationCount": 17, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A General Framework for Uncertainty Estimation in Deep Learning", "abstract": "Neural networks predictions are unreliable when the input sample is out of the training distribution or corrupted by noise. Being able to detect such failures automatically is fundamental to integrate deep learning algorithms into robotics. Current approaches for uncertainty estimation of neural networks require changes to the network and optimization process, typically ignore prior knowledge about the data, and tend to make over-simplifying assumptions which underestimate uncertainty. To address these limitations, we propose a novel framework for uncertainty estimation. Based on Bayesian belief networks and Monte-Carlo sampling, our framework not only fully models the different sources of prediction uncertainty, but also incorporates prior data information, e.g. sensor noise. We show theoretically that this gives us the ability to capture uncertainty better than existing methods. In addition, our framework has several desirable properties: (i) it is agnostic to the network architecture and task; (ii) it does not require changes in the optimization process; (iii) it can be applied to already trained architectures. 
We thoroughly validate the proposed framework through extensive experiments on both computer vision and control tasks, where we outperform previous methods by up to 23% in accuracy. The video available at https://youtu.be/X7n-bRS5vSM shows qualitative results of our experiments. The project's code is available at: https://tinyurl.com/s3nygw7.", "year": 2019, "venue": "IEEE Robotics and Automation Letters", "authors": [ "Antonio Loquercio", "Mattia Segu", "D. Scaramuzza" ], "externalIds": { "MAG": "3007223254", "DBLP": "journals/corr/abs-1907-06890", "ArXiv": "1907.06890", "DOI": "10.1109/LRA.2020.2974682", "CorpusId": 196831571 }, "url": "https://www.semanticscholar.org/paper/f883e232ef23401b34f65276ee40ce58bab992af", "referenceCount": 46, "citationCount": 250, "influentialCitationCount": 12, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics", "Engineering" ] }, { "title": "CONAN - COunter NArratives through Nichesourcing: a Multilingual Dataset of Responses to Fight Online Hate Speech", "abstract": "Although there is an unprecedented effort to provide adequate responses in terms of laws and policies to hate content on social media platforms, dealing with hatred online is still a tough problem. Tackling hate speech in the standard way of content deletion or user suspension may be charged with censorship and overblocking. One alternate strategy, that has received little attention so far by the research community, is to actually oppose hate content with counter-narratives (i.e. informed textual responses). In this paper, we describe the creation of the first large-scale, multilingual, expert-based dataset of hate-speech/counter-narrative pairs. This dataset has been built with the effort of more than 100 operators from three different NGOs that applied their training and expertise to the task. Together with the collected data we also provide additional annotations about expert demographics, hate and response type, and data augmentation through translation and paraphrasing. Finally, we provide initial experiments to assess the quality of our data.", "year": 2019, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Yi-Ling Chung", "E. Kuzmenko", "Serra Sinem Tekiroğlu", "Marco Guerini" ], "externalIds": { "MAG": "2980290088", "ArXiv": "1910.03270", "ACL": "P19-1271", "DBLP": "journals/corr/abs-1910-03270", "DOI": "10.18653/v1/P19-1271", "CorpusId": 196188987 }, "url": "https://www.semanticscholar.org/paper/1dae97251a05320f5749355baa50387607318832", "referenceCount": 59, "citationCount": 174, "influentialCitationCount": 21, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ARHNet - Leveraging Community Interaction for Detection of Religious Hate Speech in Arabic", "abstract": "The rapid widespread of social media has lead to some undesirable consequences like the rapid increase of hateful content and offensive language. Religious Hate Speech, in particular, often leads to unrest and sometimes aggravates to violence against people on the basis of their religious affiliations. The richness of the Arabic morphology and the limited available resources makes this task especially challenging. The current state-of-the-art approaches to detect hate speech in Arabic rely entirely on textual (lexical and semantic) cues. Our proposed methodology contends that leveraging Community-Interaction can better help us profile hate speech content on social media. 
Our proposed ARHNet (Arabic Religious Hate Speech Net) model incorporates both Arabic Word Embeddings and Social Network Graphs for the detection of religious hate speech.", "year": 2019, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Arijit Ghosh Chowdhury", "Aniket Didolkar", "Ramit Sawhney", "R. Shah" ], "externalIds": { "MAG": "2949444725", "ACL": "P19-2038", "DBLP": "conf/acl/ChowdhuryDSS19", "DOI": "10.18653/v1/P19-2038", "CorpusId": 196200400 }, "url": "https://www.semanticscholar.org/paper/016a3216fb4b66336331795b948953d394c53896", "referenceCount": 30, "citationCount": 28, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Lightweight and Efficient Neural Natural Language Processing with Quaternion Networks", "abstract": "Many state-of-the-art neural models for NLP are heavily parameterized and thus memory inefficient. This paper proposes a series of lightweight and memory efficient neural architectures for a potpourri of natural language processing (NLP) tasks. To this end, our models exploit computation using Quaternion algebra and hypercomplex spaces, enabling not only expressive inter-component interactions but also significantly (75%) reduced parameter size due to lesser degrees of freedom in the Hamilton product. We propose Quaternion variants of models, giving rise to new architectures such as the Quaternion attention Model and Quaternion Transformer. Extensive experiments on a battery of NLP tasks demonstrates the utility of proposed Quaternion-inspired models, enabling up to 75% reduction in parameter size without significant loss in performance.", "year": 2019, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Yi Tay", "Aston Zhang", "Anh Tuan Luu", "J. Rao", "Shuai Zhang", "Shuohang Wang", "Jie Fu", "S. Hui" ], "externalIds": { "MAG": "2952383053", "DBLP": "journals/corr/abs-1906-04393", "ACL": "P19-1145", "ArXiv": "1906.04393", "DOI": "10.18653/v1/P19-1145", "CorpusId": 184488400 }, "url": "https://www.semanticscholar.org/paper/5240bad304d5e9dd6a7ab1e089e024119ae55567", "referenceCount": 45, "citationCount": 52, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Quaternion Collaborative Filtering for Recommendation", "abstract": "This paper proposes Quaternion Collaborative Filtering (QCF), a novel representation learning method for recommendation. Our proposed QCF relies on and exploits computation with Quaternion algebra, benefiting from the expressiveness and rich representation learning capability of Hamilton products. Quaternion representations, based on hypercomplex numbers, enable rich inter-latent dependencies between imaginary components. This encourages intricate relations to be captured when learning user-item interactions, serving as a strong inductive bias  as compared with the real-space inner product. All in all, we conduct extensive experiments on six real-world datasets, demonstrating the effectiveness of Quaternion algebra in recommender systems. The results exhibit that QCF outperforms a wide spectrum of strong neural baselines on all datasets. Ablative experiments confirm the effectiveness of Hamilton-based composition over multi-embedding composition in real space. 
", "year": 2019, "venue": "International Joint Conference on Artificial Intelligence", "authors": [ "Shuai Zhang", "Lina Yao", "Lucas Vinh Tran", "Aston Zhang", "Yi Tay" ], "externalIds": { "MAG": "2948937670", "DBLP": "journals/corr/abs-1906-02594", "ArXiv": "1906.02594", "DOI": "10.24963/ijcai.2019/599", "CorpusId": 174802656 }, "url": "https://www.semanticscholar.org/paper/9ad1bf541b1cd51c40636d25565d9ee2ff1d744e", "referenceCount": 49, "citationCount": 27, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fair Is Better than Sensational: Man Is to Doctor as Woman Is to Doctor", "abstract": "Analogies such as man is to king as woman is to X are often used to illustrate the amazing power of word embeddings. Concurrently, they have also been used to expose how strongly human biases are encoded in vector spaces trained on natural language, with examples like man is to computer programmer as woman is to homemaker. Recent work has shown that analogies are in fact not an accurate diagnostic for bias, but this does not mean that they are not used anymore, or that their legacy is fading. Instead of focusing on the intrinsic problems of the analogy task as a bias detection tool, we discuss a series of issues involving implementation as well as subjective choices that might have yielded a distorted picture of bias in word embeddings. We stand by the truth that human biases are present in word embeddings, and, of course, the need to address them. But analogies are not an accurate tool to do so, and the way they have been most often used has exacerbated some possibly non-existing biases and perhaps hidden others. Because they are still widely popular, and some of them have become classics within and outside the NLP community, we deem it important to provide a series of clarifications that should put well-known, and potentially new analogies, into the right perspective.", "year": 2019, "venue": "International Conference on Computational Logic", "authors": [ "M. Nissim", "Rik van Noord", "Rob van der Goot" ], "externalIds": { "MAG": "3013547323", "DBLP": "journals/corr/abs-1905-09866", "ArXiv": "1905.09866", "ACL": "2020.cl-2.7", "DOI": "10.1162/coli_a_00379", "CorpusId": 165163511 }, "url": "https://www.semanticscholar.org/paper/4b6f6669060367ae8f58e8a749bde085102f6298", "referenceCount": 34, "citationCount": 92, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Psychology" ] }, { "title": "Fuzzy Multi-task Learning for Hate Speech Type Identification", "abstract": "In traditional machine learning, classifiers training is typically undertaken in the setting of single-task learning, so the trained classifier can discriminate between different classes. However, this must be based on the assumption that different classes are mutually exclusive. In real applications, the above assumption does not always hold. For example, the same book may belong to multiple subjects. From this point of view, researchers were motivated to formulate multi-label learning problems. In this context, each instance can be assigned multiple labels but the classifiers training is still typically undertaken in the setting of single-task learning. When probabilistic approaches are adopted for classifiers training, multi-task learning can be enabled through transformation of a multi-labelled data set into several binary data sets. The above data transformation could usually result in the class imbalance issue. 
Without the above data transformation, multi-labelling of data results in an exponential increase of the number of classes, leading to fewer instances for each class and a higher difficulty for identifying each class. In addition, multi-labelling of data is very time consuming and expensive in some application areas, such as hate speech detection. In this paper, we introduce a novel formulation of the hate speech type identification problem in the setting of multi-task learning through our proposed fuzzy ensemble approach. In this setting, single-labelled data can be used for semi-supervised multi-label learning and two new metrics (detection rate and irrelevance rate) are thus proposed to measure more effectively the performance for this kind of learning tasks. We report an experimental study on identification of four types of hate speech, namely: religion, race, disability and sexual orientation. The experimental results show that our proposed fuzzy ensemble approach outperforms other popular probabilistic approaches, with an overall detection rate of 0.93.", "year": 2019, "venue": "The Web Conference", "authors": [ "Han Liu", "P. Burnap", "Wafa Alorainy", "M. Williams" ], "externalIds": { "DBLP": "conf/www/0002BAW19", "MAG": "2913698966", "DOI": "10.1145/3308558.3313546", "CorpusId": 86513978 }, "url": "https://www.semanticscholar.org/paper/f5aef44d14484ad006966b84e7a63810731303f8", "referenceCount": 42, "citationCount": 33, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Stereotypical Bias Removal for Hate Speech Detection Task using Knowledge-based Generalizations", "abstract": "With the ever-increasing cases of hate spread on social media platforms, it is critical to design abuse detection mechanisms to pro-actively avoid and control such incidents. While there exist methods for hate speech detection, they stereotype words and hence suffer from inherently biased training. Bias removal has been traditionally studied for structured datasets, but we aim at bias mitigation from unstructured text data. In this paper, we make two important contributions. First, we systematically design methods to quantify the bias for any model and propose algorithms for identifying the set of words which the model stereotypes. Second, we propose novel methods leveraging knowledge-based generalizations for bias-free learning. Knowledge-based generalization provides an effective way to encode knowledge because the abstraction they provide not only generalizes content but also facilitates retraction of information from the hate speech detection classifier, thereby reducing the imbalance. We experiment with multiple knowledge generalization policies and analyze their effect on general performance and in mitigating bias. Our experiments with two real-world datasets, a Wikipedia Talk Pages dataset (WikiDetox) of size ~ 96k and a Twitter dataset of size ~ 24k, show that the use of knowledge-based generalizations results in better performance by forcing the classifier to learn from generalized content. 
Our methods utilize existing knowledge-bases and can easily be extended to other tasks.", "year": 2019, "venue": "The Web Conference", "authors": [ "Pinkesh Badjatiya", "Manish Gupta", "Vasudeva Varma" ], "externalIds": { "DBLP": "journals/corr/abs-2001-05495", "ArXiv": "2001.05495", "MAG": "2914097099", "DOI": "10.1145/3308558.3313504", "CorpusId": 86446136 }, "url": "https://www.semanticscholar.org/paper/6ef21d5bf92ca5bab0ba304a12454e6d48b6527a", "referenceCount": 23, "citationCount": 96, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Combating Adversarial Misspellings with Robust Word Recognition", "abstract": "To combat adversarial spelling mistakes, we propose placing a word recognition model in front of the downstream classifier. Our word recognition models build upon the RNN semi-character architecture, introducing several new backoff strategies for handling rare and unseen words. Trained to recognize words corrupted by random adds, drops, swaps, and keyboard mistakes, our method achieves 32% relative (and 3.3% absolute) error reduction over the vanilla semi-character model. Notably, our pipeline confers robustness on the downstream classifier, outperforming both adversarial training and off-the-shelf spell checkers. Against a BERT model fine-tuned for sentiment analysis, a single adversarially-chosen character attack lowers accuracy from 90.3% to 45.8%. Our defense restores accuracy to 75%. Surprisingly, better word recognition does not always entail greater robustness. Our analysis reveals that robustness also depends upon a quantity that we denote the sensitivity.", "year": 2019, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Danish Pruthi", "Bhuwan Dhingra", "Zachary Chase Lipton" ], "externalIds": { "MAG": "2952872637", "DBLP": "conf/acl/PruthiDL19", "ACL": "P19-1561", "ArXiv": "1905.11268", "DOI": "10.18653/v1/P19-1561", "CorpusId": 166228669 }, "url": "https://www.semanticscholar.org/paper/162515d87256f13888d9d7ba95275ac4b6c35396", "referenceCount": 30, "citationCount": 283, "influentialCitationCount": 42, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Unsupervised Data Augmentation for Consistency Training", "abstract": "Semi-supervised learning lately has shown much promise in improving deep learning models when labeled data is scarce. Common among recent approaches is the use of consistency training on a large amount of unlabeled data to constrain model predictions to be invariant to input noise. In this work, we present a new perspective on how to effectively noise unlabeled examples and argue that the quality of noising, specifically those produced by advanced data augmentation methods, plays a crucial role in semi-supervised learning. By substituting simple noising operations with advanced data augmentation methods such as RandAugment and back-translation, our method brings substantial improvements across six language and three vision tasks under the same consistency training framework. On the IMDb text classification dataset, with only 20 labeled examples, our method achieves an error rate of 4.20, outperforming the state-of-the-art model trained on 25,000 labeled examples. On a standard semi-supervised learning benchmark, CIFAR-10, our method outperforms all previous approaches and achieves an error rate of 5.43 with only 250 examples. 
Our method also combines well with transfer learning, e.g., when finetuning from BERT, and yields improvements in high-data regime, such as ImageNet, whether when there is only 10% labeled data or when a full labeled set with 1.3M extra unlabeled examples is used. Code is available at this https URL.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Qizhe Xie", "Zihang Dai", "E. Hovy", "Minh-Thang Luong", "Quoc V. Le" ], "externalIds": { "MAG": "2962369866", "DBLP": "conf/nips/XieDHL020", "ArXiv": "1904.12848", "CorpusId": 195873898 }, "url": "https://www.semanticscholar.org/paper/0feea94f89d395436bf41bd10c797447eecbc128", "referenceCount": 84, "citationCount": 2066, "influentialCitationCount": 286, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "The Curious Case of Neural Text Degeneration", "abstract": "Despite considerable advancements with deep neural language models, the enigma of neural text degeneration persists when these models are tested as text generators. The counter-intuitive empirical observation is that even though the use of likelihood as training objective leads to high quality models for a broad range of language understanding tasks, using likelihood as a decoding objective leads to text that is bland and strangely repetitive. \nIn this paper, we reveal surprising distributional differences between human text and machine text. In addition, we find that decoding strategies alone can dramatically effect the quality of machine text, even when generated from exactly the same neural language model. Our findings motivate Nucleus Sampling, a simple but effective method to draw the best out of neural generation. By sampling text from the dynamic nucleus of the probability distribution, which allows for diversity while effectively truncating the less reliable tail of the distribution, the resulting text better demonstrates the quality of human text, yielding enhanced diversity without sacrificing fluency and coherence.", "year": 2019, "venue": "International Conference on Learning Representations", "authors": [ "Ari Holtzman", "Jan Buys", "Li Du", "Maxwell Forbes", "Yejin Choi" ], "externalIds": { "DBLP": "journals/corr/abs-1904-09751", "MAG": "2938704169", "ArXiv": "1904.09751", "CorpusId": 127986954 }, "url": "https://www.semanticscholar.org/paper/cf4aa38ae31b43fd07abe13b4ffdb265babb7be1", "referenceCount": 44, "citationCount": 2636, "influentialCitationCount": 488, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "EDA: Easy Data Augmentation Techniques for Boosting Performance on Text Classification Tasks", "abstract": "We present EDA: easy data augmentation techniques for boosting performance on text classification tasks. EDA consists of four simple but powerful operations: synonym replacement, random insertion, random swap, and random deletion. On five text classification tasks, we show that EDA improves performance for both convolutional and recurrent neural networks. EDA demonstrates particularly strong results for smaller datasets; on average, across five datasets, training with EDA while using only 50% of the available training set achieved the same accuracy as normal training with all available data. 
We also performed extensive ablation studies and suggest parameters for practical use.", "year": 2019, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Jason Wei", "Kai Zou" ], "externalIds": { "ArXiv": "1901.11196", "MAG": "2911588830", "DBLP": "conf/emnlp/WeiZ19", "ACL": "D19-1670", "DOI": "10.18653/v1/D19-1670", "CorpusId": 59523656 }, "url": "https://www.semanticscholar.org/paper/162cad5df347bdac469331df540440b320b5aa21", "referenceCount": 36, "citationCount": 1718, "influentialCitationCount": 265, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Modeling Human Motion with Quaternion-Based Neural Networks", "abstract": null, "year": 2019, "venue": "International Journal of Computer Vision", "authors": [ "Dario Pavllo", "Christoph Feichtenhofer", "Michael Auli", "David Grangier" ], "externalIds": { "DBLP": "journals/ijcv/PavlloFAG20", "MAG": "2979708657", "ArXiv": "1901.07677", "DOI": "10.1007/s11263-019-01245-6", "CorpusId": 59158790 }, "url": "https://www.semanticscholar.org/paper/2549dba602502ced05f25bea41b1e66b5f0bfd40", "referenceCount": 107, "citationCount": 159, "influentialCitationCount": 18, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Measuring and Mitigating Unintended Bias in Text Classification", "abstract": "We introduce and illustrate a new approach to measuring and mitigating unintended bias in machine learning models. Our definition of unintended bias is parameterized by a test set and a subset of input features. We illustrate how this can be used to evaluate text classifiers using a synthetic test set and a public corpus of comments annotated for toxicity from Wikipedia Talk pages. We also demonstrate how imbalances in training data can lead to unintended bias in the resulting models, and therefore potentially unfair applications. We use a set of common demographic identity terms as the subset of input features on which we measure bias. This technique permits analysis in the common scenario where demographic information on authors and readers is unavailable, so that bias mitigation must focus on the content of the text itself. The mitigation method we introduce is an unsupervised approach based on balancing the training dataset. 
We demonstrate that this approach reduces the unintended bias without compromising overall model quality.", "year": 2018, "venue": "AAAI/ACM Conference on AI, Ethics, and Society", "authors": [ "Lucas Dixon", "John Li", "Jeffrey Scott Sorensen", "Nithum Thain", "Lucy Vasserman" ], "externalIds": { "MAG": "2791170418", "DBLP": "conf/aies/DixonLSTV18", "DOI": "10.1145/3278721.3278729", "CorpusId": 54997157 }, "url": "https://www.semanticscholar.org/paper/44fc8d79fb8e0f8c6c6f680179b5803a789c6227", "referenceCount": 14, "citationCount": 692, "influentialCitationCount": 106, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Conditional BERT Contextual Augmentation", "abstract": null, "year": 2018, "venue": "International Conference on Conceptual Structures", "authors": [ "Xing Wu", "Shangwen Lv", "Liangjun Zang", "Jizhong Han", "Songlin Hu" ], "externalIds": { "DBLP": "conf/iccS/WuLZHH19", "MAG": "2956130159", "ArXiv": "1812.06705", "DOI": "10.1007/978-3-030-22747-0_7", "CorpusId": 56482333 }, "url": "https://www.semanticscholar.org/paper/188024469a2443f262b3cbb5c5d4a96851949d68", "referenceCount": 42, "citationCount": 290, "influentialCitationCount": 39, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "TextBugger: Generating Adversarial Text Against Real-world Applications", "abstract": "Deep Learning-based Text Understanding (DLTU) is the backbone technique behind various applications, including question answering, machine translation, and text classification. Despite its tremendous popularity, the security vulnerabilities of DLTU are still largely unknown, which is highly concerning given its increasing use in security-sensitive applications such as sentiment analysis and toxic content detection. In this paper, we show that DLTU is inherently vulnerable to adversarial text attacks, in which maliciously crafted texts trigger target DLTU systems and services to misbehave. Specifically, we present TextBugger, a general attack framework for generating adversarial texts. In contrast to prior works, TextBugger differs in significant ways: (i) effective -- it outperforms state-of-the-art attacks in terms of attack success rate; (ii) evasive -- it preserves the utility of benign text, with 94.9\\% of the adversarial text correctly recognized by human readers; and (iii) efficient -- it generates adversarial text with computational complexity sub-linear to the text length. We empirically evaluate TextBugger on a set of real-world DLTU systems and services used for sentiment analysis and toxic content detection, demonstrating its effectiveness, evasiveness, and efficiency. For instance, TextBugger achieves 100\\% success rate on the IMDB dataset based on Amazon AWS Comprehend within 4.61 seconds and preserves 97\\% semantic similarity. We further discuss possible defense mechanisms to mitigate such attack and the adversary's potential countermeasures, which leads to promising directions for further research.", "year": 2018, "venue": "Network and Distributed System Security Symposium", "authors": [ "Jinfeng Li", "S. 
Ji", "Tianyu Du", "Bo Li", "Ting Wang" ], "externalIds": { "MAG": "2963859254", "ArXiv": "1812.05271", "DBLP": "journals/corr/abs-1812-05271", "DOI": "10.14722/ndss.2019.23138", "CorpusId": 54815878 }, "url": "https://www.semanticscholar.org/paper/f91175950edf3804ff1573f570b03db9b108dece", "referenceCount": 40, "citationCount": 653, "influentialCitationCount": 119, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Text Data Augmentation Made Simple By Leveraging NLP Cloud APIs", "abstract": "In practice, it is common to find oneself with far too little text data to train a deep neural network. This \"Big Data Wall\" represents a challenge for minority language communities on the Internet, organizations, laboratories and companies that compete the GAFAM (Google, Amazon, Facebook, Apple, Microsoft). While most of the research effort in text data augmentation aims on the long-term goal of finding end-to-end learning solutions, which is equivalent to \"using neural networks to feed neural networks\", this engineering work focuses on the use of practical, robust, scalable and easy-to-implement data augmentation pre-processing techniques similar to those that are successful in computer vision. Several text augmentation techniques have been experimented. Some existing ones have been tested for comparison purposes such as noise injection or the use of regular expressions. Others are modified or improved techniques like lexical replacement. Finally more innovative ones, such as the generation of paraphrases using back-translation or by the transformation of syntactic trees, are based on robust, scalable, and easy-to-use NLP Cloud APIs. All the text augmentation techniques studied, with an amplification factor of only 5, increased the accuracy of the results in a range of 4.3% to 21.6%, with significant statistical fluctuations, on a standardized task of text polarity prediction. Some standard deep neural network architectures were tested: the multilayer perceptron (MLP), the long short-term memory recurrent network (LSTM) and the bidirectional LSTM (biLSTM). Classical XGBoost algorithm has been tested with up to 2.5% improvements.", "year": 2018, "venue": "arXiv.org", "authors": [ "Claude Coulombe" ], "externalIds": { "MAG": "2903745629", "ArXiv": "1812.04718", "DBLP": "journals/corr/abs-1812-04718", "CorpusId": 54481373 }, "url": "https://www.semanticscholar.org/paper/982aa0ee48a5fd228fb9fb3b3edd319b8af6f76d", "referenceCount": 52, "citationCount": 101, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Counterfactual Fairness in Text Classification through Robustness", "abstract": "In this paper, we study counterfactual fairness in text classification, which asks the question: How would the prediction change if the sensitive attribute referenced in the example were different? Toxicity classifiers demonstrate a counterfactual fairness issue by predicting that \"Some people are gay\" is toxic while \"Some people are straight\" is nontoxic. We offer a metric, counterfactual token fairness (CTF), for measuring this particular form of fairness in text classifiers, and describe its relationship with group fairness. Further, we offer three approaches, blindness, counterfactual augmentation, and counterfactual logit pairing (CLP), for optimizing counterfactual token fairness during training, bridging the robustness and fairness literature. Empirically, we find that blindness and CLP address counterfactual token fairness. 
The methods do not harm classifier performance, and have varying tradeoffs with group fairness. These approaches, both for measurement and optimization, provide a new path forward for addressing fairness concerns in text classification.", "year": 2018, "venue": "AAAI/ACM Conference on AI, Ethics, and Society", "authors": [ "Sahaj Garg", "Vincent Perot", "Nicole Limtiaco", "Ankur Taly", "Ed H. Chi", "Alex Beutel" ], "externalIds": { "MAG": "2960301233", "ArXiv": "1809.10610", "DBLP": "journals/corr/abs-1809-10610", "DOI": "10.1145/3306618.3317950", "CorpusId": 52880735 }, "url": "https://www.semanticscholar.org/paper/70e28eb8ee40cf5caa704ac7f87940c0818ba28e", "referenceCount": 27, "citationCount": 244, "influentialCitationCount": 27, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Hate Speech Dataset from a White Supremacy Forum", "abstract": "Hate speech is commonly defined as any communication that disparages a target group of people based on some characteristic such as race, colour, ethnicity, gender, sexual orientation, nationality, religion, or other characteristic. Due to the massive rise of user-generated web content on social media, the amount of hate speech is also steadily increasing. Over the past years, interest in online hate speech detection and, particularly, the automation of this task has continuously grown, along with the societal impact of the phenomenon. This paper describes a hate speech dataset composed of thousands of sentences manually labelled as containing hate speech or not. The sentences have been extracted from Stormfront, a white supremacist forum. A custom annotation tool has been developed to carry out the manual labelling task which, among other things, allows the annotators to choose whether to read the context of a sentence before labelling it. The paper also provides a thoughtful qualitative and quantitative study of the resulting dataset and several baseline experiments with different classification models. The dataset is publicly available.", "year": 2018, "venue": "Workshop on Abusive Language Online", "authors": [ "Ona de Gibert", "Naiara Pérez", "Aitor García-Pablos", "Montse Cuadros" ], "externalIds": { "DBLP": "conf/acl-alw/GibertPPC18", "ArXiv": "1809.04444", "MAG": "2890727387", "ACL": "W18-5102", "DOI": "10.18653/v1/W18-5102", "CorpusId": 52194540 }, "url": "https://www.semanticscholar.org/paper/1083ca53df2fb3df1d0df22fd39966bc9fba7f94", "referenceCount": 38, "citationCount": 387, "influentialCitationCount": 37, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Psychology" ] }, { "title": "Thou shalt not hate: Countering Online Hate Speech", "abstract": "Hate content in social media is ever increasing. While Facebook, Twitter, Google have attempted to take several steps to tackle the hateful content, they have mostly been unsuccessful. Counterspeech is seen as an effective way of tackling the online hate without any harm to the freedom of speech. Thus, an alternative strategy for these platforms could be to promote counterspeech as a defense against hate content. However, in order to have a successful promotion of such counterspeech, one has to have a deep understanding of its dynamics in the online world. Lack of carefully curated data largely inhibits such understanding. In this paper, we create and release the first ever dataset for counterspeech using comments from YouTube. 
The data contains 13,924 manually annotated comments where the labels indicate whether a comment is a counterspeech or not. This data allows us to perform a rigorous measurement study characterizing the linguistic structure of counterspeech for the first time. This analysis results in various interesting insights such as: the counterspeech comments receive much more likes as compared to the noncounterspeech comments, for certain communities majority of the non-counterspeech comments tend to be hate speech, the different types of counterspeech are not all equally effective and the language choice of users posting counterspeech is largely different from those posting non-counterspeech as revealed by a detailed psycholinguistic analysis. Finally, we build a set of machine learning models that are able to automatically detect counterspeech in YouTube videos with an F1-score of 0.71. We also build multilabel models that can detect different types of counterspeech in a comment with an F1-score of 0.60.", "year": 2018, "venue": "International Conference on Web and Social Media", "authors": [ "Binny Mathew", "Hardik Tharad", "Subham Rajgaria", "Prajwal Singhania", "S. Maity", "Pawan Goyal", "Animesh Mukherjee" ], "externalIds": { "MAG": "2955865060", "DBLP": "journals/corr/abs-1808-04409", "ArXiv": "1808.04409", "DOI": "10.13140/RG.2.2.31128.85765", "CorpusId": 52002120 }, "url": "https://www.semanticscholar.org/paper/41b27301406600fc772c67cc45a2497e5dea0e50", "referenceCount": 41, "citationCount": 151, "influentialCitationCount": 20, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Classification with Fairness Constraints: A Meta-Algorithm with Provable Guarantees", "abstract": "Developing classification algorithms that are fair with respect to sensitive attributes of the data is an important problem due to the increased deployment of classification algorithms in societal contexts. Several recent works have focused on studying classification with respect to specific fairness metrics, modeled the corresponding fair classification problem as constrained optimization problems, and developed tailored algorithms to solve them. Despite this, there still remain important metrics for which there are no fair classifiers with theoretical guarantees; primarily because the resulting optimization problem is non-convex. The main contribution of this paper is a meta-algorithm for classification that can take as input a general class of fairness constraints with respect to multiple non-disjoint and multi-valued sensitive attributes, and which comes with provable guarantees. In particular, our algorithm can handle non-convex \"linear fractional\" constraints (which includes fairness constraints such as predictive parity) for which no prior algorithm was known. Key to our results is an algorithm for a family of classification problems with convex constraints along with a reduction from classification problems with linear fractional constraints to this family. Empirically, we observe that our algorithm is fast, can achieve near-perfect fairness with respect to various fairness metrics, and the loss in accuracy due to the imposed fairness constraints is often small.", "year": 2018, "venue": "FAT", "authors": [ "L. E. Celis", "Lingxiao Huang", "Vijay Keswani", "Nisheeth K. 
Vishnoi" ], "externalIds": { "DBLP": "conf/fat/CelisHKV19", "ArXiv": "1806.06055", "MAG": "2950458290", "DOI": "10.1145/3287560.3287586", "CorpusId": 49272330 }, "url": "https://www.semanticscholar.org/paper/95e920c2ba4ed19e462d42b2802536a5b35b796b", "referenceCount": 70, "citationCount": 286, "influentialCitationCount": 22, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Quaternion Recurrent Neural Networks", "abstract": "Recurrent neural networks (RNNs) are powerful architectures to model sequential data, due to their capability to learn short and long-term dependencies between the basic elements of a sequence. Nonetheless, popular tasks such as speech or images recognition, involve multi-dimensional input features that are characterized by strong internal dependencies between the dimensions of the input vector. We propose a novel quaternion recurrent neural network (QRNN), alongside with a quaternion long-short term memory neural network (QLSTM), that take into account both the external relations and these internal structural dependencies with the quaternion algebra. Similarly to capsules, quaternions allow the QRNN to code internal dependencies by composing and processing multidimensional features as single entities, while the recurrent operation reveals correlations between the elements composing the sequence. We show that both QRNN and QLSTM achieve better performances than RNN and LSTM in a realistic application of automatic speech recognition. Finally, we show that QRNN and QLSTM reduce by a maximum factor of 3.3x the number of free parameters needed, compared to real-valued RNNs and LSTMs to reach better results, leading to a more compact representation of the relevant information.", "year": 2018, "venue": "International Conference on Learning Representations", "authors": [ "Titouan Parcollet", "M. Ravanelli", "Mohamed Morchid", "G. Linarès", "C. Trabelsi", "R. Mori", "Yoshua Bengio" ], "externalIds": { "MAG": "2807947879", "DBLP": "conf/iclr/ParcolletRMLTMB19", "ArXiv": "1806.04418", "CorpusId": 48365044 }, "url": "https://www.semanticscholar.org/paper/31a857249f9f3bcdeb8a3b2944620fc16f128f64", "referenceCount": 56, "citationCount": 121, "influentialCitationCount": 19, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Detecting Hate Speech on Twitter Using a Convolution-GRU Based Deep Neural Network", "abstract": null, "year": 2018, "venue": "Extended Semantic Web Conference", "authors": [ "Ziqi Zhang", "David Robinson", "Jonathan A. Tepper" ], "externalIds": { "DBLP": "conf/esws/ZhangRT18", "MAG": "2806872289", "DOI": "10.1007/978-3-319-93417-4_48", "CorpusId": 46939253 }, "url": "https://www.semanticscholar.org/paper/f35236ab42fc866be447aad9977ad42adc36c9e8", "referenceCount": 25, "citationCount": 538, "influentialCitationCount": 54, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Neural Network Acceptability Judgments", "abstract": "Abstract This paper investigates the ability of artificial neural networks to judge the grammatical acceptability of a sentence, with the goal of testing their linguistic competence. We introduce the Corpus of Linguistic Acceptability (CoLA), a set of 10,657 English sentences labeled as grammatical or ungrammatical from published linguistics literature. As baselines, we train several recurrent neural network models on acceptability classification, and find that our models outperform unsupervised models by Lau et al. (2016) on CoLA. 
Error-analysis on specific grammatical phenomena reveals that both Lau et al.’s models and ours learn systematic generalizations like subject-verb-object order. However, all models we test perform far below human level on a wide range of grammatical constructions.", "year": 2018, "venue": "Transactions of the Association for Computational Linguistics", "authors": [ "Alex Warstadt", "Amanpreet Singh", "Samuel R. Bowman" ], "externalIds": { "DBLP": "journals/tacl/WarstadtSB19", "ArXiv": "1805.12471", "MAG": "2978670439", "DOI": "10.1162/tacl_a_00290", "CorpusId": 44072099 }, "url": "https://www.semanticscholar.org/paper/cb0f3ee1e98faf92429d601cdcd76c69c1e484eb", "referenceCount": 70, "citationCount": 1217, "influentialCitationCount": 182, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "QuaterNet: A Quaternion-based Recurrent Model for Human Motion", "abstract": "Deep learning for predicting or generating 3D human pose sequences is an active research area. Previous work regresses either joint rotations or joint positions. The former strategy is prone to error accumulation along the kinematic chain, as well as discontinuities when using Euler angle or exponential map parameterizations. The latter requires re-projection onto skeleton constraints to avoid bone stretching and invalid configurations. This work addresses both limitations. Our recurrent network, QuaterNet, represents rotations with quaternions and our loss function performs forward kinematics on a skeleton to penalize absolute position errors instead of angle errors. On short-term predictions, QuaterNet improves the state-of-the-art quantitatively. For long-term generation, our approach is qualitatively judged as realistic as recent neural strategies from the graphics literature.", "year": 2018, "venue": "British Machine Vision Conference", "authors": [ "Dario Pavllo", "David Grangier", "Michael Auli" ], "externalIds": { "ArXiv": "1805.06485", "DBLP": "journals/corr/abs-1805-06485", "MAG": "2962916650", "CorpusId": 21687255 }, "url": "https://www.semanticscholar.org/paper/643ab168dfa1df777b9276281a54af32bf9b25d2", "referenceCount": 65, "citationCount": 249, "influentialCitationCount": 32, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Generating Natural Language Adversarial Examples", "abstract": "Deep neural networks (DNNs) are vulnerable to adversarial examples, perturbations to correctly classified examples which can cause the model to misclassify. In the image domain, these perturbations can often be made virtually indistinguishable to human perception, causing humans and state-of-the-art models to disagree. However, in the natural language domain, small perturbations are clearly perceptible, and the replacement of a single word can drastically alter the semantics of the document. Given these challenges, we use a black-box population-based optimization algorithm to generate semantically and syntactically similar adversarial examples that fool well-trained sentiment analysis and textual entailment models with success rates of 97% and 70%, respectively. We additionally demonstrate that 92.3% of the successful sentiment analysis adversarial examples are classified to their original label by 20 human annotators, and that the examples are perceptibly quite similar. Finally, we discuss an attempt to use adversarial training as a defense, but fail to yield improvement, demonstrating the strength and diversity of our adversarial examples. 
We hope our findings encourage researchers to pursue improving the robustness of DNNs in the natural language domain.", "year": 2018, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "M. Alzantot", "Yash Sharma", "Ahmed Elgohary", "Bo-Jhang Ho", "M. Srivastava", "Kai-Wei Chang" ], "externalIds": { "MAG": "2798966449", "ArXiv": "1804.07998", "ACL": "D18-1316", "DBLP": "journals/corr/abs-1804-07998", "DOI": "10.18653/v1/D18-1316", "CorpusId": 5076191 }, "url": "https://www.semanticscholar.org/paper/c68fbc1f4aa72d30974f8a3071054e3b227137fd", "referenceCount": 26, "citationCount": 863, "influentialCitationCount": 133, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Hate Lingo: A Target-based Linguistic Analysis of Hate Speech in Social Media", "abstract": "\n \n While social media empowers freedom of expression and individual voices, it also enables anti-social behavior, online harassment, cyberbullying, and hate speech. In this paper, we deepen our understanding of online hate speech by focusing on a largely neglected but crucial aspect of hate speech -- its target: either directed towards a specific person or entity, or generalized towards a group of people sharing a common protected characteristic. We perform the first linguistic and psycholinguistic analysis of these two forms of hate speech and reveal the presence of interesting markers that distinguish these types of hate speech. Our analysis reveals that Directed hate speech, in addition to being more personal and directed, is more informal, angrier, and often explicitly attacks the target (via name calling) with fewer analytic words and more words suggesting authority and influence. Generalized hate speech, on the other hand, is dominated by religious hate, is characterized by the use of lethal words such as murder, exterminate, and kill; and quantity words such as million and many. Altogether, our work provides a data-driven analysis of the nuances of online-hate speech that enables not only a deepened understanding of hate speech and its social implications, but also its detection.\n \n", "year": 2018, "venue": "International Conference on Web and Social Media", "authors": [ "Mai Elsherief", "Vivek Kulkarni", "Dana Nguyen", "William Yang Wang", "E. Belding-Royer" ], "externalIds": { "DBLP": "journals/corr/abs-1804-04257", "ArXiv": "1804.04257", "MAG": "2951247201", "DOI": "10.1609/icwsm.v12i1.15041", "CorpusId": 4809781 }, "url": "https://www.semanticscholar.org/paper/7554e7c56813731acfefdaf898ccb03e0d667007", "referenceCount": 48, "citationCount": 272, "influentialCitationCount": 19, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Psychology" ] }, { "title": "Mitigating Unwanted Biases with Adversarial Learning", "abstract": "Machine learning is a tool for building models that accurately represent input training data. When undesired biases concerning demographic groups are in the training data, well-trained models will reflect those biases. We present a framework for mitigating such biases by including a variable for the group of interest and simultaneously learning a predictor and an adversary. The input to the network X, here text or census data, produces a prediction Y, such as an analogy completion or income bracket, while the adversary tries to model a protected variable Z, here gender or zip code. The objective is to maximize the predictor's ability to predict Y while minimizing the adversary's ability to predict Z. 
Applied to analogy completion, this method results in accurate predictions that exhibit less evidence of stereotyping Z. When applied to a classification task using the UCI Adult (Census) Dataset, it results in a predictive model that does not lose much accuracy while achieving very close to equality of odds (Hardt, et al., 2016). The method is flexible and applicable to multiple definitions of fairness as well as a wide range of gradient-based learning models, including both regression and classification tasks.", "year": 2018, "venue": "AAAI/ACM Conference on AI, Ethics, and Society", "authors": [ "B. Zhang", "Blake Lemoine", "Margaret Mitchell" ], "externalIds": { "MAG": "2963116854", "DBLP": "journals/corr/abs-1801-07593", "ArXiv": "1801.07593", "DOI": "10.1145/3278721.3278779", "CorpusId": 9424845 }, "url": "https://www.semanticscholar.org/paper/c7330852a07170cd0e6990f5fbde5fca12b6ccd6", "referenceCount": 16, "citationCount": 1206, "influentialCitationCount": 125, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Universal Language Model Fine-tuning for Text Classification", "abstract": "Inductive transfer learning has greatly impacted computer vision, but existing approaches in NLP still require task-specific modifications and training from scratch. We propose Universal Language Model Fine-tuning (ULMFiT), an effective transfer learning method that can be applied to any task in NLP, and introduce techniques that are key for fine-tuning a language model. Our method significantly outperforms the state-of-the-art on six text classification tasks, reducing the error by 18-24% on the majority of datasets. Furthermore, with only 100 labeled examples, it matches the performance of training from scratch on 100 times more data. We open-source our pretrained models and code.", "year": 2018, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Jeremy Howard", "Sebastian Ruder" ], "externalIds": { "MAG": "2798812533", "DBLP": "conf/acl/RuderH18", "ACL": "P18-1031", "DOI": "10.18653/v1/P18-1031", "CorpusId": 40100965 }, "url": "https://www.semanticscholar.org/paper/1e077413b25c4d34945cc2707e17e46ed4fe784a", "referenceCount": 58, "citationCount": 3367, "influentialCitationCount": 304, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Quaternion Networks", "abstract": "The field of deep learning has seen significant advancement in recent years. However, much of the existing work has been focused on real-valued numbers. Recent work has shown that a deep learning system using the complex numbers can be deeper for a fixed parameter budget compared to its real-valued counterpart. In this work, we explore the benefits of generalizing one step further into the hyper-complex numbers, quaternions specifically, and provide the architecture components needed to build deep quaternion networks. We develop the theoretical basis by reviewing quaternion convolutions, developing a novel quaternion weight initialization scheme, and developing novel algorithms for quaternion batch-normalization. These pieces are tested in a classification model by end-to-end training on the CIFAR −10 and CIFAR −100 data sets and a segmentation model by end-to-end training on the KITTI Road Segmentation data set. 
These quaternion networks show improved convergence compared to real-valued and complex-valued networks, especially on the segmentation task, while having fewer parameters.", "year": 2017, "venue": "IEEE International Joint Conference on Neural Network", "authors": [ "Chase J. Gaudet", "A. Maida" ], "externalIds": { "ArXiv": "1712.04604", "DBLP": "conf/ijcnn/GaudetM18", "MAG": "2963230471", "DOI": "10.1109/IJCNN.2018.8489651", "CorpusId": 6912552 }, "url": "https://www.semanticscholar.org/paper/d1fb811ead5d6075992b63db2f6d090e2eae93a9", "referenceCount": 27, "citationCount": 148, "influentialCitationCount": 23, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Using Convolutional Neural Networks to Classify Hate-Speech", "abstract": "The paper introduces a deep learning-based Twitter hate-speech text classification system. The classifier assigns each tweet to one of four predefined categories: racism, sexism, both (racism and sexism) and non-hate-speech. Four Convolutional Neural Network models were trained on resp. character 4-grams, word vectors based on semantic information built using word2vec, randomly generated word vectors, and word vectors combined with character n-grams. The feature set was down-sized in the networks by max-pooling, and a softmax function used to classify tweets. Tested by 10-fold cross-validation, the model based on word2vec embeddings performed best, with higher precision than recall, and a 78.3% F-score.", "year": 2017, "venue": "ALW@ACL", "authors": [ "Björn Gambäck", "Utpal Kumar Sikdar" ], "externalIds": { "ACL": "W17-3013", "MAG": "2741065173", "DBLP": "conf/acl-alw/GambackS17", "DOI": "10.18653/v1/W17-3013", "CorpusId": 20335790 }, "url": "https://www.semanticscholar.org/paper/0dca29b6a5ea2fe2b6373aba9fe0ab829c06fd78", "referenceCount": 26, "citationCount": 477, "influentialCitationCount": 39, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Data Decisions and Theoretical Implications when Adversarially Learning Fair Representations", "abstract": "How can we learn a classifier that is \"fair\" for a protected or sensitive group, when we do not know if the input to the classifier belongs to the protected group? How can we train such a classifier when data on the protected group is difficult to attain? In many settings, finding out the sensitive input attribute can be prohibitively expensive even during model training, and sometimes impossible during model serving. For example, in recommender systems, if we want to predict if a user will click on a given recommendation, we often do not know many attributes of the user, e.g., race or age, and many attributes of the content are hard to determine, e.g., the language or topic. Thus, it is not feasible to use a different classifier calibrated based on knowledge of the sensitive attribute. \nHere, we use an adversarial training procedure to remove information about the sensitive attribute from the latent representation learned by a neural network. In particular, we study how the choice of data for the adversarial training effects the resulting fairness properties. We find two interesting results: a small amount of data is needed to train these adversarial models, and the data distribution empirically drives the adversary's notion of fairness.", "year": 2017, "venue": "arXiv.org", "authors": [ "Alex Beutel", "Jilin Chen", "Zhe Zhao", "Ed H. 
Chi" ], "externalIds": { "DBLP": "journals/corr/BeutelCZC17", "ArXiv": "1707.00075", "MAG": "2725155646", "CorpusId": 24990444 }, "url": "https://www.semanticscholar.org/paper/4eef0519f75911a2e132fac12427fa13bdb32a71", "referenceCount": 11, "citationCount": 416, "influentialCitationCount": 34, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Optimal classifier for imbalanced data using Matthews Correlation Coefficient metric", "abstract": "Data imbalance is frequently encountered in biomedical applications. Resampling techniques can be used in binary classification to tackle this issue. However such solutions are not desired when the number of samples in the small class is limited. Moreover the use of inadequate performance metrics, such as accuracy, lead to poor generalization results because the classifiers tend to predict the largest size class. One of the good approaches to deal with this issue is to optimize performance metrics that are designed to handle data imbalance. Matthews Correlation Coefficient (MCC) is widely used in Bioinformatics as a performance metric. We are interested in developing a new classifier based on the MCC metric to handle imbalanced data. We derive an optimal Bayes classifier for the MCC metric using an approach based on Frechet derivative. We show that the proposed algorithm has the nice theoretical property of consistency. Using simulated data, we verify the correctness of our optimality result by searching in the space of all possible binary classifiers. The proposed classifier is evaluated on 64 datasets from a wide range data imbalance. We compare both classification performance and CPU efficiency for three classifiers: 1) the proposed algorithm (MCC-classifier), the Bayes classifier with a default threshold (MCC-base) and imbalanced SVM (SVM-imba). The experimental evaluation shows that MCC-classifier has a close performance to SVM-imba while being simpler and more efficient.", "year": 2017, "venue": "PLoS ONE", "authors": [ "Sabri Boughorbel", "Fethi Jarray", "Mohammed El-Anbari" ], "externalIds": { "PubMedCentral": "5456046", "MAG": "2620760558", "DOI": "10.1371/journal.pone.0177678", "CorpusId": 10830747, "PubMed": "28574989" }, "url": "https://www.semanticscholar.org/paper/8bed6a51c0ee8bf5343c853e1bec14f572588db2", "referenceCount": 25, "citationCount": 905, "influentialCitationCount": 56, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Deep Learning for Hate Speech Detection in Tweets", "abstract": "Hate speech detection on Twitter is critical for applications like controversial event extraction, building AI chatterbots, content recommendation, and sentiment analysis. We define this task as being able to classify a tweet as racist, sexist or neither. The complexity of the natural language constructs makes this task very challenging. We perform extensive experiments with multiple deep learning architectures to learn semantic word embeddings to handle this complexity. 
Our experiments on a benchmark dataset of 16K annotated tweets show that such deep learning methods outperform state-of-the-art char/word n-gram methods by ~18 F1 points.", "year": 2017, "venue": "The Web Conference", "authors": [ "Pinkesh Badjatiya", "Shashank Gupta", "Manish Gupta", "Vasudeva Varma" ], "externalIds": { "MAG": "2613977835", "DBLP": "conf/www/BadjatiyaG0V17", "ArXiv": "1706.00188", "DOI": "10.1145/3041021.3054223", "CorpusId": 2880908 }, "url": "https://www.semanticscholar.org/paper/651644a5dbbe97cf69e4e64c0c6afb2b532c447d", "referenceCount": 7, "citationCount": 1040, "influentialCitationCount": 117, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Automated Hate Speech Detection and the Problem of Offensive Language", "abstract": "\n \n A key challenge for automatic hate-speech detection on social media is the separation of hate speech from other instances of offensive language. Lexical detection methods tend to have low precision because they classify all messages containing particular terms as hate speech and previous work using supervised learning has failed to distinguish between the two categories. We used a crowd-sourced hate speech lexicon to collect tweets containing hate speech keywords. We use crowd-sourcing to label a sample of these tweets into three categories: those containing hate speech, only offensive language, and those with neither. We train a multi-class classifier to distinguish between these different categories. Close analysis of the predictions and the errors shows when we can reliably separate hate speech from other offensive language and when this differentiation is more difficult. We find that racist and homophobic tweets are more likely to be classified as hate speech but that sexist tweets are generally classified as offensive. Tweets without explicit hate keywords are also more difficult to classify.\n \n", "year": 2017, "venue": "International Conference on Web and Social Media", "authors": [ "Thomas Davidson", "Dana Warmsley", "M. Macy", "Ingmar Weber" ], "externalIds": { "MAG": "2595653137", "ArXiv": "1703.04009", "DBLP": "conf/icwsm/DavidsonWMW17", "DOI": "10.1609/icwsm.v11i1.14955", "CorpusId": 1733167 }, "url": "https://www.semanticscholar.org/paper/8dd6a2c9c88c9b3465484228c93f4dcc11cfeab9", "referenceCount": 16, "citationCount": 2386, "influentialCitationCount": 358, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Simple and Scalable Predictive Uncertainty Estimation using Deep Ensembles", "abstract": "Deep neural networks (NNs) are powerful black box predictors that have recently achieved impressive performance on a wide spectrum of tasks. Quantifying predictive uncertainty in NNs is a challenging and yet unsolved problem. Bayesian NNs, which learn a distribution over weights, are currently the state-of-the-art for estimating predictive uncertainty; however these require significant modifications to the training procedure and are computationally expensive compared to standard (non-Bayesian) NNs. We propose an alternative to Bayesian NNs that is simple to implement, readily parallelizable, requires very little hyperparameter tuning, and yields high quality predictive uncertainty estimates. Through a series of experiments on classification and regression benchmarks, we demonstrate that our method produces well-calibrated uncertainty estimates which are as good or better than approximate Bayesian NNs. 
To assess robustness to dataset shift, we evaluate the predictive uncertainty on test examples from known and unknown distributions, and show that our method is able to express higher uncertainty on out-of-distribution examples. We demonstrate the scalability of our method by evaluating predictive uncertainty estimates on ImageNet.", "year": 2016, "venue": "Neural Information Processing Systems", "authors": [ "Balaji Lakshminarayanan", "A. Pritzel", "C. Blundell" ], "externalIds": { "DBLP": "conf/nips/Lakshminarayanan17", "ArXiv": "1612.01474", "MAG": "2963238274", "CorpusId": 6294674 }, "url": "https://www.semanticscholar.org/paper/802168a81571dde28f5ddb94d84677bc007afa7b", "referenceCount": 63, "citationCount": 4959, "influentialCitationCount": 701, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Quaternion Neural Networks for Spoken Language Understanding", "abstract": "Machine Learning (ML) techniques have allowed a great performance improvement of different challenging Spoken Language Understanding (SLU) tasks. Among these methods, Neural Networks (NN), or Multilayer Perceptron (MLP), recently received a great interest from researchers due to their representation capability of complex internal structures in a low dimensional subspace. However, MLPs employ document representations based on basic word level or topic-based features. Therefore, these basic representations reveal little in way of document statistical structure by only considering words or topics contained in the document as a “bag-of-words”, ignoring relations between them. We propose to remedy this weakness by extending the complex features based on Quaternion algebra presented in [1] to neural networks called QMLP. This original QMLP approach is based on hyper-complex algebra to take into consideration features dependencies in documents. New document features, based on the document structure itself, used as input of the QMLP, are also investigated in this paper, in comparison to those initially proposed in [1]. Experiments made on a SLU task from a real framework of human spoken dialogues showed that our QMLP approach associated with the proposed document features outperforms other approaches, with an accuracy gain of 2% with respect to the MLP based on real numbers and more than 3% with respect to the first Quaternion-based features proposed in [1]. We finally demonstrated that less iterations are needed by our QMLP architecture to be efficient and to reach promising accuracies.", "year": 2016, "venue": "Spoken Language Technology Workshop", "authors": [ "Titouan Parcollet", "Mohamed Morchid", "Pierre-Michel Bousquet", "Richard Dufour", "G. Linarès", "R. Mori" ], "externalIds": { "MAG": "2586489272", "DBLP": "conf/slt/ParcolletMBDLM16", "DOI": "10.1109/SLT.2016.7846290", "CorpusId": 2259559 }, "url": "https://www.semanticscholar.org/paper/b81f3822936adcb15bcb09067f1f93df9a935ec9", "referenceCount": 20, "citationCount": 38, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Quasi-Recurrent Neural Networks", "abstract": "Recurrent neural networks are a powerful tool for modeling sequential data, but the dependence of each timestep's computation on the previous timestep's output limits parallelism and makes RNNs unwieldy for very long sequences. 
We introduce quasi-recurrent neural networks (QRNNs), an approach to neural sequence modeling that alternates convolutional layers, which apply in parallel across timesteps, and a minimalist recurrent pooling function that applies in parallel across channels. Despite lacking trainable recurrent layers, stacked QRNNs have better predictive accuracy than stacked LSTMs of the same hidden size. Due to their increased parallelism, they are up to 16 times faster at train and test time. Experiments on language modeling, sentiment classification, and character-level neural machine translation demonstrate these advantages and underline the viability of QRNNs as a basic building block for a variety of sequence tasks.", "year": 2016, "venue": "International Conference on Learning Representations", "authors": [ "James Bradbury", "Stephen Merity", "Caiming Xiong", "R. Socher" ], "externalIds": { "MAG": "2553397501", "DBLP": "journals/corr/BradburyMXS16", "ArXiv": "1611.01576", "CorpusId": 51559 }, "url": "https://www.semanticscholar.org/paper/2d876ed1dd2c58058d7197b734a8e4d349b8f231", "referenceCount": 39, "citationCount": 422, "influentialCitationCount": 66, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Are You a Racist or Am I Seeing Things? Annotator Influence on Hate Speech Detection on Twitter", "abstract": "Hate speech in the form of racism and sexism is commonplace on the internet (Waseem and Hovy, 2016). For this reason, there has been both an academic and an industry interest in detection of hate speech. The volume of data to be reviewed for creating data sets encourages a use of crowd sourcing for the annotation efforts. In this paper, we provide an examination of the influence of annotator knowledge of hate speech on classification models by comparing classification results obtained from training on expert and amateur annotations. We provide an evaluation on our own data set and run our models on the data set released by Waseem and Hovy (2016). We find that amateur annotators are more likely than expert annotators to label items as hate speech, and that systems trained on expert annotations outperform systems trained on amateur annotations.", "year": 2016, "venue": "NLP+CSS@EMNLP", "authors": [ "Zeerak Talat" ], "externalIds": { "ACL": "W16-5618", "DBLP": "conf/acl-nlpcss/Waseem16", "MAG": "2563826943", "DOI": "10.18653/v1/W16-5618", "CorpusId": 406026 }, "url": "https://www.semanticscholar.org/paper/3eebb7907a9b94f8d65f969f63b76ff5f643f6d3", "referenceCount": 14, "citationCount": 564, "influentialCitationCount": 65, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Political Science" ] }, { "title": "Ex Machina: Personal Attacks Seen at Scale", "abstract": "The damage personal attacks cause to online discourse motivates many platforms to try to curb the phenomenon. However, understanding the prevalence and impact of personal attacks in online platforms at scale remains surprisingly difficult. The contribution of this paper is to develop and illustrate a method that combines crowdsourcing and machine learning to analyze personal attacks at scale. We show an evaluation method for a classifier in terms of the aggregated number of crowd-workers it can approximate. We apply our methodology to English Wikipedia, generating a corpus of over 100k high quality human-labeled comments and 63M machine-labeled ones from a classifier that is as good as the aggregate of 3 crowd-workers, as measured by the area under the ROC curve and Spearman correlation. 
Using this corpus of machine-labeled scores, our methodology allows us to explore some of the open questions about the nature of online personal attacks. This reveals that the majority of personal attacks on Wikipedia are not the result of a few malicious users, nor primarily the consequence of allowing anonymous contributions from unregistered users.", "year": 2016, "venue": "The Web Conference", "authors": [ "Ellery Wulczyn", "Nithum Thain", "Lucas Dixon" ], "externalIds": { "DBLP": "conf/www/WulczynTD17", "MAG": "2949089361", "ArXiv": "1610.08914", "DOI": "10.1145/3038912.3052591", "CorpusId": 6060248 }, "url": "https://www.semanticscholar.org/paper/6f35b070c250507dbec8a7365cf01b25eb66d792", "referenceCount": 43, "citationCount": 707, "influentialCitationCount": 109, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Equality of Opportunity in Supervised Learning", "abstract": "We propose a criterion for discrimination against a specified sensitive attribute in supervised learning, where the goal is to predict some target based on available features. Assuming data about the predictor, target, and membership in the protected group are available, we show how to optimally adjust any learned predictor so as to remove discrimination according to our definition. Our framework also improves incentives by shifting the cost of poor classification from disadvantaged groups to the decision maker, who can respond by improving the classification accuracy. We enourage readers to consult the more complete manuscript on the arXiv.", "year": 2016, "venue": "Neural Information Processing Systems", "authors": [ "Moritz Hardt", "Eric Price", "N. Srebro" ], "externalIds": { "DBLP": "journals/corr/HardtPS16", "ArXiv": "1610.02413", "MAG": "2530395818", "CorpusId": 7567061 }, "url": "https://www.semanticscholar.org/paper/d42b11ce90c9c69a20ed015b73dc33e0e4100a7b", "referenceCount": 16, "citationCount": 3802, "influentialCitationCount": 721, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Hateful Symbols or Hateful People? Predictive Features for Hate Speech Detection on Twitter", "abstract": "Hate speech in the form of racist and sexist remarks are a common occurrence on social media. For that reason, many social media services address the problem of identifying hate speech, but the definition of hate speech varies markedly and is largely a manual effort (BBC, 2015; Lomas, 2015). We provide a list of criteria founded in critical race theory, and use them to annotate a publicly available corpus of more than 16k tweets. We analyze the impact of various extra-linguistic features in conjunction with character n-grams for hatespeech detection. We also present a dictionary based the most indicative words in our data.", "year": 2016, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Zeerak Talat", "Dirk Hovy" ], "externalIds": { "DBLP": "conf/naacl/WaseemH16", "ACL": "N16-2013", "MAG": "2473555522", "DOI": "10.18653/v1/N16-2013", "CorpusId": 1721388 }, "url": "https://www.semanticscholar.org/paper/df704cca917666dace4e42b4d3a50f65597b8f06", "referenceCount": 30, "citationCount": 1501, "influentialCitationCount": 257, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Abusive Language Detection in Online User Content", "abstract": "Detection of abusive language in user generated online content has become an issue of increasing importance in recent years. 
Most current commercial methods make use of blacklists and regular expressions, however these measures fall short when contending with more subtle, less ham-fisted examples of hate speech. In this work, we develop a machine learning based method to detect hate speech on online user comments from two domains which outperforms a state-of-the-art deep learning approach. We also develop a corpus of user comments annotated for abusive language, the first of its kind. Finally, we use our detection tool to analyze abusive language over time and in different settings to further enhance our knowledge of this behavior.", "year": 2016, "venue": "The Web Conference", "authors": [ "Chikashi Nobata", "Joel R. Tetreault", "A. Thomas", "Yashar Mehdad", "Yi Chang" ], "externalIds": { "MAG": "2340954483", "DBLP": "conf/www/NobataTTMC16", "DOI": "10.1145/2872427.2883062", "CorpusId": 11546523 }, "url": "https://www.semanticscholar.org/paper/e39b586e561b36a3b71fa3d9ee7cb15c35d84203", "referenceCount": 23, "citationCount": 1060, "influentialCitationCount": 91, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "That’s So Annoying!!!: A Lexical and Frame-Semantic Embedding Based Data Augmentation Approach to Automatic Categorization of Annoying Behaviors using #petpeeve Tweets", "abstract": "We propose a novel data augmentation approach to enhance computational behavioral analysis using social media text. In particular, we collect a Twitter corpus of the descriptions of annoying behaviors using the #petpeeve hashtags. In the qualitative analysis, we study the language use in these tweets, with a special focus on the fine-grained categories and the geographic variation of the language. In quantitative analysis, we show that lexical and syntactic features are useful for automatic categorization of annoying behaviors, and frame-semantic features further boost the performance; that leveraging large lexical embeddings to create additional training instances significantly improves the lexical model; and incorporating frame-semantic embedding achieves the best overall performance.", "year": 2015, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "William Yang Wang", "Diyi Yang" ], "externalIds": { "ACL": "D15-1306", "MAG": "2251658415", "DBLP": "conf/emnlp/WangY15", "DOI": "10.18653/v1/D15-1306", "CorpusId": 3257353 }, "url": "https://www.semanticscholar.org/paper/0d13dae976c95853039395d8544b7cd31987783f", "referenceCount": 44, "citationCount": 280, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning", "abstract": "Deep learning tools have gained tremendous attention in applied machine learning. However such tools for regression and classification do not capture model uncertainty. In comparison, Bayesian models offer a mathematically grounded framework to reason about model uncertainty, but usually come with a prohibitive computational cost. In this paper we develop a new theoretical framework casting dropout training in deep neural networks (NNs) as approximate Bayesian inference in deep Gaussian processes. A direct result of this theory gives us tools to model uncertainty with dropout NNs -- extracting information from existing models that has been thrown away so far. This mitigates the problem of representing uncertainty in deep learning without sacrificing either computational complexity or test accuracy. 
We perform an extensive study of the properties of dropout's uncertainty. Various network architectures and non-linearities are assessed on tasks of regression and classification, using MNIST as an example. We show a considerable improvement in predictive log-likelihood and RMSE compared to existing state-of-the-art methods, and finish by using dropout's uncertainty in deep reinforcement learning.", "year": 2015, "venue": "International Conference on Machine Learning", "authors": [ "Y. Gal", "Zoubin Ghahramani" ], "externalIds": { "MAG": "2964059111", "DBLP": "conf/icml/GalG16", "ArXiv": "1506.02142", "CorpusId": 160705 }, "url": "https://www.semanticscholar.org/paper/f35de4f9b1a7c4d3fa96a0d2ab1bf8937671f6b6", "referenceCount": 56, "citationCount": 8177, "influentialCitationCount": 1281, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Hate Speech Detection with Comment Embeddings", "abstract": "We address the problem of hate speech detection in online user comments. Hate speech, defined as an \"abusive speech targeting specific group characteristics, such as ethnicity, religion, or gender\", is an important problem plaguing websites that allow users to leave feedback, having a negative impact on their online business and overall user experience. We propose to learn distributed low-dimensional representations of comments using recently proposed neural language models, that can then be fed as inputs to a classification algorithm. Our approach addresses issues of high-dimensionality and sparsity that impact the current state-of-the-art, resulting in highly efficient and effective hate speech detectors.", "year": 2015, "venue": "The Web Conference", "authors": [ "Nemanja Djuric", "Jing Zhou", "Robin Morris", "Mihajlo Grbovic", "Vladan Radosavljevic", "Narayan L. Bhamidipati" ], "externalIds": { "DBLP": "conf/www/DjuricZMGRB15", "MAG": "1071251684", "DOI": "10.1145/2740908.2742760", "CorpusId": 2039295 }, "url": "https://www.semanticscholar.org/paper/c9948f7213167d65db79b60381d01ea71d438f94", "referenceCount": 7, "citationCount": 655, "influentialCitationCount": 41, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Convolutional Neural Networks for Sentence Classification", "abstract": "We report on a series of experiments with convolutional neural networks (CNN) trained on top of pre-trained word vectors for sentence-level classification tasks. We show that a simple CNN with little hyperparameter tuning and static vectors achieves excellent results on multiple benchmarks. Learning task-specific vectors through fine-tuning offers further gains in performance. We additionally propose a simple modification to the architecture to allow for the use of both task-specific and static vectors. 
The CNN models discussed herein improve upon the state of the art on 4 out of 7 tasks, which include sentiment analysis and question classification.", "year": 2014, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Yoon Kim" ], "externalIds": { "DBLP": "journals/corr/Kim14f", "MAG": "1832693441", "ACL": "D14-1181", "ArXiv": "1408.5882", "DOI": "10.3115/v1/D14-1181", "CorpusId": 9672033 }, "url": "https://www.semanticscholar.org/paper/1f6ba0782862ec12a5ec6d7fb608523d55b0c6ba", "referenceCount": 33, "citationCount": 12850, "influentialCitationCount": 2059, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Detecting Hate Speech on the World Wide Web", "abstract": "We present an approach to detecting hate speech in online text, where hate speech is defined as abusive speech targeting specific group characteristics, such as ethnic origin, religion, gender, or sexual orientation. While hate speech against any group may exhibit some common characteristics, we have observed that hatred against each different group is typically characterized by the use of a small set of high frequency stereotypical words; however, such words may be used in either a positive or a negative sense, making our task similar to that of words sense disambiguation. In this paper we describe our definition of hate speech, the collection and annotation of our hate speech corpus, and a mechanism for detecting some commonly used methods of evading common \"dirty word\" filters. We describe pilot classification experiments in which we classify anti-semitic speech reaching an accuracy 94%, precision of 68% and recall at 60%, for an F1 measure of. 6375.", "year": 2012, "venue": "", "authors": [ "William Warner", "Julia Hirschberg" ], "externalIds": { "ACL": "W12-2103", "MAG": "78136081", "CorpusId": 12477446 }, "url": "https://www.semanticscholar.org/paper/2aab45ffcd28f3945f2b3bda34887ccdd14adfc3", "referenceCount": 17, "citationCount": 631, "influentialCitationCount": 39, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology" ] }, { "title": "A Bayesian network of eutrophication models for synthesis, prediction, and uncertainty analysis", "abstract": null, "year": 2004, "venue": "", "authors": [ "M. Borsuk", "C. Stow", "K. Reckhow" ], "externalIds": { "MAG": "2050607405", "DOI": "10.1016/J.ECOLMODEL.2003.08.020", "CorpusId": 55347928 }, "url": "https://www.semanticscholar.org/paper/af82655009568057dc9d0e435f2dd3aee30d9ee8", "referenceCount": 102, "citationCount": 512, "influentialCitationCount": 30, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "WordNet: A Lexical Database for English", "abstract": "Because meaningful sentences are composed of meaningful words, any system that hopes to process natural languages as people do must have information about words and their meanings. This information is traditionally provided through dictionaries, and machine-readable dictionaries are now widely available. But dictionary entries evolved for the convenience of human readers, not for machines. WordNet1 provides a more effective combination of traditional lexicographic information and modern computing. WordNet is an online lexical database designed for use under program control. English nouns, verbs, adjectives, and adverbs are organized into sets of synonyms, each representing a lexicalized concept. Semantic relations link the synonym sets [4].", "year": 1995, "venue": "Human Language Technology - The Baltic Perspectiv", "authors": [ "G. 
Miller" ], "externalIds": { "DBLP": "conf/naacl/Miller93", "ACL": "H92-1116", "MAG": "2081580037", "DOI": "10.1145/219717.219748", "CorpusId": 1671874 }, "url": "https://www.semanticscholar.org/paper/68c03788224000794d5491ab459be0b2a2c38677", "referenceCount": 8, "citationCount": 16159, "influentialCitationCount": 2530, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Malicious Bot Detection in Online Social Networks: Arming Handcrafted Features with Deep Learning", "abstract": null, "year": 2020, "venue": "Social Informatics", "authors": [ "Guanyi Mou", "Kyumin Lee" ], "externalIds": { "MAG": "3092091320", "DBLP": "conf/socinfo/MouL20", "DOI": "10.1007/978-3-030-60975-7_17", "CorpusId": 221876939 }, "url": "https://www.semanticscholar.org/paper/e5bebc2e1d9b94f362078771cf28b67e521b4e75", "referenceCount": 46, "citationCount": 14, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "“Incremental fairness in two-sided market platforms: On smoothly updating recommendations,”", "abstract": null, "year": 2020, "venue": "AAAI Conference on Artificial Intelligence", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", "abstract": "We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models (Peters et al., 2018a; Radford et al., 2018), BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5 (7.7 point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).", "year": 2019, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Jacob Devlin", "Ming-Wei Chang", "Kenton Lee", "Kristina Toutanova" ], "externalIds": { "MAG": "2951055169", "ACL": "N19-1423", "DBLP": "journals/corr/abs-1810-04805", "ArXiv": "1810.04805", "DOI": "10.18653/v1/N19-1423", "CorpusId": 52967399 }, "url": "https://www.semanticscholar.org/paper/df2b0e26d0599ce3e70df8a9da02e51594e0e992", "referenceCount": 63, "citationCount": 81678, "influentialCitationCount": 19056, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Language Models are Unsupervised Multitask Learners", "abstract": "Natural language processing tasks, such as question answering, machine translation, reading comprehension, and summarization, are typically approached with supervised learning on taskspecific datasets. We demonstrate that language models begin to learn these tasks without any explicit supervision when trained on a new dataset of millions of webpages called WebText. 
When conditioned on a document plus questions, the answers generated by the language model reach 55 F1 on the CoQA dataset matching or exceeding the performance of 3 out of 4 baseline systems without using the 127,000+ training examples. The capacity of the language model is essential to the success of zero-shot task transfer and increasing it improves performance in a log-linear fashion across tasks. Our largest model, GPT-2, is a 1.5B parameter Transformer that achieves state of the art results on 7 out of 8 tested language modeling datasets in a zero-shot setting but still underfits WebText. Samples from the model reflect these improvements and contain coherent paragraphs of text. These findings suggest a promising path towards building language processing systems which learn to perform tasks from their naturally occurring demonstrations.", "year": 2019, "venue": "", "authors": [ "Alec Radford", "Jeff Wu", "R. Child", "D. Luan", "Dario Amodei", "I. Sutskever" ], "externalIds": { "MAG": "2955855238", "CorpusId": 160025533 }, "url": "https://www.semanticscholar.org/paper/9405cc0d6169988371b2755e573cc28650d14dfe", "referenceCount": 75, "citationCount": 18460, "influentialCitationCount": 3039, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "“Hate speech on social media: Global comparisons,”", "abstract": null, "year": 2019, "venue": "Council on Foreign Relations", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Equality of Opportunity in Classification: A Causal Approach", "abstract": "The Equalized Odds (for short, EO) is one of the most popular measures of discrimination used in the supervised learning setting. It ascertains fairness through the balance of the misclassification rates (false positive and negative) across the protected groups -- e.g., in the context of law enforcement, an African-American defendant who would not commit a future crime will have an equal opportunity of being released, compared to a non-recidivating Caucasian defendant. Despite this noble goal, it has been acknowledged in the literature that statistical tests based on the EO are oblivious to the underlying causal mechanisms that generated the disparity in the first place (Hardt et al. 2016). This leads to a critical disconnect between statistical measures readable from the data and the meaning of discrimination in the legal system, where compelling evidence that the observed disparity is tied to a specific causal process deemed unfair by society is required to characterize discrimination. The goal of this paper is to develop a principled approach to connect the statistical disparities characterized by the EO and the underlying, elusive, and frequently unobserved, causal mechanisms that generated such inequality. We start by introducing a new family of counterfactual measures that allows one to explain the misclassification disparities in terms of the underlying mechanisms in an arbitrary, non-parametric structural causal model. This will, in turn, allow legal and data analysts to interpret currently deployed classifiers through causal lens, linking the statistical disparities found in the data to the corresponding causal processes. 
Leveraging the new family of counterfactual measures, we develop a learning procedure to construct a classifier that is statistically efficient, interpretable, and compatible with the basic human intuition of fairness. We demonstrate our results through experiments in both real (COMPAS) and synthetic datasets.", "year": 2018, "venue": "Neural Information Processing Systems", "authors": [ "Junzhe Zhang", "E. Bareinboim" ], "externalIds": { "DBLP": "conf/nips/ZhangB18", "MAG": "2890945214", "CorpusId": 53607027 }, "url": "https://www.semanticscholar.org/paper/6bb8e5c50eb219ce6344523bf9ebf0afc8b53123", "referenceCount": 30, "citationCount": 84, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "“Rdeepsense: Reliable deep mobile computing models with uncertainty estimations,”", "abstract": null, "year": 2018, "venue": "ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“How to write plain english: Let’s start with the formula,”", "abstract": null, "year": 1979, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Identifying and Categorizing Offensive Language in Social Media using Sentence Embeddings", "abstract": "This paper describes our system (Fermi) for Task 6: OffensEval: Identifying and Categorizing Offensive Language in Social Media of SemEval-2019. We participated in all the three sub-tasks within Task 6. We evaluate multiple sentence embeddings in conjunction with various supervised machine learning algorithms and evaluate the performance of simple yet effective embedding-ML combination algorithms. Our team (Fermi)’s model achieved an F1-score of 64.40%, 62.00% and 62.60% for sub-task A, B and C respectively on the official leaderboard. Our model for sub-task C which uses pretrained ELMo embeddings for transforming the input and uses SVM (RBF kernel) for training, scored third position on the official leaderboard. Through the paper we provide a detailed description of the approach, as well as the results obtained for the task.", "year": null, "venue": "", "authors": [ "Vijayasaradhi Indurthi", "B. Syed", "Manish Shrivastava", "Manish Gupta", "Vasudeva Varma" ], "externalIds": { "CorpusId": 184482787 }, "url": "https://www.semanticscholar.org/paper/aca6821423fdf4c1c804846d20cd20352ad7c169", "referenceCount": 30, "citationCount": 9, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null },
{ "title": "BERT is not robust on misspellings! Generating nature adversarial samples on BERT; BAE: BERT-based adversarial examples for text classification, in EMNLP, 2020", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Gen2Act: Human Video Generation in Novel Scenarios enables Generalizable Robot Manipulation": { "paper_title": "Gen2Act: Human Video Generation in Novel Scenarios enables Generalizable Robot Manipulation", "arxiv_id": "2409.16283v1", "keyword": "generative model", "authors": [ "Homanga Bharadhwaj", "Debidatta Dwibedi", "Abhinav Gupta", "Shubham Tulsiani", "Carl Doersch", "Ted Xiao", "Dhruv Shah", "Fei Xia", "Dorsa Sadigh", "Sean Kirmani" ], "references": [ { "title": "Semantically Controllable Augmentations for Generalizable Robot Learning", "abstract": "Generalization to unseen real-world scenarios for robot manipulation requires exposure to diverse datasets during training. However, collecting large real-world datasets is intractable due to high operational costs. For robot learning to generalize despite these challenges, it is essential to leverage sources of data or priors beyond the robot's direct experience. In this work, we posit that image-text generative models, which are pre-trained on large corpora of web-scraped data, can serve as such a data source. These generative models encompass a broad range of real-world scenarios beyond a robot's direct experience and can synthesize novel synthetic experiences that expose robotic agents to additional world priors aiding real-world generalization at no extra cost. In particular, our approach leverages pre-trained generative models as an effective tool for data augmentation. We propose a generative augmentation framework for semantically controllable augmentations and rapidly multiplying robot datasets while inducing rich variations that enable real-world generalization. Based on diverse augmentations of robot data, we show how scalable robot manipulation policies can be trained and deployed both in simulation and in unseen real-world environments such as kitchens and table-tops. By demonstrating the effectiveness of image-text generative models in diverse real-world robotic applications, our generative augmentation framework provides a scalable and efficient path for boosting generalization in robot learning at no extra human cost.", "year": 2024, "venue": "", "authors": [ "Zoey Chen", "Zhao Mandi", "Homanga Bharadhwaj", "Mohit Sharma", "Shuran Song", "Abhishek Gupta", "Vikash Kumar" ], "externalIds": { "ArXiv": "2409.00951", "CorpusId": 272367778 }, "url": "https://www.semanticscholar.org/paper/9bcfc6c5ddc14bf5c26914f8a3d47eaca4d991c4", "referenceCount": 96, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "R+X: Retrieval and Execution from Everyday Human Videos", "abstract": "We present R+X, a framework which enables robots to learn skills from long, unlabelled, first-person videos of humans performing everyday tasks. Given a language command from a human, R+X first retrieves short video clips containing relevant behaviour, and then executes the skill by conditioning an in-context imitation learning method on this behaviour. 
By leveraging a Vision Language Model (VLM) for retrieval, R+X does not require any manual annotation of the videos, and by leveraging in-context learning for execution, robots can perform commanded skills immediately, without requiring a period of training on the retrieved videos. Experiments studying a range of everyday household tasks show that R+X succeeds at translating unlabelled human videos into robust robot skills, and that R+X outperforms several recent alternative methods. Videos are available at https://www.robot-learning.uk/r-plus-x.", "year": 2024, "venue": "arXiv.org", "authors": [ "Georgios Papagiannis", "Norman Di Palo", "Pietro Vitiello", "Edward Johns" ], "externalIds": { "DBLP": "journals/corr/abs-2407-12957", "ArXiv": "2407.12957", "DOI": "10.48550/arXiv.2407.12957", "CorpusId": 271270878 }, "url": "https://www.semanticscholar.org/paper/dc40d71d747d19e29587a9da95d316e9b88bd9a4", "referenceCount": 41, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Dreamitate: Real-World Visuomotor Policy Learning via Video Generation", "abstract": "A key challenge in manipulation is learning a policy that can robustly generalize to diverse visual environments. A promising mechanism for learning robust policies is to leverage video generative models, which are pretrained on large-scale datasets of internet videos. In this paper, we propose a visuomotor policy learning framework that fine-tunes a video diffusion model on human demonstrations of a given task. At test time, we generate an example of an execution of the task conditioned on images of a novel scene, and use this synthesized execution directly to control the robot. Our key insight is that using common tools allows us to effortlessly bridge the embodiment gap between the human hand and the robot manipulator. We evaluate our approach on four tasks of increasing complexity and demonstrate that harnessing internet-scale generative models allows the learned policy to achieve a significantly higher degree of generalization than existing behavior cloning approaches.", "year": 2024, "venue": "arXiv.org", "authors": [ "Junbang Liang", "Ruoshi Liu", "Ege Ozguroglu", "Sruthi Sudhakar", "Achal Dave", "P. Tokmakov", "Shuran Song", "Carl Vondrick" ], "externalIds": { "DBLP": "journals/corr/abs-2406-16862", "ArXiv": "2406.16862", "DOI": "10.48550/arXiv.2406.16862", "CorpusId": 270703739 }, "url": "https://www.semanticscholar.org/paper/b0ac4f62f55bcf0427008e18f1b4b5bf7ee43df2", "referenceCount": 34, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "OpenVLA: An Open-Source Vision-Language-Action Model", "abstract": "Large policies pretrained on a combination of Internet-scale vision-language data and diverse robot demonstrations have the potential to change how we teach robots new skills: rather than training new behaviors from scratch, we can fine-tune such vision-language-action (VLA) models to obtain robust, generalizable policies for visuomotor control. Yet, widespread adoption of VLAs for robotics has been challenging as 1) existing VLAs are largely closed and inaccessible to the public, and 2) prior work fails to explore methods for efficiently fine-tuning VLAs for new tasks, a key component for adoption. Addressing these challenges, we introduce OpenVLA, a 7B-parameter open-source VLA trained on a diverse collection of 970k real-world robot demonstrations. 
OpenVLA builds on a Llama 2 language model combined with a visual encoder that fuses pretrained features from DINOv2 and SigLIP. As a product of the added data diversity and new model components, OpenVLA demonstrates strong results for generalist manipulation, outperforming closed models such as RT-2-X (55B) by 16.5% in absolute task success rate across 29 tasks and multiple robot embodiments, with 7x fewer parameters. We further show that we can effectively fine-tune OpenVLA for new settings, with especially strong generalization results in multi-task environments involving multiple objects and strong language grounding abilities, and outperform expressive from-scratch imitation learning methods such as Diffusion Policy by 20.4%. We also explore compute efficiency; as a separate contribution, we show that OpenVLA can be fine-tuned on consumer GPUs via modern low-rank adaptation methods and served efficiently via quantization without a hit to downstream success rate. Finally, we release model checkpoints, fine-tuning notebooks, and our PyTorch codebase with built-in support for training VLAs at scale on Open X-Embodiment datasets.", "year": 2024, "venue": "arXiv.org", "authors": [ "Moo Jin Kim", "Karl Pertsch", "Siddharth Karamcheti", "Ted Xiao", "A. Balakrishna", "Suraj Nair", "Rafael Rafailov", "Ethan Foster", "Grace Lam", "Pannag R. Sanketi", "Quan Vuong", "Thomas Kollar", "Benjamin Burchfiel", "Russ Tedrake", "Dorsa Sadigh", "Sergey Levine", "Percy Liang", "Chelsea Finn" ], "externalIds": { "DBLP": "journals/corr/abs-2406-09246", "ArXiv": "2406.09246", "DOI": "10.48550/arXiv.2406.09246", "CorpusId": 270440391 }, "url": "https://www.semanticscholar.org/paper/8f9ceb5ffad8e7a066dfc9d9aaa5153b714740ee", "referenceCount": 110, "citationCount": 25, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DROID: A Large-Scale In-The-Wild Robot Manipulation Dataset", "abstract": "The creation of large, diverse, high-quality robot manipulation datasets is an important stepping stone on the path toward more capable and robust robotic manipulation policies. However, creating such datasets is challenging: collecting robot manipulation data in diverse environments poses logistical and safety challenges and requires substantial investments in hardware and human labour. As a result, even the most general robot manipulation policies today are mostly trained on data collected in a small number of environments with limited scene and task diversity. In this work, we introduce DROID (Distributed Robot Interaction Dataset), a diverse robot manipulation dataset with 76k demonstration trajectories or 350 hours of interaction data, collected across 564 scenes and 84 tasks by 50 data collectors in North America, Asia, and Europe over the course of 12 months. We demonstrate that training with DROID leads to policies with higher performance and improved generalization ability. We open source the full dataset, policy learning code, and a detailed guide for reproducing our robot hardware setup.", "year": 2024, "venue": "Robotics", "authors": [ "Alexander Khazatsky", "Karl Pertsch", "S. Nair", "Ashwin Balakrishna", "Sudeep Dasari", "Siddharth Karamcheti", "Soroush Nasiriany", "M. K. Srirama", "L. Chen", "Kirsty Ellis", "P. Fagan", "Joey Hejna", "Masha Itkina", "Marion Lepert", "Ye Ma", "Patrick Tree Miller", "Jimmy Wu", "Suneel Belkhale", "S. Dass", "Huy Ha", "Arhan Jain", "Abraham Lee", "Youngwoon Lee", "Marius Memmel", "S. 
Park", "Ilija Radosavovic", "Kaiyuan Wang", "Albert Zhan", "Kevin Black", "Cheng Chi", "K. Hatch", "Shan Lin", "Jingpei Lu", "Jean-Pierre Mercat", "Abdul Rehman", "Pannag R. Sanketi", "Archit Sharma", "C. Simpson", "Q. Vương", "Homer Walke", "Blake Wulfe", "Ted Xiao", "J. Yang", "Arefeh Yavary", "Tony Zhao", "Christopher Agia", "R. Baijal", "Mateo Guaman Castro", "D. Chen", "Qiuyu Chen", "Trinity Chung", "Jaimyn Drake", "Ethan P. Foster", "Jensen Gao", "David Antonio Herrera", "Minho Heo", "Kyle Hsu", "Jiaheng Hu", "Donovon Jackson", "Charlotte Le", "Yunshuang Li", "K. Lin", "Roy Lin", "Zehan Ma", "Abhiram Maddukuri", "Suvir Mirchandani", "D. Morton", "Tony Nguyen", "Abigail O'Neill", "R. Scalise", "Derick Seale", "Victor Son", "Stephen Tian", "E. Tran", "Andrew E. Wang", "Yilin Wu", "Annie Xie", "Jingyun Yang", "Patrick Yin", "Yunchu Zhang", "O. Bastani", "G. Berseth", "J. Bohg", "Ken Goldberg", "Abhinav Gupta", "Abhishek Gupta", "Dinesh Jayaraman", "Joseph J. Lim", "Jitendra Malik", "Roberto Mart'in-Mart'in", "S. Ramamoorthy", "Dorsa Sadigh", "Shuran Song", "Jiajun Wu", "Michael C. Yip", "Yuke Zhu", "T. Kollar", "Sergey Levine", "Chelsea Finn" ], "externalIds": { "DBLP": "journals/corr/abs-2403-12945", "ArXiv": "2403.12945", "DOI": "10.48550/arXiv.2403.12945", "CorpusId": 268531351 }, "url": "https://www.semanticscholar.org/paper/ee5070fe52fd17da9a89d3f342fb07cc9ae51afe", "referenceCount": 51, "citationCount": 37, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Vid2Robot: End-to-end Video-conditioned Policy Learning with Cross-Attention Transformers", "abstract": "Large-scale multi-task robotic manipulation systems often rely on text to specify the task. In this work, we explore whether a robot can learn by observing humans. To do so, the robot must understand a person's intent and perform the inferred task despite differences in the embodiments and environments. We introduce Vid2Robot, an end-to-end video-conditioned policy that takes human videos demonstrating manipulation tasks as input and produces robot actions. Our model is trained with a large dataset of prompt video-robot trajectory pairs to learn unified representations of human and robot actions from videos. Vid2Robot uses cross-attention transformer layers between video features and the current robot state to produce the actions and perform the same task as shown in the video. We use auxiliary contrastive losses to align the prompt and robot video representations for better policies. We evaluate Vid2Robot on real-world robots and observe over 20% improvement over BC-Z when using human prompt videos. Further, we also show cross-object motion transfer ability that enables video-conditioned policies to transfer a motion observed on one object in the prompt video to another object in the robot's own environment. Videos available at https://vid2robot.github.io", "year": 2024, "venue": "Robotics", "authors": [ "Vidhi Jain", "Maria Attarian", "Nikhil J. Joshi", "Ayzaan Wahid", "Danny Driess", "Quan Vuong", "Pannag R. Sanketi", "P. 
Sermanet", "Stefan Welker", "Christine Chan", "Igor Gilitschenski", "Yonatan Bisk", "Debidatta Dwibedi" ], "externalIds": { "DBLP": "journals/corr/abs-2403-12943", "ArXiv": "2403.12943", "DOI": "10.48550/arXiv.2403.12943", "CorpusId": 268532100 }, "url": "https://www.semanticscholar.org/paper/e1f213f89df082e360fca9ed9518383ef6a90db0", "referenceCount": 54, "citationCount": 8, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Video as the New Language for Real-World Decision Making", "abstract": "Both text and video data are abundant on the internet and support large-scale self-supervised learning through next token or frame prediction. However, they have not been equally leveraged: language models have had significant real-world impact, whereas video generation has remained largely limited to media entertainment. Yet video data captures important information about the physical world that is difficult to express in language. To address this gap, we discuss an under-appreciated opportunity to extend video generation to solve tasks in the real world. We observe how, akin to language, video can serve as a unified interface that can absorb internet knowledge and represent diverse tasks. Moreover, we demonstrate how, like language models, video generation can serve as planners, agents, compute engines, and environment simulators through techniques such as in-context learning, planning and reinforcement learning. We identify major impact opportunities in domains such as robotics, self-driving, and science, supported by recent work that demonstrates how such advanced capabilities in video generation are plausibly within reach. Lastly, we identify key challenges in video generation that mitigate progress. Addressing these challenges will enable video generation models to demonstrate unique value alongside language models in a wider array of AI applications.", "year": 2024, "venue": "International Conference on Machine Learning", "authors": [ "Sherry Yang", "Jacob Walker", "Jack Parker-Holder", "Yilun Du", "Jake Bruce", "Andre Barreto", "Pieter Abbeel", "Dale Schuurmans" ], "externalIds": { "ArXiv": "2402.17139", "DBLP": "conf/icml/YangWPDB0AS24", "DOI": "10.48550/arXiv.2402.17139", "CorpusId": 268032943 }, "url": "https://www.semanticscholar.org/paper/1473e0d6ae0790fb0834729577a42b37d68d829c", "referenceCount": 94, "citationCount": 16, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DINOBot: Robot Manipulation via Retrieval and Alignment with Vision Foundation Models", "abstract": "We propose DINOBot, a novel imitation learning framework for robot manipulation, which leverages the image-level and pixel-level capabilities of features extracted from Vision Transformers trained with DINO. When interacting with a novel object, DINOBot first uses these features to retrieve the most visually similar object experienced during human demonstrations, and then uses this object to align its endeffector with the novel object to enable effective interaction. Through a series of real-world experiments on everyday tasks, we show that exploiting both the image-level and pixel-level properties of vision foundation models enables unprecedented learning efficiency and generalisation. 
Videos and code are available at https://www.robot-learning.uk/dinobot.", "year": 2024, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Norman Di Palo", "Edward Johns" ], "externalIds": { "ArXiv": "2402.13181", "DBLP": "conf/icra/PaloJ24", "DOI": "10.1109/ICRA57147.2024.10610923", "CorpusId": 267760260 }, "url": "https://www.semanticscholar.org/paper/b22949a111a311fc2fab8018f922a407b73c15b6", "referenceCount": 47, "citationCount": 12, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "BootsTAP: Bootstrapped Training for Tracking-Any-Point", "abstract": "To endow models with greater understanding of physics and motion, it is useful to enable them to perceive how solid surfaces move and deform in real scenes. This can be formalized as Tracking-Any-Point (TAP), which requires the algorithm to track any point on solid surfaces in a video, potentially densely in space and time. Large-scale groundtruth training data for TAP is only available in simulation, which currently has a limited variety of objects and motion. In this work, we demonstrate how large-scale, unlabeled, uncurated real-world data can improve a TAP model with minimal architectural changes, using a selfsupervised student-teacher setup. We demonstrate state-of-the-art performance on the TAP-Vid benchmark surpassing previous results by a wide margin: for example, TAP-Vid-DAVIS performance improves from 61.3% to 67.4%, and TAP-Vid-Kinetics from 57.2% to 62.5%. For visualizations, see our project webpage at https://bootstap.github.io/", "year": 2024, "venue": "arXiv.org", "authors": [ "Carl Doersch", "Yi Yang", "Dilara Gokay", "Pauline Luc", "Skanda Koppula", "Ankush Gupta", "Joseph Heyward", "Ross Goroshin", "João Carreira", "Andrew Zisserman" ], "externalIds": { "ArXiv": "2402.00847", "DBLP": "journals/corr/abs-2402-00847", "DOI": "10.48550/arXiv.2402.00847", "CorpusId": 267365369 }, "url": "https://www.semanticscholar.org/paper/04068ad34cf4e3b7c0731d294eb8decfab3f51fe", "referenceCount": 73, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "General Flow as Foundation Affordance for Scalable Robot Learning", "abstract": "We address the challenge of acquiring real-world manipulation skills with a scalable framework. We hold the belief that identifying an appropriate prediction target capable of leveraging large-scale datasets is crucial for achieving efficient and universal learning. Therefore, we propose to utilize 3D flow, which represents the future trajectories of 3D points on objects of interest, as an ideal prediction target. To exploit scalable data resources, we turn our attention to human videos. We develop, for the first time, a language-conditioned 3D flow prediction model directly from large-scale RGBD human video datasets. Our predicted flow offers actionable guidance, thus facilitating zero-shot skill transfer in real-world scenarios. We deploy our method with a policy based on closed-loop flow prediction. Remarkably, without any in-domain finetuning, our method achieves an impressive 81\\% success rate in zero-shot human-to-robot skill transfer, covering 18 tasks in 6 scenes. 
Our framework features the following benefits: (1) scalability: leveraging cross-embodiment data resources; (2) wide application: multiple object categories, including rigid, articulated, and soft bodies; (3) stable skill transfer: providing actionable guidance with a small inference domain-gap. Code, data, and supplementary materials are available https://general-flow.github.io", "year": 2024, "venue": "arXiv.org", "authors": [ "Chengbo Yuan", "Chuan Wen", "Tong Zhang", "Yang Gao" ], "externalIds": { "ArXiv": "2401.11439", "DBLP": "journals/corr/abs-2401-11439", "DOI": "10.48550/arXiv.2401.11439", "CorpusId": 267069070 }, "url": "https://www.semanticscholar.org/paper/df831eeae0385a5cad74e534d79e3bab594b1843", "referenceCount": 106, "citationCount": 8, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Any-point Trajectory Modeling for Policy Learning", "abstract": "Learning from demonstration is a powerful method for teaching robots new skills, and having more demonstration data often improves policy learning. However, the high cost of collecting demonstration data is a significant bottleneck. Videos, as a rich data source, contain knowledge of behaviors, physics, and semantics, but extracting control-specific information from them is challenging due to the lack of action labels. In this work, we introduce a novel framework, Any-point Trajectory Modeling (ATM), that utilizes video demonstrations by pre-training a trajectory model to predict future trajectories of arbitrary points within a video frame. Once trained, these trajectories provide detailed control guidance, enabling the learning of robust visuomotor policies with minimal action-labeled data. Across over 130 language-conditioned tasks we evaluated in both simulation and the real world, ATM outperforms strong video pre-training baselines by 80% on average. Furthermore, we show effective transfer learning of manipulation skills from human videos and videos from a different robot morphology. Visualizations and code are available at: \\url{https://xingyu-lin.github.io/atm}.", "year": 2023, "venue": "Robotics", "authors": [ "Chuan Wen", "Xingyu Lin", "John So", "Kai Chen", "Q. Dou", "Yang Gao", "Pieter Abbeel" ], "externalIds": { "ArXiv": "2401.00025", "DBLP": "journals/corr/abs-2401-00025", "DOI": "10.48550/arXiv.2401.00025", "CorpusId": 266693687 }, "url": "https://www.semanticscholar.org/paper/6a504c1a94139791854bc9a68ee1d3ebb925e642", "referenceCount": 63, "citationCount": 23, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "VideoPoet: A Large Language Model for Zero-Shot Video Generation", "abstract": "We present VideoPoet, a language model capable of synthesizing high-quality video, with matching audio, from a large variety of conditioning signals. VideoPoet employs a decoder-only transformer architecture that processes multimodal inputs -- including images, videos, text, and audio. The training protocol follows that of Large Language Models (LLMs), consisting of two stages: pretraining and task-specific adaptation. During pretraining, VideoPoet incorporates a mixture of multimodal generative objectives within an autoregressive Transformer framework. The pretrained LLM serves as a foundation that can be adapted for a range of video generation tasks. 
We present empirical results demonstrating the model's state-of-the-art capabilities in zero-shot video generation, specifically highlighting VideoPoet's ability to generate high-fidelity motions. Project page: http://sites.research.google/videopoet/", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "D. Kondratyuk", "Lijun Yu", "Xiuye Gu", "José Lezama", "Jonathan Huang", "Rachel Hornung", "Hartwig Adam", "Hassan Akbari", "Y. Alon", "Vighnesh Birodkar", "Yong Cheng", "Ming-Chang Chiu", "Josh Dillon", "Irfan Essa", "Agrim Gupta", "Meera Hahn", "Anja Hauth", "David Hendon", "Alonso Martinez", "David C. Minnen", "David A. Ross", "Grant Schindler", "Mikhail Sirotenko", "Kihyuk Sohn", "Krishna Somandepalli", "Huisheng Wang", "Jimmy Yan", "Ming Yang", "Xuan Yang", "Bryan Seybold", "Lu Jiang" ], "externalIds": { "DBLP": "conf/icml/KondratyukYGLHS24", "ArXiv": "2312.14125", "DOI": "10.48550/arXiv.2312.14125", "CorpusId": 266435847 }, "url": "https://www.semanticscholar.org/paper/0c4f46e4dcae5527018e6432fb60cfe8c3354e97", "referenceCount": 97, "citationCount": 86, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Unleashing Large-Scale Video Generative Pre-training for Visual Robot Manipulation", "abstract": "Generative pre-trained models have demonstrated remarkable effectiveness in language and vision domains by learning useful representations. In this paper, we extend the scope of this effectiveness by showing that visual robot manipulation can significantly benefit from large-scale video generative pre-training. We introduce GR-1, a straightforward GPT-style model designed for multi-task language-conditioned visual robot manipulation. GR-1 takes as inputs a language instruction, a sequence of observation images, and a sequence of robot states. It predicts robot actions as well as future images in an end-to-end manner. Thanks to a flexible design, GR-1 can be seamlessly finetuned on robot data after pre-trained on a large-scale video dataset. We perform extensive experiments on the challenging CALVIN benchmark and a real robot. On CALVIN benchmark, our method outperforms state-of-the-art baseline methods and improves the success rate from 88.9% to 94.9%. In the setting of zero-shot unseen scene generalization, GR-1 improves the success rate from 53.3% to 85.4%. In real robot experiments, GR-1 also outperforms baseline methods and shows strong potentials in generalization to unseen scenes and objects. We provide inaugural evidence that a unified GPT-style transformer, augmented with large-scale video generative pre-training, exhibits remarkable generalization to multi-task visual robot manipulation. 
Project page: https://GR1-Manipulation.github.io", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Hongtao Wu", "Ya Jing", "Chi-Hou Cheang", "Guangzeng Chen", "Jiafeng Xu", "Xinghang Li", "Minghuan Liu", "Hang Li", "Tao Kong" ], "externalIds": { "DBLP": "conf/iclr/WuJCCXLLLK24", "ArXiv": "2312.13139", "DOI": "10.48550/arXiv.2312.13139", "CorpusId": 266374724 }, "url": "https://www.semanticscholar.org/paper/4443c9a43bff8dcd717e5c75115ec6497af2b953", "referenceCount": 62, "citationCount": 27, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Dense Optical Tracking: Connecting the Dots", "abstract": "Recent approaches to point tracking are able to recover the trajectory of any scene point through a large portion of a video despite the presence of occlusions. They are, however, too slow in practice to track every point observed in a single frame in a reasonable amount of time. This paper introduces DOT, a novel, simple and efficient method for solving this problem. It first extracts a small set of tracks from key regions at motion boundaries using an off-the-shelf point tracking algorithm. Given source and target frames, DOT then computes rough initial estimates of a dense flow field and visibility mask through nearest-neighbor interpolation, before refining them using a learnable optical flow estimator that explicitly handles occlusions and can be trained on synthetic data with ground-truth correspondences. We show that DOT is significantly more accurate than current optical flow techniques, outperforms sophisticated “universal” trackers like OmniMotion, and is on par with, or better than, the best point tracking algorithms like CoTracker while being at least two orders of magnitude faster. Quantitative and qualitative experiments with synthetic and real videos validate the promise of the proposed approach. Code, data, and videos showcasing the capabilities of our approach are available on the project webpage: https://16lemoing.github.io/dot", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "G. L. Moing", "Jean Ponce", "Cordelia Schmid" ], "externalIds": { "DBLP": "journals/corr/abs-2312-00786", "ArXiv": "2312.00786", "DOI": "10.1109/CVPR52733.2024.01815", "CorpusId": 265551757 }, "url": "https://www.semanticscholar.org/paper/0c1952119b991133891773fba398e5a6a7a2dca5", "referenceCount": 75, "citationCount": 11, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Towards Generalizable Zero-Shot Manipulation via Translating Human Interaction Plans", "abstract": "We pursue the goal of developing robots that can interact zero-shot with generic unseen objects via a diverse repertoire of manipulation skills and show how passive human videos can serve as a rich source of data for learning such generalist robots. Unlike typical robot learning approaches which directly learn how a robot should act from interaction data, we adopt a factorized approach that can leverage large-scale human videos to learn how a human would accomplish a desired task (a human ‘plan’), followed by ‘translating’ this plan to the robot’s embodiment. Specifically, we learn a human ‘plan predictor’ that, given a current image of a scene and a goal image, predicts the future hand and object configurations.
We combine this with a ‘translation’ module that learns a plan-conditioned robot manipulation policy, and allows following humans plans for generic manipulation tasks in a zero-shot manner with no deployment-time training. Importantly, while the plan predictor can leverage large-scale human videos for learning, the translation module only requires a small amount of in-domain data, and can generalize to tasks not seen during training. We show that our learned system can perform over 16 manipulation skills that generalize to 40 objects, encompassing 100 real-world tasks for table-top manipulation and diverse in-the-wild manipulation. https://homangab.github.io/hopman/", "year": 2023, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Homanga Bharadhwaj", "Abhi Gupta", "Vikash Kumar", "Shubham Tulsiani" ], "externalIds": { "DBLP": "conf/icra/Bharadhwaj0KT24", "ArXiv": "2312.00775", "DOI": "10.1109/ICRA57147.2024.10610288", "CorpusId": 265551754 }, "url": "https://www.semanticscholar.org/paper/bfae03cba791a429821cee6bb6a8acb8edf85616", "referenceCount": 64, "citationCount": 17, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Emu Video: Factorizing Text-to-Video Generation by Explicit Image Conditioning", "abstract": "We present Emu Video, a text-to-video generation model that factorizes the generation into two steps: first generating an image conditioned on the text, and then generating a video conditioned on the text and the generated image. We identify critical design decisions--adjusted noise schedules for diffusion, and multi-stage training that enable us to directly generate high quality and high resolution videos, without requiring a deep cascade of models as in prior work. In human evaluations, our generated videos are strongly preferred in quality compared to all prior work--81% vs. Google's Imagen Video, 90% vs. Nvidia's PYOCO, and 96% vs. Meta's Make-A-Video. Our model outperforms commercial solutions such as RunwayML's Gen2 and Pika Labs. Finally, our factorizing approach naturally lends itself to animating images based on a user's text prompt, where our generations are preferred 96% over prior work.", "year": 2023, "venue": "arXiv.org", "authors": [ "Rohit Girdhar", "Mannat Singh", "Andrew Brown", "Quentin Duval", "S. Azadi", "Sai Saketh Rambhatla", "Akbar Shah", "Xi Yin", "Devi Parikh", "Ishan Misra" ], "externalIds": { "ArXiv": "2311.10709", "DBLP": "journals/corr/abs-2311-10709", "DOI": "10.48550/arXiv.2311.10709", "CorpusId": 265281059 }, "url": "https://www.semanticscholar.org/paper/85b10400864187230714506412c85610c786b5c3", "referenceCount": 90, "citationCount": 102, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RT-Trajectory: Robotic Task Generalization via Hindsight Trajectory Sketches", "abstract": "Generalization remains one of the most important desiderata for robust robot learning systems. While recently proposed approaches show promise in generalization to novel objects, semantic concepts, or visual distribution shifts, generalization to new tasks remains challenging. For example, a language-conditioned policy trained on pick-and-place tasks will not be able to generalize to a folding task, even if the arm trajectory of folding is similar to pick-and-place. Our key insight is that this kind of generalization becomes feasible if we represent the task through rough trajectory sketches. 
We propose a policy conditioning method using such rough trajectory sketches, which we call RT-Trajectory, that is practical, easy to specify, and allows the policy to effectively perform new tasks that would otherwise be challenging to perform. We find that trajectory sketches strike a balance between being detailed enough to express low-level motion-centric guidance while being coarse enough to allow the learned policy to interpret the trajectory sketch in the context of situational visual observations. In addition, we show how trajectory sketches can provide a useful interface to communicate with robotic policies: they can be specified through simple human inputs like drawings or videos, or through automated methods such as modern image-generating or waypoint-generating methods. We evaluate RT-Trajectory at scale on a variety of real-world robotic tasks, and find that RT-Trajectory is able to perform a wider range of tasks compared to language-conditioned and goal-conditioned policies, when provided the same training data.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Jiayuan Gu", "Sean Kirmani", "Paul Wohlhart", "Yao Lu", "Montse Gonzalez Arenas", "Kanishka Rao", "Wenhao Yu", "Chuyuan Fu", "K. Gopalakrishnan", "Zhuo Xu", "Priya Sundaresan", "Peng Xu", "Hao Su", "Karol Hausman", "Chelsea Finn", "Q. Vuong", "Ted Xiao" ], "externalIds": { "ArXiv": "2311.01977", "DBLP": "journals/corr/abs-2311-01977", "DOI": "10.48550/arXiv.2311.01977", "CorpusId": 265018996 }, "url": "https://www.semanticscholar.org/paper/1ba3adf8f2049e672c0b8786c18a1f2ffcd21fa0", "referenceCount": 37, "citationCount": 19, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Zero-Shot Robotic Manipulation with Pretrained Image-Editing Diffusion Models", "abstract": "If generalist robots are to operate in truly unstructured environments, they need to be able to recognize and reason about novel objects and scenarios. Such objects and scenarios might not be present in the robot's own training data. We propose SuSIE, a method that leverages an image-editing diffusion model to act as a high-level planner by proposing intermediate subgoals that a low-level controller can accomplish. Specifically, we finetune InstructPix2Pix on video data, consisting of both human videos and robot rollouts, such that it outputs hypothetical future\"subgoal\"observations given the robot's current observation and a language command. We also use the robot data to train a low-level goal-conditioned policy to act as the aforementioned low-level controller. We find that the high-level subgoal predictions can utilize Internet-scale pretraining and visual understanding to guide the low-level goal-conditioned policy, achieving significantly better generalization and precision than conventional language-conditioned policies. We achieve state-of-the-art results on the CALVIN benchmark, and also demonstrate robust generalization on real-world manipulation tasks, beating strong baselines that have access to privileged information or that utilize orders of magnitude more compute and training data. The project website can be found at http://rail-berkeley.github.io/susie .", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Kevin Black", "Mitsuhiko Nakamoto", "P. 
Atreya", "Homer Walke", "Chelsea Finn", "Aviral Kumar", "Sergey Levine" ], "externalIds": { "ArXiv": "2310.10639", "DBLP": "journals/corr/abs-2310-10639", "DOI": "10.48550/arXiv.2310.10639", "CorpusId": 264172455 }, "url": "https://www.semanticscholar.org/paper/d6bd400073090b88ea535a6166ca9c164b8015b7", "referenceCount": 68, "citationCount": 52, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RoboAgent: Generalization and Efficiency in Robot Manipulation via Semantic Augmentations and Action Chunking", "abstract": "The grand aim of having a single robot that can manipulate arbitrary objects in diverse settings is at odds with the paucity of robotics datasets. Acquiring and growing such datasets is strenuous due to manual efforts, operational costs, and safety challenges. A path toward such a universal agent requires an efficient framework capable of generalization but within a reasonable data budget. In this paper, we develop an efficient framework (MT-ACT) for training universal agents capable of multi-task manipulation skills using (a) semantic augmentations that can rapidly multiply existing datasets and (b) action representations that can extract performant policies with small yet diverse multi-modal datasets without overfitting. In addition, reliable task conditioning and an expressive policy architecture enables our agent to exhibit a diverse repertoire of skills in novel situations specified using task commands. Using merely 7500 demonstrations, we are able to train a single policy RoboAgent capable of 12 unique skills, and demonstrate its generalization over 38 tasks spread across common daily activities in diverse kitchen scenes. On average, RoboAgent outperforms prior methods by over 40% in unseen situations while being more sample efficient. See https://robopen.github.io/for video results and appendix.", "year": 2023, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Homanga Bharadhwaj", "Jay Vakil", "Mohit Sharma", "Abhi Gupta", "Shubham Tulsiani", "Vikash Kumar" ], "externalIds": { "DBLP": "conf/icra/BharadhwajVSGTK24", "ArXiv": "2309.01918", "DOI": "10.1109/ICRA57147.2024.10611293", "CorpusId": 261518421 }, "url": "https://www.semanticscholar.org/paper/148e95859248878a0695a31ef6165614a01df631", "referenceCount": 72, "citationCount": 51, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "BridgeData V2: A Dataset for Robot Learning at Scale", "abstract": "We introduce BridgeData V2, a large and diverse dataset of robotic manipulation behaviors designed to facilitate research on scalable robot learning. BridgeData V2 contains 60,096 trajectories collected across 24 environments on a publicly available low-cost robot. BridgeData V2 provides extensive task and environment variability, leading to skills that can generalize across environments, domains, and institutions, making the dataset a useful resource for a broad range of researchers. Additionally, the dataset is compatible with a wide variety of open-vocabulary, multi-task learning methods conditioned on goal images or natural language instructions. In our experiments, we train 6 state-of-the-art imitation learning and offline reinforcement learning methods on our dataset, and find that they succeed on a suite of tasks requiring varying amounts of generalization. 
We also demonstrate that the performance of these methods improves with more data and higher capacity models, and that training on a greater variety of skills leads to improved generalization. By publicly sharing BridgeData V2 and our pre-trained models, we aim to accelerate research in scalable robot learning methods. Project page at https://rail-berkeley.github.io/bridgedata", "year": 2023, "venue": "Conference on Robot Learning", "authors": [ "Homer Walke", "Kevin Black", "Abraham Lee", "Moo Jin Kim", "Maximilian Du", "Chongyi Zheng", "Tony Zhao", "Philippe Hansen-Estruch", "Q. Vuong", "Andre Wang He", "Vivek Myers", "Kuan Fang", "Chelsea Finn", "S. Levine" ], "externalIds": { "ArXiv": "2308.12952", "DBLP": "conf/corl/WalkeBZVZHHMKDL23", "DOI": "10.48550/arXiv.2308.12952", "CorpusId": 261100981 }, "url": "https://www.semanticscholar.org/paper/6a375be82efc01ec4ed73334655935a56ba82d38", "referenceCount": 61, "citationCount": 84, "influentialCitationCount": 12, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RT-2: Vision-Language-Action Models Transfer Web Knowledge to Robotic Control", "abstract": "We study how vision-language models trained on Internet-scale data can be incorporated directly into end-to-end robotic control to boost generalization and enable emergent semantic reasoning. Our goal is to enable a single end-to-end trained model to both learn to map robot observations to actions and enjoy the benefits of large-scale pretraining on language and vision-language data from the web. To this end, we propose to co-fine-tune state-of-the-art vision-language models on both robotic trajectory data and Internet-scale vision-language tasks, such as visual question answering. In contrast to other approaches, we propose a simple, general recipe to achieve this goal: in order to fit both natural language responses and robotic actions into the same format, we express the actions as text tokens and incorporate them directly into the training set of the model in the same way as natural language tokens. We refer to such category of models as vision-language-action models (VLA) and instantiate an example of such a model, which we call RT-2. Our extensive evaluation (6k evaluation trials) shows that our approach leads to performant robotic policies and enables RT-2 to obtain a range of emergent capabilities from Internet-scale training. This includes significantly improved generalization to novel objects, the ability to interpret commands not present in the robot training data (such as placing an object onto a particular number or icon), and the ability to perform rudimentary reasoning in response to user commands (such as picking up the smallest or largest object, or the one closest to another object). We further show that incorporating chain of thought reasoning allows RT-2 to perform multi-stage semantic reasoning, for example figuring out which object to pick up for use as an improvised hammer (a rock), or which type of drink is best suited for someone who is tired (an energy drink).", "year": 2023, "venue": "Conference on Robot Learning", "authors": [ "Anthony Brohan", "Noah Brown", "Justice Carbajal", "Yevgen Chebotar", "K. Choromanski", "Tianli Ding", "Danny Driess", "Kumar Avinava Dubey", "Chelsea Finn", "Peter R. Florence", "Chuyuan Fu", "Montse Gonzalez Arenas", "K. Gopalakrishnan", "Kehang Han", "Karol Hausman", "Alexander Herzog", "Jasmine Hsu", "Brian Ichter", "A. Irpan", "Nikhil J. Joshi", "Ryan C. 
Julian", "Dmitry Kalashnikov", "Yuheng Kuang", "Isabel Leal", "S. Levine", "H. Michalewski", "Igor Mordatch", "Karl Pertsch", "Kanishka Rao", "Krista Reymann", "M. Ryoo", "Grecia Salazar", "Pannag R. Sanketi", "P. Sermanet", "Jaspiar Singh", "Anika Singh", "Radu Soricut", "Huong Tran", "Vincent Vanhoucke", "Q. Vuong", "Ayzaan Wahid", "Stefan Welker", "Paul Wohlhart", "Ted Xiao", "Tianhe Yu", "Brianna Zitkovich" ], "externalIds": { "ArXiv": "2307.15818", "DBLP": "conf/corl/ZitkovichYXXXXW23", "DOI": "10.48550/arXiv.2307.15818", "CorpusId": 260293142 }, "url": "https://www.semanticscholar.org/paper/38939304bb760473141c2aca0305e44fbe04e6e8", "referenceCount": 94, "citationCount": 595, "influentialCitationCount": 47, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "CoTracker: It is Better to Track Together", "abstract": "We introduce CoTracker, a transformer-based model that tracks dense points in a frame jointly across a video sequence. This differs from most existing state-of-the-art approaches that track points independently, ignoring their correlation. We show that joint tracking results in a significantly higher tracking accuracy and robustness. We also provide several technical innovations, including the concept of virtual tracks, which allows CoTracker to track 70k points jointly and simultaneously. Furthermore, CoTracker operates causally on short windows (hence, it is suitable for online tasks), but is trained by unrolling the windows across longer video sequences, which enables and significantly improves long-term tracking. We demonstrate qualitatively impressive tracking results, where points can be tracked for a long time even when they are occluded or leave the field of view. Quantitatively, CoTracker outperforms all recent trackers on standard benchmarks, often by a substantial margin.", "year": 2023, "venue": "arXiv.org", "authors": [ "Nikita Karaev", "Ignacio Rocco", "Benjamin Graham", "N. Neverova", "A. Vedaldi", "C. Rupprecht" ], "externalIds": { "DBLP": "journals/corr/abs-2307-07635", "ArXiv": "2307.07635", "DOI": "10.48550/arXiv.2307.07635", "CorpusId": 259937159 }, "url": "https://www.semanticscholar.org/paper/fa85633fb74d071911fe64d952afe47adc9928d6", "referenceCount": 54, "citationCount": 110, "influentialCitationCount": 31, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Visual Affordance Prediction for Guiding Robot Exploration", "abstract": "Motivated by the intuitive understanding humans have about the space of possible interactions, and the ease with which they can generalize this understanding to previously unseen scenes, we develop an approach for learning ‘visual affordances’. Given an input image of a scene, we infer a distribution over plausible future states that can be achieved via interactions with it. To allow predicting diverse plausible futures, we discretize the space of continuous images with a VQ-VAE and use a Transformer-based model to learn a conditional distribution in the latent embedding space. We show that these models can be trained using large-scale and diverse passive data, and that the learned models exhibit compositional generalization to diverse objects beyond the training distribution. 
We evaluate the quality and diversity of the generations, and demonstrate how the trained affordance model can be used for guiding exploration during visual goal-conditioned policy learning in robotic manipulation.", "year": 2023, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Homanga Bharadhwaj", "Abhi Gupta", "Shubham Tulsiani" ], "externalIds": { "DBLP": "conf/icra/BharadhwajGT23", "ArXiv": "2305.17783", "DOI": "10.1109/ICRA48891.2023.10161288", "CorpusId": 258959322 }, "url": "https://www.semanticscholar.org/paper/1455461948fbe945f47b9a2fe3bd88fc4e1d155c", "referenceCount": 48, "citationCount": 10, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Affordances from Human Videos as a Versatile Representation for Robotics", "abstract": "Building a robot that can understand and learn to interact by watching humans has inspired several vision problems. However, despite some successful results on static datasets, it remains unclear how current models can be used on a robot directly. In this paper, we aim to bridge this gap by leveraging videos of human interactions in an environment centric manner. Utilizing internet videos of human behavior, we train a visual affordance model that estimates where and how in the scene a human is likely to interact. The structure of these behavioral affordances directly enables the robot to perform many complex tasks. We show how to seamlessly integrate our affordance model with four robot learning paradigms including offline imitation learning, exploration, goal-conditioned learning, and action parameterization for reinforcement learning. We show the efficacy of our approach, which we call Vision-Robotics Bridge (VRB) across 4 real world environments, over 10 different tasks, and 2 robotic platforms operating in the wild.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Shikhar Bahl", "Russell Mendonca", "Lili Chen", "Unnat Jain", "Deepak Pathak" ], "externalIds": { "DBLP": "journals/corr/abs-2304-08488", "ArXiv": "2304.08488", "DOI": "10.1109/CVPR52729.2023.01324", "CorpusId": 258180471 }, "url": "https://www.semanticscholar.org/paper/253b41369d003952874c6a47a6038277b165cfa0", "referenceCount": 142, "citationCount": 92, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Lossless Adaptation of Pretrained Vision Models For Robotic Manipulation", "abstract": "Recent works have shown that large models pretrained on common visual learning tasks can provide useful representations for a wide range of specialized perception problems, as well as a variety of robotic manipulation tasks. While prior work on robotic manipulation has predominantly used frozen pretrained features, we demonstrate that in robotics this approach can fail to reach optimal performance, and that fine-tuning of the full model can lead to significantly better results. Unfortunately, fine-tuning disrupts the pretrained visual representation, and causes representational drift towards the fine-tuned task thus leading to a loss of the versatility of the original model. We introduce\"lossless adaptation\"to address this shortcoming of classical fine-tuning. 
We demonstrate that appropriate placement of our parameter efficient adapters can significantly reduce the performance gap between frozen pretrained representations and full end-to-end fine-tuning without changes to the original representation and thus preserving original capabilities of the pretrained model. We perform a comprehensive investigation across three major model architectures (ViTs, NFNets, and ResNets), supervised (ImageNet-1K classification) and self-supervised pretrained weights (CLIP, BYOL, Visual MAE) in 3 task domains and 35 individual tasks, and demonstrate that our claims are strongly validated in various settings.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Mohit Sharma", "C. Fantacci", "Yuxiang Zhou", "Skanda Koppula", "N. Heess", "Jonathan Scholz", "Y. Aytar" ], "externalIds": { "DBLP": "journals/corr/abs-2304-06600", "ArXiv": "2304.06600", "DOI": "10.48550/arXiv.2304.06600", "CorpusId": 258108073 }, "url": "https://www.semanticscholar.org/paper/016315a3df05cb07a6f67fa5f6a3265b55909644", "referenceCount": 64, "citationCount": 20, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Where are we in the search for an Artificial Visual Cortex for Embodied Intelligence?", "abstract": "We present the largest and most comprehensive empirical study of pre-trained visual representations (PVRs) or visual 'foundation models' for Embodied AI. First, we curate CortexBench, consisting of 17 different tasks spanning locomotion, navigation, dexterous, and mobile manipulation. Next, we systematically evaluate existing PVRs and find that none are universally dominant. To study the effect of pre-training data size and diversity, we combine over 4,000 hours of egocentric videos from 7 different sources (over 4.3M images) and ImageNet to train different-sized vision transformers using Masked Auto-Encoding (MAE) on slices of this data. Contrary to inferences from prior work, we find that scaling dataset size and diversity does not improve performance universally (but does so on average). Our largest model, named VC-1, outperforms all prior PVRs on average but does not universally dominate either. Next, we show that task- or domain-specific adaptation of VC-1 leads to substantial gains, with VC-1 (adapted) achieving competitive or superior performance than the best known results on all of the benchmarks in CortexBench. Finally, we present real-world hardware experiments, in which VC-1 and VC-1 (adapted) outperform the strongest pre-existing PVR. Overall, this paper presents no new techniques but a rigorous systematic evaluation, a broad set of findings about PVRs (that in some cases, refute those made in narrow domains in prior work), and open-sourced code and models (that required over 10,000 GPU-hours to train) for the benefit of the research community.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Arjun Majumdar", "Karmesh Yadav", "Sergio Arnaud", "Yecheng Jason Ma", "Claire Chen", "Sneha Silwal", "Aryan Jain", "Vincent-Pierre Berges", "P. Abbeel", "J. Malik", "Dhruv Batra", "Yixin Lin", "Oleksandr Maksymets", "A. 
Rajeswaran", "Franziska Meier" ], "externalIds": { "DBLP": "conf/nips/MajumdarYAMCSJB23", "ArXiv": "2303.18240", "DOI": "10.48550/arXiv.2303.18240", "CorpusId": 257901087 }, "url": "https://www.semanticscholar.org/paper/326f6a8011e43322c433751b9cc31fd56564621c", "referenceCount": 83, "citationCount": 111, "influentialCitationCount": 16, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MimicPlay: Long-Horizon Imitation Learning by Watching Human Play", "abstract": "Imitation learning from human demonstrations is a promising paradigm for teaching robots manipulation skills in the real world. However, learning complex long-horizon tasks often requires an unattainable amount of demonstrations. To reduce the high data requirement, we resort to human play data - video sequences of people freely interacting with the environment using their hands. Even with different morphologies, we hypothesize that human play data contain rich and salient information about physical interactions that can readily facilitate robot policy learning. Motivated by this, we introduce a hierarchical learning framework named MimicPlay that learns latent plans from human play data to guide low-level visuomotor control trained on a small number of teleoperated demonstrations. With systematic evaluations of 14 long-horizon manipulation tasks in the real world, we show that MimicPlay outperforms state-of-the-art imitation learning methods in task success rate, generalization ability, and robustness to disturbances. Code and videos are available at https://mimic-play.github.io", "year": 2023, "venue": "Conference on Robot Learning", "authors": [ "Chen Wang", "Linxi (Jim) Fan", "Jiankai Sun", "Ruohan Zhang", "Li Fei-Fei", "Danfei Xu", "Yuke Zhu", "Anima Anandkumar" ], "externalIds": { "DBLP": "journals/corr/abs-2302-12422", "ArXiv": "2302.12422", "DOI": "10.48550/arXiv.2302.12422", "CorpusId": 257205825 }, "url": "https://www.semanticscholar.org/paper/a68db57f08f6d72c0d3b22d451d2606dca880f94", "referenceCount": 71, "citationCount": 93, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Language-Driven Representation Learning for Robotics", "abstract": "Recent work in visual representation learning for robotics demonstrates the viability of learning from large video datasets of humans performing everyday tasks. Leveraging methods such as masked autoencoding and contrastive learning, these representations exhibit strong transfer to policy learning for visuomotor control. But, robot learning encompasses a diverse set of problems beyond control including grasp affordance prediction, language-conditioned imitation learning, and intent scoring for human-robot collaboration, amongst others. First, we demonstrate that existing representations yield inconsistent results across these tasks: masked autoencoding approaches pick up on low-level spatial features at the cost of high-level semantics, while contrastive learning approaches capture the opposite. We then introduce Voltron, a framework for language-driven representation learning from human videos and associated captions. Voltron trades off language-conditioned visual reconstruction to learn low-level visual patterns, and visually-grounded language generation to encode high-level semantics. We also construct a new evaluation suite spanning five distinct robot learning problems $\\unicode{x2013}$ a unified platform for holistically evaluating visual representations for robotics. 
Through comprehensive, controlled experiments across all five problems, we find that Voltron's language-driven representations outperform the prior state-of-the-art, especially on targeted problems requiring higher-level features.", "year": 2023, "venue": "Robotics: Science and Systems", "authors": [ "Siddharth Karamcheti", "Suraj Nair", "Annie S. Chen", "T. Kollar", "Chelsea Finn", "Dorsa Sadigh", "Percy Liang" ], "externalIds": { "DBLP": "conf/rss/KaramchetiNCKFS23", "ArXiv": "2302.12766", "DOI": "10.48550/arXiv.2302.12766", "CorpusId": 257205716 }, "url": "https://www.semanticscholar.org/paper/3396609b96dd24cac3b1542aec686ce362f32fe2", "referenceCount": 113, "citationCount": 91, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Scaling Robot Learning with Semantically Imagined Experience", "abstract": "Recent advances in robot learning have shown promise in enabling robots to perform a variety of manipulation tasks and generalize to novel scenarios. One of the key contributing factors to this progress is the scale of robot data used to train the models. To obtain large-scale datasets, prior approaches have relied on either demonstrations requiring high human involvement or engineering-heavy autonomous data collection schemes, both of which are challenging to scale. To mitigate this issue, we propose an alternative route and leverage text-to-image foundation models widely used in computer vision and natural language processing to obtain meaningful data for robot learning without requiring additional robot data. We term our method Robot Learning with Semantically Imagened Experience (ROSIE). Specifically, we make use of the state of the art text-to-image diffusion models and perform aggressive data augmentation on top of our existing robotic manipulation datasets via inpainting various unseen objects for manipulation, backgrounds, and distractors with text guidance. Through extensive real-world experiments, we show that manipulation policies trained on data augmented this way are able to solve completely unseen tasks with new objects and can behave more robustly w.r.t. novel distractors. In addition, we find that we can improve the robustness and generalization of high-level robot learning tasks such as success detection through training with the diffusion-based data augmentation. The project's website and videos can be found at diffusion-rosie.github.io", "year": 2023, "venue": "Robotics: Science and Systems", "authors": [ "Tianhe Yu", "Ted Xiao", "Austin Stone", "Jonathan Tompson", "Anthony Brohan", "Su Wang", "Jaspiar Singh", "Clayton Tan", "M. Dee", "Jodilyn Peralta", "Brian Ichter", "Karol Hausman", "F. Xia" ], "externalIds": { "DBLP": "conf/rss/YuXTSWBSTMPHIX23", "ArXiv": "2302.11550", "DOI": "10.48550/arXiv.2302.11550", "CorpusId": 257079001 }, "url": "https://www.semanticscholar.org/paper/e701e4c02a32da186d25b08373ada12d83b73b3d", "referenceCount": 82, "citationCount": 95, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GenAug: Retargeting behaviors to unseen situations via Generative Augmentation", "abstract": "Robot learning methods have the potential for widespread generalization across tasks, environments, and objects. However, these methods require large diverse datasets that are expensive to collect in real-world robotics settings. For robot learning to generalize, we must be able to leverage sources of data or priors beyond the robot's own experience. 
In this work, we posit that image-text generative models, which are pre-trained on large corpora of web-scraped data, can serve as such a data source. We show that despite these generative models being trained on largely non-robotics data, they can serve as effective ways to impart priors into the process of robot learning in a way that enables widespread generalization. In particular, we show how pre-trained generative models can serve as effective tools for semantically meaningful data augmentation. By leveraging these pre-trained models for generating appropriate \"semantic\" data augmentations, we propose a system GenAug that is able to significantly improve policy generalization. We apply GenAug to tabletop manipulation tasks, showing the ability to re-target behavior to novel scenarios, while only requiring marginal amounts of real-world data. We demonstrate the efficacy of this system on a number of object manipulation problems in the real world, showing a 40% improvement in generalization to novel scenes and objects.", "year": 2023, "venue": "Robotics: Science and Systems", "authors": [ "Zoey Chen", "Sho Kiami", "Abhishek Gupta", "Vikash Kumar" ], "externalIds": { "DBLP": "journals/corr/abs-2302-06671", "ArXiv": "2302.06671", "DOI": "10.48550/arXiv.2302.06671", "CorpusId": 256846801 }, "url": "https://www.semanticscholar.org/paper/9a01fc428d195a9c5ea2005dc2943a650d59aa76", "referenceCount": 39, "citationCount": 50, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Universal Policies via Text-Guided Video Generation", "abstract": "A goal of artificial intelligence is to construct an agent that can solve a wide variety of tasks. Recent progress in text-guided image synthesis has yielded models with an impressive ability to generate complex novel images, exhibiting combinatorial generalization across domains. Motivated by this success, we investigate whether such tools can be used to construct more general-purpose agents. Specifically, we cast the sequential decision making problem as a text-conditioned video generation problem, where, given a text-encoded specification of a desired goal, a planner synthesizes a set of future frames depicting its planned actions in the future, after which control actions are extracted from the generated video. By leveraging text as the underlying goal specification, we are able to naturally and combinatorially generalize to novel goals. The proposed policy-as-video formulation can further represent environments with different state and action spaces in a unified space of images, which, for example, enables learning and generalization across a variety of robot manipulation tasks. Finally, by leveraging pretrained language embeddings and widely available videos from the internet, the approach enables knowledge transfer through predicting highly realistic video plans for real robots.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Yilun Du", "Mengjiao Yang", "Bo Dai", "H. Dai", "Ofir Nachum", "J. Tenenbaum", "D. Schuurmans", "P. 
Abbeel" ], "externalIds": { "DBLP": "conf/nips/DuY0DN0SA23", "ArXiv": "2302.00111", "DOI": "10.48550/arXiv.2302.00111", "CorpusId": 256459809 }, "url": "https://www.semanticscholar.org/paper/da2fe6cd385194b0274d04d04ee72e8caf3854d4", "referenceCount": 51, "citationCount": 130, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RT-1: Robotics Transformer for Real-World Control at Scale", "abstract": "By transferring knowledge from large, diverse, task-agnostic datasets, modern machine learning models can solve specific downstream tasks either zero-shot or with small task-specific datasets to a high level of performance. While this capability has been demonstrated in other fields such as computer vision, natural language processing or speech recognition, it remains to be shown in robotics, where the generalization capabilities of the models are particularly critical due to the difficulty of collecting real-world robotic data. We argue that one of the keys to the success of such general robotic models lies with open-ended task-agnostic training, combined with high-capacity architectures that can absorb all of the diverse, robotic data. In this paper, we present a model class, dubbed Robotics Transformer, that exhibits promising scalable model properties. We verify our conclusions in a study of different model classes and their ability to generalize as a function of the data size, model size, and data diversity based on a large-scale data collection on real robots performing real-world tasks. The project's website and videos can be found at robotics-transformer1.github.io", "year": 2022, "venue": "Robotics: Science and Systems", "authors": [ "Anthony Brohan", "Noah Brown", "Justice Carbajal", "Yevgen Chebotar", "Joseph Dabis", "Chelsea Finn", "K. Gopalakrishnan", "Karol Hausman", "Alexander Herzog", "Jasmine Hsu", "Julian Ibarz", "Brian Ichter", "A. Irpan", "Tomas Jackson", "Sally Jesmonth", "Nikhil J. Joshi", "Ryan C. Julian", "Dmitry Kalashnikov", "Yuheng Kuang", "Isabel Leal", "Kuang-Huei Lee", "S. Levine", "Yao Lu", "U. Malla", "D. Manjunath", "Igor Mordatch", "Ofir Nachum", "Carolina Parada", "Jodilyn Peralta", "Emily Perez", "Karl Pertsch", "Jornell Quiambao", "Kanishka Rao", "M. Ryoo", "Grecia Salazar", "Pannag R. Sanketi", "Kevin Sayed", "Jaspiar Singh", "S. Sontakke", "Austin Stone", "Clayton Tan", "Huong Tran", "Vincent Vanhoucke", "Steve Vega", "Q. Vuong", "F. Xia", "Ted Xiao", "Peng Xu", "Sichun Xu", "Tianhe Yu", "Brianna Zitkovich" ], "externalIds": { "DBLP": "conf/rss/BrohanBCCDFGHHH23", "ArXiv": "2212.06817", "DOI": "10.48550/arXiv.2212.06817", "CorpusId": 254591260 }, "url": "https://www.semanticscholar.org/paper/fd1cf28a2b8caf2fe29af5e7fa9191cecfedf84d", "referenceCount": 72, "citationCount": 608, "influentialCitationCount": 64, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On Pre-Training for Visuo-Motor Control: Revisiting a Learning-from-Scratch Baseline", "abstract": "In this paper, we examine the effectiveness of pre-training for visuo-motor control tasks. We revisit a simple Learning-from-Scratch (LfS) baseline that incorporates data augmentation and a shallow ConvNet, and find that this baseline is surprisingly competitive with recent approaches (PVR, MVP, R3M) that leverage frozen visual representations trained on large-scale vision datasets -- across a variety of algorithms, task domains, and metrics in simulation and on a real robot. 
Our results demonstrate that these methods are hindered by a significant domain gap between the pre-training datasets and current benchmarks for visuo-motor control, which is alleviated by finetuning. Based on our findings, we provide recommendations for future research in pre-training for control and hope that our simple yet strong baseline will aid in accurately benchmarking progress in this area.", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "Nicklas Hansen", "Zhecheng Yuan", "Yanjie Ze", "Tongzhou Mu", "A. Rajeswaran", "H. Su", "Huazhe Xu", "Xiaolong Wang" ], "externalIds": { "DBLP": "conf/icml/0001YZMR0XW23", "ArXiv": "2212.05749", "DOI": "10.48550/arXiv.2212.05749", "CorpusId": 254222854 }, "url": "https://www.semanticscholar.org/paper/668ef8248bf0ecfaf36cc6a6c65a4f136b976858", "referenceCount": 58, "citationCount": 49, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "CACTI: A Framework for Scalable Multi-Task Multi-Scene Visual Imitation Learning", "abstract": "Large-scale training have propelled significant progress in various sub-fields of AI such as computer vision and natural language processing. However, building robot learning systems at a comparable scale remains challenging. To develop robots that can perform a wide range of skills and adapt to new scenarios, efficient methods for collecting vast and diverse amounts of data on physical robot systems are required, as well as the capability to train high-capacity policies using such datasets. In this work, we propose a framework for scaling robot learning, with specific focus on multi-task and multi-scene manipulation in kitchen environments, both in simulation and in the real world. Our proposed framework, CACTI, comprises four stages that separately handle: data collection, data augmentation, visual representation learning, and imitation policy training, to enable scalability in robot learning . We make use of state-of-the-art generative models as part of the data augmentation stage, and use pre-trained out-of-domain visual representations to improve training efficiency. Experimental results demonstrate the effectiveness of our approach. On a real robot setup, CACTI enables efficient training of a single policy that can perform 10 manipulation tasks involving kitchen objects, and is robust to varying layouts of distractors. In a simulated kitchen environment, CACTI trains a single policy to perform 18 semantic tasks across 100 layout variations for each individual task. We will release the simulation task benchmark and augmented datasets in both real and simulated environments to facilitate future research.", "year": 2022, "venue": "arXiv.org", "authors": [ "Zhao Mandi", "Homanga Bharadhwaj", "V. Moens", "Shuran Song", "A. Rajeswaran", "Vikash Kumar" ], "externalIds": { "DBLP": "journals/corr/abs-2212-05711", "ArXiv": "2212.05711", "DOI": "10.48550/arXiv.2212.05711", "CorpusId": 254183982 }, "url": "https://www.semanticscholar.org/paper/9ffc8f7b3fbd5e609f609b1c20206129f22b4eb7", "referenceCount": 59, "citationCount": 47, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "VideoDex: Learning Dexterity from Internet Videos", "abstract": "To build general robotic agents that can operate in many environments, it is often imperative for the robot to collect experience in the real world. However, this is often not feasible due to safety, time, and hardware restrictions. 
We thus propose leveraging the next best thing as real-world experience: internet videos of humans using their hands. Visual priors, such as visual features, are often learned from videos, but we believe that more information from videos can be utilized as a stronger prior. We build a learning algorithm, VideoDex, that leverages visual, action, and physical priors from human video datasets to guide robot behavior. These actions and physical priors in the neural network dictate the typical human behavior for a particular robot task. We test our approach on a robot arm and dexterous hand-based system and show strong results on various manipulation tasks, outperforming various state-of-the-art methods. Videos at https://video-dex.github.io", "year": 2022, "venue": "Conference on Robot Learning", "authors": [ "Kenneth Shaw", "Shikhar Bahl", "Deepak Pathak" ], "externalIds": { "DBLP": "journals/corr/abs-2212-04498", "ArXiv": "2212.04498", "DOI": "10.48550/arXiv.2212.04498", "CorpusId": 254408735 }, "url": "https://www.semanticscholar.org/paper/5ba2a2ab5c5569056b3a9c520706d75421cb41f2", "referenceCount": 73, "citationCount": 56, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "TAP-Vid: A Benchmark for Tracking Any Point in a Video", "abstract": "Generic motion understanding from video involves not only tracking objects, but also perceiving how their surfaces deform and move. This information is useful to make inferences about 3D shape, physical properties and object interactions. While the problem of tracking arbitrary physical points on surfaces over longer video clips has received some attention, no dataset or benchmark for evaluation existed, until now. In this paper, we first formalize the problem, naming it tracking any point (TAP). We introduce a companion benchmark, TAP-Vid, which is composed of both real-world videos with accurate human annotations of point tracks, and synthetic videos with perfect ground-truth point tracks. Central to the construction of our benchmark is a novel semi-automatic crowdsourced pipeline which uses optical flow estimates to compensate for easier, short-term motion like camera shake, allowing annotators to focus on harder sections of video. We validate our pipeline on synthetic data and propose a simple end-to-end point tracking model TAP-Net, showing that it outperforms all prior methods on our benchmark when trained on synthetic data.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Carl Doersch", "Ankush Gupta", "L. Markeeva", "Adrià Recasens", "Lucas Smaira", "Y. Aytar", "João Carreira", "Andrew Zisserman", "Yezhou Yang" ], "externalIds": { "DBLP": "conf/nips/DoerschGMRSACZY22", "ArXiv": "2211.03726", "DOI": "10.48550/arXiv.2211.03726", "CorpusId": 253384359 }, "url": "https://www.semanticscholar.org/paper/d81c7c08765add0fd478aa3d6b3a9f4b7c1003a8", "referenceCount": 92, "citationCount": 86, "influentialCitationCount": 27, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "DALL-E-Bot: Introducing Web-Scale Diffusion Models to Robotics", "abstract": "We introduce the first work to explore web-scale diffusion models for robotics. DALL-E-Bot enables a robot to rearrange objects in a scene, by first inferring a text description of those objects, then generating an image representing a natural, human-like arrangement of those objects, and finally physically arranging the objects according to that goal image. 
We show that this is possible zero-shot using DALL-E, without needing any further example arrangements, data collection, or training. DALL-E-Bot is fully autonomous and is not restricted to a pre-defined set of objects or scenes, thanks to DALL-E's web-scale pre-training. Encouraging real-world results, with both human studies and objective metrics, show that integrating web-scale diffusion models into robotics pipelines is a promising direction for scalable, unsupervised robot learning.", "year": 2022, "venue": "IEEE Robotics and Automation Letters", "authors": [ "Ivan Kapelyukh", "Vitalis Vosylius", "Edward Johns" ], "externalIds": { "DBLP": "journals/corr/abs-2210-02438", "ArXiv": "2210.02438", "DOI": "10.1109/LRA.2023.3272516", "CorpusId": 252715865 }, "url": "https://www.semanticscholar.org/paper/4fd4e392fb39124744bdfbb6d71ae2030be5132e", "referenceCount": 71, "citationCount": 104, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "VIP: Towards Universal Visual Reward and Representation via Value-Implicit Pre-Training", "abstract": "Reward and representation learning are two long-standing challenges for learning an expanding set of robot manipulation skills from sensory observations. Given the inherent cost and scarcity of in-domain, task-specific robot data, learning from large, diverse, offline human videos has emerged as a promising path towards acquiring a generally useful visual representation for control; however, how these human videos can be used for general-purpose reward learning remains an open question. We introduce $\\textbf{V}$alue-$\\textbf{I}$mplicit $\\textbf{P}$re-training (VIP), a self-supervised pre-trained visual representation capable of generating dense and smooth reward functions for unseen robotic tasks. VIP casts representation learning from human videos as an offline goal-conditioned reinforcement learning problem and derives a self-supervised dual goal-conditioned value-function objective that does not depend on actions, enabling pre-training on unlabeled human videos. Theoretically, VIP can be understood as a novel implicit time contrastive objective that generates a temporally smooth embedding, enabling the value function to be implicitly defined via the embedding distance, which can then be used to construct the reward for any goal-image specified downstream task. Trained on large-scale Ego4D human videos and without any fine-tuning on in-domain, task-specific data, VIP's frozen representation can provide dense visual reward for an extensive set of simulated and $\\textbf{real-robot}$ tasks, enabling diverse reward-based visual control methods and significantly outperforming all prior pre-trained representations. Notably, VIP can enable simple, $\\textbf{few-shot}$ offline RL on a suite of real-world robot tasks with as few as 20 trajectories.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Yecheng Jason Ma", "Shagun Sodhani", "Dinesh Jayaraman", "O. 
Bastani", "Vikash Kumar", "Amy Zhang" ], "externalIds": { "DBLP": "journals/corr/abs-2210-00030", "ArXiv": "2210.00030", "DOI": "10.48550/arXiv.2210.00030", "CorpusId": 252683397 }, "url": "https://www.semanticscholar.org/paper/3fbe2e8413df0207c26ff393c9aaa8488e3ca4c3", "referenceCount": 71, "citationCount": 190, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Human-to-Robot Imitation in the Wild", "abstract": "We approach the problem of learning by watching humans in the wild. While traditional approaches in Imitation and Reinforcement Learning are promising for learning in the real world, they are either sample inefficient or are constrained to lab settings. Meanwhile, there has been a lot of success in processing passive, unstructured human data. We propose tackling this problem via an efficient one-shot robot learning algorithm, centered around learning from a third-person perspective. We call our method WHIRL: In-the-Wild Human Imitating Robot Learning. WHIRL extracts a prior over the intent of the human demonstrator, using it to initialize our agent's policy. We introduce an efficient real-world policy learning scheme that improves using interactions. Our key contributions are a simple sampling-based policy optimization approach, a novel objective function for aligning human and robot videos as well as an exploration method to boost sample efficiency. We show one-shot generalization and success in real-world settings, including 20 different manipulation tasks in the wild. Videos and talk at https://human2robot.github.io", "year": 2022, "venue": "Robotics: Science and Systems", "authors": [ "Shikhar Bahl", "Abhi Gupta", "Deepak Pathak" ], "externalIds": { "DBLP": "journals/corr/abs-2207-09450", "ArXiv": "2207.09450", "DOI": "10.15607/rss.2022.xviii.026", "CorpusId": 248941578 }, "url": "https://www.semanticscholar.org/paper/d28b9f65c849eba9ba2b27f7e91906f46fbe7fa1", "referenceCount": 87, "citationCount": 110, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Photorealistic Text-to-Image Diffusion Models with Deep Language Understanding", "abstract": "We present Imagen, a text-to-image diffusion model with an unprecedented degree of photorealism and a deep level of language understanding. Imagen builds on the power of large transformer language models in understanding text and hinges on the strength of diffusion models in high-fidelity image generation. Our key discovery is that generic large language models (e.g. T5), pretrained on text-only corpora, are surprisingly effective at encoding text for image synthesis: increasing the size of the language model in Imagen boosts both sample fidelity and image-text alignment much more than increasing the size of the image diffusion model. Imagen achieves a new state-of-the-art FID score of 7.27 on the COCO dataset, without ever training on COCO, and human raters find Imagen samples to be on par with the COCO data itself in image-text alignment. To assess text-to-image models in greater depth, we introduce DrawBench, a comprehensive and challenging benchmark for text-to-image models. With DrawBench, we compare Imagen with recent methods including VQ-GAN+CLIP, Latent Diffusion Models, and DALL-E 2, and find that human raters prefer Imagen over other models in side-by-side comparisons, both in terms of sample quality and image-text alignment. 
See https://imagen.research.google/ for an overview of the results.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Chitwan Saharia", "William Chan", "Saurabh Saxena", "Lala Li", "Jay Whang", "Emily L. Denton", "Seyed Kamyar Seyed Ghasemipour", "Burcu Karagol Ayan", "S. S. Mahdavi", "Raphael Gontijo Lopes", "Tim Salimans", "Jonathan Ho", "David J. Fleet", "Mohammad Norouzi" ], "externalIds": { "DBLP": "journals/corr/abs-2205-11487", "ArXiv": "2205.11487", "DOI": "10.48550/arXiv.2205.11487", "CorpusId": 248986576 }, "url": "https://www.semanticscholar.org/paper/9695824d7a01fad57ba9c01d7d76a519d78d65e7", "referenceCount": 108, "citationCount": 4291, "influentialCitationCount": 361, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Flamingo: a Visual Language Model for Few-Shot Learning", "abstract": "Building models that can be rapidly adapted to novel tasks using only a handful of annotated examples is an open challenge for multimodal machine learning research. We introduce Flamingo, a family of Visual Language Models (VLM) with this ability. We propose key architectural innovations to: (i) bridge powerful pretrained vision-only and language-only models, (ii) handle sequences of arbitrarily interleaved visual and textual data, and (iii) seamlessly ingest images or videos as inputs. Thanks to their flexibility, Flamingo models can be trained on large-scale multimodal web corpora containing arbitrarily interleaved text and images, which is key to endow them with in-context few-shot learning capabilities. We perform a thorough evaluation of our models, exploring and measuring their ability to rapidly adapt to a variety of image and video tasks. These include open-ended tasks such as visual question-answering, where the model is prompted with a question which it has to answer; captioning tasks, which evaluate the ability to describe a scene or an event; and close-ended tasks such as multiple-choice visual question-answering. For tasks lying anywhere on this spectrum, a single Flamingo model can achieve a new state of the art with few-shot learning, simply by prompting the model with task-specific examples. On numerous benchmarks, Flamingo outperforms models fine-tuned on thousands of times more task-specific data.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Jean-Baptiste Alayrac", "Jeff Donahue", "Pauline Luc", "Antoine Miech", "Iain Barr", "Yana Hasson", "Karel Lenc", "A. Mensch", "Katie Millican", "Malcolm Reynolds", "Roman Ring", "Eliza Rutherford", "Serkan Cabi", "Tengda Han", "Zhitao Gong", "Sina Samangooei", "Marianne Monteiro", "Jacob Menick", "Sebastian Borgeaud", "Andy Brock", "Aida Nematzadeh", "Sahand Sharifzadeh", "Mikolaj Binkowski", "Ricardo Barreira", "O. Vinyals", "Andrew Zisserman", "K. Simonyan" ], "externalIds": { "ArXiv": "2204.14198", "DBLP": "journals/corr/abs-2204-14198", "CorpusId": 248476411 }, "url": "https://www.semanticscholar.org/paper/26218bdcc3945c7edae7aa2adbfba4cd820a2df3", "referenceCount": 182, "citationCount": 2312, "influentialCitationCount": 251, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Joint Hand Motion and Interaction Hotspots Prediction from Egocentric Videos", "abstract": "We propose to forecast future hand-object interactions given an egocentric video. 
Instead of predicting action labels or pixels, we directly predict the hand motion trajectory and the future contact points on the next active object (i.e., interaction hotspots). This relatively low-dimensional representation provides a concrete description of future interactions. To tackle this task, we first provide an automatic way to collect trajectory and hotspots labels on large-scale data. We then use this data to train an Object-Centric Transformer (OCT) model for prediction. Our model performs hand and object interaction reasoning via the self-attention mechanism in Transformers. OCT also provides a probabilistic framework to sample the future trajectory and hotspots to handle uncertainty in prediction. We perform experiments on the Epic-Kitchens-55, Epic-Kitchens-100 and EGTEA Gaze+ datasets, and show that OCT significantly outperforms state-of-the-art approaches by a large margin. Project page is available at https://stevenlsw.github.io/hoi-forecast.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Shao-Wei Liu", "Subarna Tripathi", "Somdeb Majumdar", "Xiaolong Wang" ], "externalIds": { "ArXiv": "2204.01696", "DBLP": "journals/corr/abs-2204-01696", "DOI": "10.1109/CVPR52688.2022.00328", "CorpusId": 247939501 }, "url": "https://www.semanticscholar.org/paper/95833e22c1ef49bb65e49c01d75dfd269603a621", "referenceCount": 93, "citationCount": 76, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "R3M: A Universal Visual Representation for Robot Manipulation", "abstract": "We study how visual representations pre-trained on diverse human video data can enable data-efficient learning of downstream robotic manipulation tasks. Concretely, we pre-train a visual representation using the Ego4D human video dataset using a combination of time-contrastive learning, video-language alignment, and an L1 penalty to encourage sparse and compact representations. The resulting representation, R3M, can be used as a frozen perception module for downstream policy learning. Across a suite of 12 simulated robot manipulation tasks, we find that R3M improves task success by over 20% compared to training from scratch and by over 10% compared to state-of-the-art visual representations like CLIP and MoCo. Furthermore, R3M enables a Franka Emika Panda arm to learn a range of manipulation tasks in a real, cluttered apartment given just 20 demonstrations. Code and pre-trained models are available at https://tinyurl.com/robotr3m.", "year": 2022, "venue": "Conference on Robot Learning", "authors": [ "Suraj Nair", "A. Rajeswaran", "Vikash Kumar", "Chelsea Finn", "Abhi Gupta" ], "externalIds": { "DBLP": "conf/corl/NairRKF022", "ArXiv": "2203.12601", "DOI": "10.48550/arXiv.2203.12601", "CorpusId": 247618840 }, "url": "https://www.semanticscholar.org/paper/c9bdc9ad2c3cf3230ba9aac7b5783ab411f0d204", "referenceCount": 74, "citationCount": 385, "influentialCitationCount": 60, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Masked Visual Pre-training for Motor Control", "abstract": "This paper shows that self-supervised visual pre-training from real-world images is effective for learning motor control tasks from pixels. We first train the visual representations by masked modeling of natural images. We then freeze the visual encoder and train neural network controllers on top with reinforcement learning.
We do not perform any task-specific fine-tuning of the encoder; the same visual representations are used for all motor control tasks. To the best of our knowledge, this is the first self-supervised model to exploit real-world images at scale for motor control. To accelerate progress in learning from pixels, we contribute a benchmark suite of hand-designed tasks varying in movements, scenes, and robots. Without relying on labels, state-estimation, or expert demonstrations, we consistently outperform supervised encoders by up to 80% absolute success rate, sometimes even matching the oracle state performance. We also find that in-the-wild images, e.g., from YouTube or Egocentric videos, lead to better visual representations for various manipulation tasks than ImageNet images.", "year": 2022, "venue": "arXiv.org", "authors": [ "Tete Xiao", "Ilija Radosavovic", "Trevor Darrell", "J. Malik" ], "externalIds": { "DBLP": "journals/corr/abs-2203-06173", "ArXiv": "2203.06173", "DOI": "10.48550/arXiv.2203.06173", "CorpusId": 247411071 }, "url": "https://www.semanticscholar.org/paper/523acd658742fb9c978e3f7638c09d7ce78af719", "referenceCount": 81, "citationCount": 197, "influentialCitationCount": 20, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Unsurprising Effectiveness of Pre-Trained Vision Models for Control", "abstract": "Recent years have seen the emergence of pretrained representations as a powerful abstraction for AI applications in computer vision, natural language, and speech. However, policy learning for control is still dominated by a tabula-rasa learning paradigm, with visuo-motor policies often trained from scratch using data from deployment environments. In this context, we revisit and study the role of pre-trained visual representations for control, and in particular representations trained on large-scale computer vision datasets. Through extensive empirical evaluation in diverse control domains (Habitat, DeepMind Control, Adroit, Franka Kitchen), we isolate and study the importance of different representation training methods, data augmentations, and feature hierarchies. Overall, we find that pre-trained visual representations can be competitive or even better than ground-truth state representations to train control policies. This is in spite of using only out-of-domain data from standard vision datasets, without any in-domain data from the deployment environments.", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "Simone Parisi", "A. Rajeswaran", "Senthil Purushwalkam", "A. Gupta" ], "externalIds": { "DBLP": "conf/icml/ParisiRP022", "ArXiv": "2203.03580", "DOI": "10.48550/arXiv.2203.03580", "CorpusId": 247292805 }, "url": "https://www.semanticscholar.org/paper/7b3d26bd1d65ed5937c76043b5cd058260d8469f", "referenceCount": 73, "citationCount": 158, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "BC-Z: Zero-Shot Task Generalization with Robotic Imitation Learning", "abstract": "In this paper, we study the problem of enabling a vision-based robotic manipulation system to generalize to novel tasks, a long-standing challenge in robot learning. We approach the challenge from an imitation learning perspective, aiming to study how scaling and broadening the data collected can facilitate such generalization. 
To that end, we develop an interactive and flexible imitation learning system that can learn from both demonstrations and interventions and can be conditioned on different forms of information that convey the task, including pre-trained embeddings of natural language or videos of humans performing the task. When scaling data collection on a real robot to more than 100 distinct tasks, we find that this system can perform 24 unseen manipulation tasks with an average success rate of 44%, without any robot demonstrations for those tasks.", "year": 2022, "venue": "Conference on Robot Learning", "authors": [ "Eric Jang", "A. Irpan", "Mohi Khansari", "Daniel Kappler", "F. Ebert", "Corey Lynch", "S. Levine", "Chelsea Finn" ], "externalIds": { "ArXiv": "2202.02005", "DBLP": "journals/corr/abs-2202-02005", "CorpusId": 237257594 }, "url": "https://www.semanticscholar.org/paper/1d803f07e4591bd67c358eef715bcd443e821894", "referenceCount": 56, "citationCount": 362, "influentialCitationCount": 40, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Human Hands as Probes for Interactive Object Understanding", "abstract": "Interactive object understanding, or what we can do to objects and how is a long-standing goal of computer vision. In this paper, we tackle this problem through observation of human hands in in-the-wild egocentric videos. We demonstrate that observation of what human hands interact with and how can provide both the relevant data and the necessary supervision. Attending to hands, readily localizes and stabilizes active objects for learning and reveals places where interactions with objects occur. Analyzing the hands shows what we can do to objects and how. We apply these basic principles on the EPIC-KITCHENS dataset, and successfully learn state-sensitive features, and object affordances (regions of interaction and afforded grasps), purely by observing hands in egocentric videos.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Mohit Goyal", "Sahil Modi", "Rishabh Goyal", "Saurabh Gupta" ], "externalIds": { "DBLP": "journals/corr/abs-2112-09120", "ArXiv": "2112.09120", "DOI": "10.1109/CVPR52688.2022.00329", "CorpusId": 245218499 }, "url": "https://www.semanticscholar.org/paper/701ce6e257e0a9cdc4efb2a266edf870781f9cfe", "referenceCount": 80, "citationCount": 40, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Ego4D: Around the World in 3,000 Hours of Egocentric Video", "abstract": "We introduce Ego4D, a massive-scale egocentric video dataset and benchmark suite. It offers 3,670 hours of dailylife activity video spanning hundreds of scenarios (household, outdoor, workplace, leisure, etc.) captured by 931 unique camera wearers from 74 worldwide locations and 9 different countries. The approach to collection is designed to uphold rigorous privacy and ethics standards, with consenting participants and robust de-identification procedures where relevant. Ego4D dramatically expands the volume of diverse egocentric video footage publicly available to the research community. Portions of the video are accompanied by audio, 3D meshes of the environment, eye gaze, stereo, and/or synchronized videos from multiple egocentric cameras at the same event. 
Furthermore, we present a host of new benchmark challenges centered around understanding the first-person visual experience in the past (querying an episodic memory), present (analyzing hand-object manipulation, audio-visual conversation, and social interactions), and future (forecasting activities). By publicly sharing this massive annotated dataset and benchmark suite, we aim to push the frontier of first-person perception. Project page: https://ego4d-data.org/", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "K. Grauman", "Andrew Westbury", "Eugene Byrne", "Zachary Chavis", "Antonino Furnari", "Rohit Girdhar", "Jackson Hamburger", "Hao Jiang", "Miao Liu", "Xingyu Liu", "Miguel Martin", "Tushar Nagarajan", "Ilija Radosavovic", "Santhosh K. Ramakrishnan", "Fiona Ryan", "J. Sharma", "Michael Wray", "Mengmeng Xu", "Eric Z. Xu", "Chen Zhao", "Siddhant Bansal", "Dhruv Batra", "Vincent Cartillier", "S. Crane", "Tien Do", "Morrie Doulaty", "Akshay Erapalli", "Christoph Feichtenhofer", "A. Fragomeni", "Qichen Fu", "Christian Fuegen", "A. Gebreselasie", "Cristina González", "James M. Hillis", "Xuhua Huang", "Yifei Huang", "Wenqi Jia", "Weslie Khoo", "J. Kolár", "Satwik Kottur", "Anurag Kumar", "F. Landini", "Chao Li", "Yanghao Li", "Zhenqiang Li", "K. Mangalam", "Raghava Modhugu", "Jonathan Munro", "Tullie Murrell", "Takumi Nishiyasu", "Will Price", "Paola Ruiz Puentes", "Merey Ramazanova", "Leda Sari", "K. Somasundaram", "Audrey Southerland", "Yusuke Sugano", "Ruijie Tao", "Minh Vo", "Yuchen Wang", "Xindi Wu", "Takuma Yagi", "Yunyi Zhu", "P. Arbeláez", "David J. Crandall", "D. Damen", "G. Farinella", "Bernard Ghanem", "V. Ithapu", "C. V. Jawahar", "H. Joo", "Kris Kitani", "Haizhou Li", "Richard A. Newcombe", "A. Oliva", "H. Park", "James M. Rehg", "Yoichi Sato", "Jianbo Shi", "Mike Zheng Shou", "A. Torralba", "L. Torresani", "Mingfei Yan", "J. Malik" ], "externalIds": { "DBLP": "journals/corr/abs-2110-07058", "ArXiv": "2110.07058", "DOI": "10.1109/CVPR52688.2022.01842", "CorpusId": 238856888, "PubMed": "39058617" }, "url": "https://www.semanticscholar.org/paper/848eb8367785910c2fe31372605954ad8f9dfe6c", "referenceCount": 227, "citationCount": 722, "influentialCitationCount": 117, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "DexMV: Imitation Learning for Dexterous Manipulation from Human Videos", "abstract": null, "year": 2021, "venue": "European Conference on Computer Vision", "authors": [ "Yuzhe Qin", "Yueh-Hua Wu", "Shaowei Liu", "Hanwen Jiang", "Ruihan Yang", "Yang Fu", "Xiaolong Wang" ], "externalIds": { "DBLP": "conf/eccv/QinWLJYFW22", "ArXiv": "2108.05877", "DOI": "10.1007/978-3-031-19842-7_33", "CorpusId": 236986915 }, "url": "https://www.semanticscholar.org/paper/93230bed6246400949ac514df46fc1c56af80ec2", "referenceCount": 98, "citationCount": 120, "influentialCitationCount": 8, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning by Watching: Physical Imitation of Manipulation Skills from Human Videos", "abstract": "Learning from visual data opens the potential to accrue a large range of manipulation behaviors by leveraging human demonstrations without specifying each of them mathe-matically, but rather through natural task specification. In this paper, we present Learning by Watching (LbW), an algorithmic framework for policy learning through imitation from a single video specifying the task. The key insights of our method are two-fold. 
First, since the human arms may not have the same morphology as robot arms, our framework learns unsupervised human to robot translation to overcome the morphology mis-match issue. Second, to capture the details in salient regions that are crucial for learning state representations, our model performs unsupervised keypoint detection on the translated robot videos. The detected keypoints form a structured representation that contains semantically meaningful information and can be used directly for computing reward and policy learning. We evaluate the effectiveness of our LbW framework on five robot manipulation tasks, including reaching, pushing, sliding, coffee making, and drawer closing. Extensive experimental evaluations demonstrate that our method performs favorably against the state-of-the-art approaches. More results and analysis are available at pair.toronto.edu/lbw-kp/.", "year": 2021, "venue": "IEEE/RJS International Conference on Intelligent RObots and Systems", "authors": [ "Haoyu Xiong", "Quanzhou Li", "Yun-Chun Chen", "Homanga Bharadhwaj", "Samarth Sinha", "Animesh Garg" ], "externalIds": { "DBLP": "conf/iros/XiongLCBSG21", "ArXiv": "2101.07241", "DOI": "10.1109/IROS51168.2021.9636080", "CorpusId": 231632575 }, "url": "https://www.semanticscholar.org/paper/a7b7eed040046fba519230218e550dfd71185d86", "referenceCount": 56, "citationCount": 67, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Where2Act: From Pixels to Actions for Articulated 3D Objects", "abstract": "One of the fundamental goals of visual perception is to allow agents to meaningfully interact with their environment. In this paper, we take a step towards that long-term goal – we extract highly localized actionable information related to elementary actions such as pushing or pulling for articulated objects with movable parts. For example, given a drawer, our network predicts that applying a pulling force on the handle opens the drawer. We propose, discuss, and evaluate novel network architectures that given image and depth data, predict the set of actions possible at each pixel, and the regions over articulated parts that are likely to move under the force. We propose a learning-from-interaction framework with an online data sampling strategy that allows us to train the network in simulation (SAPIEN) and generalizes across categories. Check the website for code and data release.", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Kaichun Mo", "L. Guibas", "Mustafa Mukadam", "A. Gupta", "Shubham Tulsiani" ], "externalIds": { "ArXiv": "2101.02692", "DBLP": "journals/corr/abs-2101-02692", "DOI": "10.1109/ICCV48922.2021.00674", "CorpusId": 230800148 }, "url": "https://www.semanticscholar.org/paper/197037c0d6612d1510f98eb2a1719da05bede103", "referenceCount": 61, "citationCount": 142, "influentialCitationCount": 26, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Visual Imitation Made Easy", "abstract": "Visual imitation learning provides a framework for learning complex manipulation behaviors by leveraging human demonstrations. However, current interfaces for imitation such as kinesthetic teaching or teleoperation prohibitively restrict our ability to efficiently collect large-scale data in the wild. Obtaining such diverse demonstration data is paramount for the generalization of learned skills to novel scenarios. 
In this work, we present an alternate interface for imitation that simplifies the data collection process while allowing for easy transfer to robots. We use commercially available reacher-grabber assistive tools both as a data collection device and as the robot's end-effector. To extract action information from these visual demonstrations, we use off-the-shelf Structure from Motion (SfM) techniques in addition to training a finger detection network. We experimentally evaluate on two challenging tasks: non-prehensile pushing and prehensile stacking, with 1000 diverse demonstrations for each task. For both tasks, we use standard behavior cloning to learn executable policies from the previously collected offline demonstrations. To improve learning performance, we employ a variety of data augmentations and provide an extensive analysis of its effects. Finally, we demonstrate the utility of our interface by evaluating on real robotic scenarios with previously unseen objects and achieve a 87% success rate on pushing and a 62% success rate on stacking. Robot videos are available at this https URL.", "year": 2020, "venue": "Conference on Robot Learning", "authors": [ "S. Young", "Dhiraj Gandhi", "Shubham Tulsiani", "A. Gupta", "P. Abbeel", "Lerrel Pinto" ], "externalIds": { "ArXiv": "2008.04899", "DBLP": "journals/corr/abs-2008-04899", "MAG": "3048833305", "CorpusId": 221095826 }, "url": "https://www.semanticscholar.org/paper/59d8b07813df4a9d7fb750fe65bb54dabf160788", "referenceCount": 48, "citationCount": 107, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "AVID: Learning Multi-Stage Tasks via Pixel-Level Translation of Human Videos", "abstract": "Robotic reinforcement learning (RL) holds the promise of enabling robots to learn complex behaviors through experience. However, realizing this promise for long-horizon tasks in the real world requires mechanisms to reduce human burden in terms of defining the task and scaffolding the learning process. In this paper, we study how these challenges can be alleviated with an automated robotic learning framework, in which multi-stage tasks are defined simply by providing videos of a human demonstrator and then learned autonomously by the robot from raw image observations. A central challenge in imitating human videos is the difference in appearance between the human and robot, which typically requires manual correspondence. We instead take an automated approach and perform pixel-level image translation via CycleGAN to convert the human demonstration into a video of a robot, which can then be used to construct a reward function for a model-based RL algorithm. The robot then learns the task one stage at a time, automatically learning how to reset each stage to retry it multiple times without human-provided resets. This makes the learning process largely automatic, from intuitive task specification via a video to automated training with minimal human intervention. We demonstrate that our approach is capable of learning complex tasks, such as operating a coffee machine, directly from raw image observations, requiring only 20 minutes to provide human demonstrations and about 180 minutes of robot interaction.", "year": 2019, "venue": "Robotics: Science and Systems", "authors": [ "Laura M. Smith", "Nikita Dhawan", "Marvin Zhang", "P. Abbeel", "S. 
Levine" ], "externalIds": { "MAG": "2996086858", "DBLP": "conf/rss/SmithDZAL20", "ArXiv": "1912.04443", "DOI": "10.15607/rss.2020.xvi.024", "CorpusId": 209140723 }, "url": "https://www.semanticscholar.org/paper/465c4fe8e4e4d43cfc89802a76b99bbcaaaa565d", "referenceCount": 64, "citationCount": 132, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ROBOTURK: A Crowdsourcing Platform for Robotic Skill Learning through Imitation", "abstract": "Imitation Learning has empowered recent advances in learning robotic manipulation tasks by addressing shortcomings of Reinforcement Learning such as exploration and reward specification. However, research in this area has been limited to modest-sized datasets due to the difficulty of collecting large quantities of task demonstrations through existing mechanisms. This work introduces RoboTurk to address this challenge. RoboTurk is a crowdsourcing platform for high quality 6-DoF trajectory based teleoperation through the use of widely available mobile devices (e.g. iPhone). We evaluate RoboTurk on three manipulation tasks of varying timescales (15-120s) and observe that our user interface is statistically similar to special purpose hardware such as virtual reality controllers in terms of task completion times. Furthermore, we observe that poor network conditions, such as low bandwidth and high delay links, do not substantially affect the remote users' ability to perform task demonstrations successfully on RoboTurk. Lastly, we demonstrate the efficacy of RoboTurk through the collection of a pilot dataset; using RoboTurk, we collected 137.5 hours of manipulation data from remote workers, amounting to over 2200 successful task demonstrations in 22 hours of total system usage. We show that the data obtained through RoboTurk enables policy learning on multi-step manipulation tasks with sparse rewards and that using larger quantities of demonstrations during policy learning provides benefits in terms of both learning consistency and final performance. For additional results, videos, and to download our pilot dataset, visit $\\href{this http URL}{\\texttt{roboturk.stanford.edu}}$", "year": 2018, "venue": "Conference on Robot Learning", "authors": [ "Ajay Mandlekar", "Yuke Zhu", "Animesh Garg", "Jonathan Booher", "Max Spero", "Albert Tung", "J. Gao", "John Emmons", "Anchit Gupta", "Emre Orbay", "S. Savarese", "Li Fei-Fei" ], "externalIds": { "ArXiv": "1811.02790", "MAG": "2898634286", "DBLP": "conf/corl/MandlekarZGBSTG18", "CorpusId": 53057199 }, "url": "https://www.semanticscholar.org/paper/cda470bede832f2965e594f9bdee79d6973a91e9", "referenceCount": 48, "citationCount": 228, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "Manipulation", "abstract": null, "year": 2018, "venue": "Encyclopedia of Evolutionary Psychological Science", "authors": [ "A. Cousins", "Madeleine Fugère" ], "externalIds": { "DOI": "10.1016/b978-0-12-086425-6.50022-x", "CorpusId": 71231642 }, "url": "https://www.semanticscholar.org/paper/09fb12673e64310abefa1dae2b6a438f9a2af6d0", "referenceCount": 138, "citationCount": 126, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "QT-Opt: Scalable Deep Reinforcement Learning for Vision-Based Robotic Manipulation", "abstract": "In this paper, we study the problem of learning vision-based dynamic manipulation skills using a scalable reinforcement learning approach. 
We study this problem in the context of grasping, a longstanding challenge in robotic manipulation. In contrast to static learning behaviors that choose a grasp point and then execute the desired grasp, our method enables closed-loop vision-based control, whereby the robot continuously updates its grasp strategy based on the most recent observations to optimize long-horizon grasp success. To that end, we introduce QT-Opt, a scalable self-supervised vision-based reinforcement learning framework that can leverage over 580k real-world grasp attempts to train a deep neural network Q-function with over 1.2M parameters to perform closed-loop, real-world grasping that generalizes to 96% grasp success on unseen objects. Aside from attaining a very high success rate, our method exhibits behaviors that are quite distinct from more standard grasping systems: using only RGB vision-based perception from an over-the-shoulder camera, our method automatically learns regrasping strategies, probes objects to find the most effective grasps, learns to reposition objects and perform other non-prehensile pre-grasp manipulations, and responds dynamically to disturbances and perturbations.", "year": 2018, "venue": "Conference on Robot Learning", "authors": [ "Dmitry Kalashnikov", "A. Irpan", "P. Pastor", "Julian Ibarz", "Alexander Herzog", "Eric Jang", "Deirdre Quillen", "E. Holly", "Mrinal Kalakrishnan", "Vincent Vanhoucke", "S. Levine" ], "externalIds": { "ArXiv": "1806.10293", "DBLP": "journals/corr/abs-1806-10293", "MAG": "2951747857", "CorpusId": 49470584 }, "url": "https://www.semanticscholar.org/paper/eb37e7b76d26b75463df22b2a3aa32b6a765c672", "referenceCount": 49, "citationCount": 1319, "influentialCitationCount": 79, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "One-Shot Visual Imitation Learning via Meta-Learning", "abstract": "In order for a robot to be a generalist that can perform a wide range of jobs, it must be able to acquire a wide variety of skills quickly and efficiently in complex unstructured environments. High-capacity models such as deep neural networks can enable a robot to represent complex skills, but learning each skill from scratch then becomes infeasible. In this work, we present a meta-imitation learning method that enables a robot to learn how to learn more efficiently, allowing it to acquire new skills from just a single demonstration. Unlike prior methods for one-shot imitation, our method can scale to raw pixel inputs and requires data from significantly fewer prior tasks for effective learning of new skills. Our experiments on both simulated and real robot platforms demonstrate the ability to learn new tasks, end-to-end, from a single visual demonstration.", "year": 2017, "venue": "Conference on Robot Learning", "authors": [ "Chelsea Finn", "Tianhe Yu", "Tianhao Zhang", "P. Abbeel", "S. Levine" ], "externalIds": { "MAG": "2755546070", "ArXiv": "1709.04905", "DBLP": "conf/corl/FinnYZAL17", "CorpusId": 22221787 }, "url": "https://www.semanticscholar.org/paper/482c0cbfffa77154e3c879c497f50b605297d5bc", "referenceCount": 34, "citationCount": 512, "influentialCitationCount": 47, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "End-to-end learning for lane keeping of self-driving cars", "abstract": "Lane keeping is an important feature for self-driving cars. This paper presents an end-to-end learning approach to obtain the proper steering angle to maintain the car in the lane. 
The convolutional neural network (CNN) model takes raw image frames as input and outputs the steering angles accordingly. The model is trained and evaluated using the comma.ai dataset, which contains the front view image frames and the steering angle data captured when driving on the road. Unlike the traditional approach that manually decomposes the autonomous driving problem into technical components such as lane detection, path planning and steering control, the end-to-end model can directly steer the vehicle from the front view camera data after training. It learns how to keep in lane from human driving data. Further discussion of this end-to-end approach and its limitation are also provided.", "year": 2017, "venue": "2017 IEEE Intelligent Vehicles Symposium (IV)", "authors": [ "Zhilu Chen", "Xinming Huang" ], "externalIds": { "DBLP": "conf/ivs/Chen017", "MAG": "2740067745", "DOI": "10.1109/IVS.2017.7995975", "CorpusId": 21893787 }, "url": "https://www.semanticscholar.org/paper/0147370ca0bf1617856a42004e66ac6e97236f6e", "referenceCount": 14, "citationCount": 215, "influentialCitationCount": 9, "isOpenAccess": false, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "Learning monocular reactive UAV control in cluttered natural environments", "abstract": "Autonomous navigation for large Unmanned Aerial Vehicles (UAVs) is fairly straight-forward, as expensive sensors and monitoring devices can be employed. In contrast, obstacle avoidance remains a challenging task for Micro Aerial Vehicles (MAVs) which operate at low altitude in cluttered environments. Unlike large vehicles, MAVs can only carry very light sensors, such as cameras, making autonomous navigation through obstacles much more challenging. In this paper, we describe a system that navigates a small quadrotor helicopter autonomously at low altitude through natural forest environments. Using only a single cheap camera to perceive the environment, we are able to maintain a constant velocity of up to 1.5m/s. Given a small set of human pilot demonstrations, we use recent state-of-the-art imitation learning techniques to train a controller that can avoid trees by adapting the MAVs heading. We demonstrate the performance of our system in a more controlled environment indoors, and in real natural forest environments outdoors.", "year": 2012, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Stéphane Ross", "Narek Melik-Barkhudarov", "Kumar Shaurya Shankar", "Andreas Wendel", "Debadeepta Dey", "J. Bagnell", "M. Hebert" ], "externalIds": { "MAG": "1980969546", "DBLP": "conf/icra/RossMSWDBH13", "ArXiv": "1211.1690", "DOI": "10.1109/ICRA.2013.6630809", "CorpusId": 479635 }, "url": "https://www.semanticscholar.org/paper/0f0d11429e5aaecbc9fce8445afaa3bad7a74888", "referenceCount": 33, "citationCount": 422, "influentialCitationCount": 24, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "ImageNet: A large-scale hierarchical image database", "abstract": "The explosion of image data on the Internet has the potential to foster more sophisticated and robust models and algorithms to index, retrieve, organize and interact with images and multimedia data. But exactly how such data can be harnessed and organized remains a critical problem. We introduce here a new database called “ImageNet”, a large-scale ontology of images built upon the backbone of the WordNet structure. 
ImageNet aims to populate the majority of the 80,000 synsets of WordNet with an average of 500-1000 clean and full resolution images. This will result in tens of millions of annotated images organized by the semantic hierarchy of WordNet. This paper offers a detailed analysis of ImageNet in its current state: 12 subtrees with 5247 synsets and 3.2 million images in total. We show that ImageNet is much larger in scale and diversity and much more accurate than the current image datasets. Constructing such a large-scale database is a challenging task. We describe the data collection scheme with Amazon Mechanical Turk. Lastly, we illustrate the usefulness of ImageNet through three simple applications in object recognition, image classification and automatic object clustering. We hope that the scale, accuracy, diversity and hierarchical structure of ImageNet can offer unparalleled opportunities to researchers in the computer vision community and beyond.", "year": 2009, "venue": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "authors": [ "Jia Deng", "Wei Dong", "R. Socher", "Li-Jia Li", "K. Li", "Li Fei-Fei" ], "externalIds": { "DBLP": "conf/cvpr/DengDSLL009", "MAG": "2108598243", "DOI": "10.1109/CVPR.2009.5206848", "CorpusId": 57246310 }, "url": "https://www.semanticscholar.org/paper/d2c733e34d48784a37d717fe43d9e93277a8c53e", "referenceCount": 27, "citationCount": 56678, "influentialCitationCount": 8947, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Track2Act: Predicting Point Tracks from Internet Videos enables Diverse Zero-shot Robot Manipulation", "abstract": "We seek to learn a generalizable goal-conditioned policy that enables zero-shot robot manipulation — interacting with unseen objects in novel scenes without test-time adaptation. While typical approaches rely on a large amount of demonstration data for such generalization, we propose an approach that leverages web videos to predict plausible interaction plans and learns a task-agnostic transformation to obtain robot actions in the real world. Our framework, Track2Act predicts tracks of how points in an image should move in future time-steps based on a goal, and can be trained with diverse videos on the web including those of humans and robots manipulating everyday objects. We use these 2D track predictions to infer a sequence of rigid transforms of the object to be manipulated, and obtain robot end-effector poses that can be executed in an open-loop manner. We then refine this open-loop plan by predicting residual actions through a closed loop policy trained with a few embodiment-specific demonstrations. We show that this approach of combining scalably learned track prediction with a residual policy requiring minimal in-domain robot-specific data enables zero-shot robot manipulation, and present a wide array of real-world robot manipulation results across unseen tasks, objects, and scenes.
https://homangab.github.io/track2act/ 1", "year": 2024, "venue": "arXiv.org", "authors": [ "Homanga Bharadhwaj", "Roozbeh Mottaghi", "Abhinav Gupta", "Shubham Tulsiani" ], "externalIds": { "DBLP": "journals/corr/abs-2405-01527", "DOI": "10.48550/arXiv.2405.01527", "CorpusId": 269502317 }, "url": "https://www.semanticscholar.org/paper/7eda2b5dee846b90bfd96a8f839564ac29633d7b", "referenceCount": 73, "citationCount": 16, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "“Gemini: a family of highly capable multimodal models,”", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "TabEBM: A Tabular Data Augmentation Method with Distinct Class-Specific Energy-Based Models": { "paper_title": "TabEBM: A Tabular Data Augmentation Method with Distinct Class-Specific Energy-Based Models", "arxiv_id": "2409.16118v1", "keyword": "generative model", "authors": [ "Andrei Margeloiu", "Xiangjian Jiang", "Nikola Simidjievski", "Mateja Jamnik" ], "references": [ { "title": "Causality for Tabular Data Synthesis: A High-Order Structure Causal Benchmark Framework", "abstract": "Tabular synthesis models remain ineffective at capturing complex dependencies, and the quality of synthetic data is still insufficient for comprehensive downstream tasks, such as prediction under distribution shifts, automated decision-making, and cross-table understanding. A major challenge is the lack of prior knowledge about underlying structures and high-order relationships in tabular data. We argue that a systematic evaluation on high-order structural information for tabular data synthesis is the first step towards solving the problem. In this paper, we introduce high-order structural causal information as natural prior knowledge and provide a benchmark framework for the evaluation of tabular synthesis models. The framework allows us to generate benchmark datasets with a flexible range of data generation processes and to train tabular synthesis models using these datasets for further evaluation. We propose multiple benchmark tasks, high-order metrics, and causal inference tasks as downstream tasks for evaluating the quality of synthetic data generated by the trained models. Our experiments demonstrate to leverage the benchmark framework for evaluating the model capability of capturing high-order structural causal information. Furthermore, our benchmarking results provide an initial assessment of state-of-the-art tabular synthesis models. They have clearly revealed significant gaps between ideal and actual performance and how baseline methods differ. Our benchmark framework is available at URL https://github.com/TURuibo/CauTabBench.", "year": 2024, "venue": "arXiv.org", "authors": [ "Ruibo Tu", "Zineb Senane", "Lele Cao", "Cheng Zhang", "H. Kjellström", "G. Henter" ], "externalIds": { "ArXiv": "2406.08311", "DBLP": "journals/corr/abs-2406-08311", "DOI": "10.48550/arXiv.2406.08311", "CorpusId": 270391998 }, "url": "https://www.semanticscholar.org/paper/612b10419f017d20c8b229a25d5e6939dd80c084", "referenceCount": 69, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "TabPFGen - Tabular Data Generation with TabPFN", "abstract": "Advances in deep generative modelling have not translated well to tabular data. 
We argue that this is caused by a mismatch in structure between popular generative models and discriminative models of tabular data. We thus devise a technique to turn TabPFN -- a highly performant transformer initially designed for in-context discriminative tabular tasks -- into an energy-based generative model, which we dub TabPFGen. This novel framework leverages the pre-trained TabPFN as part of the energy function and does not require any additional training or hyperparameter tuning, thus inheriting TabPFN's in-context learning capability. We can sample from TabPFGen analogously to other energy-based models. We demonstrate strong results on standard generative modelling tasks, including data augmentation, class-balancing, and imputation, unlocking a new frontier of tabular data generation.", "year": 2024, "venue": "arXiv.org", "authors": [ "Junwei Ma", "Apoorv Dankar", "George Stein", "Guangwei Yu", "Anthony L. Caterini" ], "externalIds": { "ArXiv": "2406.05216", "DBLP": "journals/corr/abs-2406-05216", "DOI": "10.48550/arXiv.2406.05216", "CorpusId": 270371434 }, "url": "https://www.semanticscholar.org/paper/f754247b58189aacc1060e9b17ba1aa54bd754cd", "referenceCount": 0, "citationCount": 6, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Addressing Data Scarcity in the Medical Domain: A GPT-Based Approach for Synthetic Data Generation and Feature Extraction", "abstract": "This research confronts the persistent challenge of data scarcity in medical machine learning by introducing a pioneering methodology that harnesses the capabilities of Generative Pre-trained Transformers (GPT). In response to the limitations posed by a dearth of labeled medical data, our approach involves the synthetic generation of comprehensive patient discharge messages, setting a new standard in the field with GPT autonomously generating 20 fields. Through a meticulous review of the existing literature, we systematically explore GPT’s aptitude for synthetic data generation and feature extraction, providing a robust foundation for subsequent phases of the research. The empirical demonstration showcases the transformative potential of our proposed solution, presenting over 70 patient discharge messages with synthetically generated fields, including severity and chances of hospital re-admission with justification. Moreover, the data had been deployed in a mobile solution where regression algorithms autonomously identified the correlated factors for ascertaining the severity of patients’ conditions. This study not only establishes a novel and comprehensive methodology but also contributes significantly to medical machine learning, presenting the most extensive patient discharge summaries reported in the literature. The results underscore the efficacy of GPT in overcoming data scarcity challenges and pave the way for future research to refine and expand the application of GPT in diverse medical contexts.", "year": 2024, "venue": "Inf.", "authors": [ "F. 
Sufi" ], "externalIds": { "DBLP": "journals/information/Sufi24b", "DOI": "10.3390/info15050264", "CorpusId": 269622742 }, "url": "https://www.semanticscholar.org/paper/4cb7d6cfaa15045c414b213dbefdb58a31c9dc70", "referenceCount": 71, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Why Tabular Foundation Models Should Be a Research Priority", "abstract": "Recent text and image foundation models are incredibly impressive, and these models are attracting an ever-increasing portion of research resources. In this position piece we aim to shift the ML research community's priorities ever so slightly to a different modality: tabular data. Tabular data is the dominant modality in many fields, yet it is given hardly any research attention and significantly lags behind in terms of scale and power. We believe the time is now to start developing tabular foundation models, or what we coin a Large Tabular Model (LTM). LTMs could revolutionise the way science and ML use tabular data: not as single datasets that are analyzed in a vacuum, but contextualized with respect to related datasets. The potential impact is far-reaching: from few-shot tabular models to automating data science; from out-of-distribution synthetic data to empowering multidisciplinary scientific discovery. We intend to excite reflections on the modalities we study, and convince some researchers to study large tabular models.", "year": 2024, "venue": "International Conference on Machine Learning", "authors": [ "B. V. Breugel", "M. Schaar" ], "externalIds": { "DBLP": "journals/corr/abs-2405-01147", "ArXiv": "2405.01147", "DOI": "10.48550/arXiv.2405.01147", "CorpusId": 269502664 }, "url": "https://www.semanticscholar.org/paper/f89ba7a675769d6c4c2ee2b9f967fad34e6f971a", "referenceCount": 102, "citationCount": 12, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Large Language Models(LLMs) on Tabular Data: Prediction, Generation, and Understanding - A Survey", "abstract": "Recent breakthroughs in large language modeling have facilitated rigorous exploration of their application in diverse tasks related to tabular data modeling, such as prediction, tabular data synthesis, question answering, and table understanding. Each task presents unique challenges and opportunities. However, there is currently a lack of comprehensive review that summarizes and compares the key techniques, metrics, datasets, models, and optimization approaches in this research domain. This survey aims to address this gap by consolidating recent progress in these areas, offering a thorough survey and taxonomy of the datasets, metrics, and methodologies utilized. It identifies strengths, limitations, unexplored territories, and gaps in the existing literature, while providing some insights for future research directions in this vital and rapidly evolving field. It also provides relevant code and datasets references. Through this comprehensive review, we hope to provide interested readers with pertinent references and insightful perspectives, empowering them with the necessary tools and knowledge to effectively navigate and address the prevailing challenges in the field.", "year": 2024, "venue": "Trans. Mach. Learn. Res.", "authors": [ "Xi Fang", "Weijie Xu", "Fiona Anting Tan", "Jiani Zhang", "Ziqing Hu", "Yanjun Qi", "Scott Nickleach", "Diego Socolinsky", "Srinivasan H. 
Sengamedu", "Christos Faloutsos" ], "externalIds": { "ArXiv": "2402.17944", "DBLP": "journals/corr/abs-2402-17944", "DOI": "10.48550/arXiv.2402.17944", "CorpusId": 268041519 }, "url": "https://www.semanticscholar.org/paper/2046b2da23eb2f79744eb391d902da9cedf87947", "referenceCount": 221, "citationCount": 23, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Evading Data Contamination Detection for Language Models is (too) Easy", "abstract": "Large language models are widespread, with their performance on benchmarks frequently guiding user preferences for one model over another. However, the vast amount of data these models are trained on can inadvertently lead to contamination with public benchmarks, thus compromising performance measurements. While recently developed contamination detection methods try to address this issue, they overlook the possibility of deliberate contamination by malicious model providers aiming to evade detection. We argue that this setting is of crucial importance as it casts doubt on the reliability of public benchmarks. To more rigorously study this issue, we propose a categorization of both model providers and contamination detection methods. This reveals vulnerabilities in existing methods that we exploit with EAL, a simple yet effective contamination technique that significantly inflates benchmark performance while completely evading current detection methods.", "year": 2024, "venue": "arXiv.org", "authors": [ "Jasper Dekoninck", "Mark Niklas Muller", "Maximilian Baader", "Marc Fischer", "Martin T. Vechev" ], "externalIds": { "DBLP": "journals/corr/abs-2402-02823", "ArXiv": "2402.02823", "DOI": "10.48550/arXiv.2402.02823", "CorpusId": 267412617 }, "url": "https://www.semanticscholar.org/paper/4d249bbfc172d5d4360244447f9e2245e318803d", "referenceCount": 53, "citationCount": 9, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Investigating Data Contamination for Pre-training Language Models", "abstract": "Language models pre-trained on web-scale corpora demonstrate impressive capabilities on diverse downstream tasks. However, there is increasing concern whether such capabilities might arise from evaluation datasets being included in the pre-training corpus -- a phenomenon known as \\textit{data contamination} -- in a manner that artificially increases performance. There has been little understanding of how this potential contamination might influence LMs' performance on downstream tasks. In this paper, we explore the impact of data contamination at the pre-training stage by pre-training a series of GPT-2 models \\textit{from scratch}. We highlight the effect of both text contamination (\\textit{i.e.}\\ input text of the evaluation samples) and ground-truth contamination (\\textit{i.e.}\\ the prompts asked on the input and the desired outputs) from evaluation data. We also investigate the effects of repeating contamination for various downstream tasks. Additionally, we examine the prevailing n-gram-based definitions of contamination within current LLM reports, pinpointing their limitations and inadequacy. 
Our findings offer new insights into data contamination's effects on language model capabilities and underscore the need for independent, comprehensive contamination assessments in LLM studies.", "year": 2024, "venue": "arXiv.org", "authors": [ "Minhao Jiang", "Ken Ziyu Liu", "Ming Zhong", "Rylan Schaeffer", "Siru Ouyang", "Jiawei Han", "Sanmi Koyejo" ], "externalIds": { "DBLP": "journals/corr/abs-2401-06059", "ArXiv": "2401.06059", "DOI": "10.48550/arXiv.2401.06059", "CorpusId": 266933004 }, "url": "https://www.semanticscholar.org/paper/b2fda33b7c122c044a7faa185d250d59ce9e4453", "referenceCount": 27, "citationCount": 33, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Curated LLM: Synergy of LLMs and Data Curation for tabular augmentation in ultra low-data regimes", "abstract": "Machine Learning (ML) in low-data settings remains an underappreciated yet crucial problem. Hence, data augmentation methods to increase the sample size of datasets needed for ML are key to unlocking the transformative potential of ML in data-deprived regions and domains. Unfortunately, the limited training set constrains traditional tabular synthetic data generators in their ability to generate a large and diverse augmented dataset needed for ML tasks. To address this challenge, we introduce CLLM, which leverages the prior knowledge of Large Language Models (LLMs) for data augmentation in the low-data regime. However, not all the data generated by LLMs will improve downstream utility, as for any generative model. Consequently, we introduce a principled curation mechanism, leveraging learning dynamics, coupled with confidence and uncertainty metrics, to obtain a high-quality dataset. Empirically, on multiple real-world datasets, we demonstrate the superior performance of CLLM in the low-data regime compared to conventional generators. Additionally, we provide insights into the LLM generation and curation mechanism, shedding light on the features that enable them to output high-quality augmented datasets.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Nabeel Seedat", "Nicolas Huynh", "B. V. Breugel", "M. Schaar" ], "externalIds": { "DBLP": "journals/corr/abs-2312-12112", "ArXiv": "2312.12112", "DOI": "10.48550/arXiv.2312.12112", "CorpusId": 266362265 }, "url": "https://www.semanticscholar.org/paper/0a0c3d8650d2c0ffeecd491356393b93bba63fed", "referenceCount": 98, "citationCount": 8, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Understanding the Detrimental Class-level Effects of Data Augmentation", "abstract": "Data augmentation (DA) encodes invariance and provides implicit regularization critical to a model's performance in image classification tasks. However, while DA improves average accuracy, recent studies have shown that its impact can be highly class dependent: achieving optimal average accuracy comes at the cost of significantly hurting individual class accuracy by as much as 20% on ImageNet. There has been little progress in resolving class-level accuracy drops due to a limited understanding of these effects. In this work, we present a framework for understanding how DA interacts with class-level learning dynamics. 
Using higher-quality multi-label annotations on ImageNet, we systematically categorize the affected classes and find that the majority are inherently ambiguous, co-occur, or involve fine-grained distinctions, while DA controls the model's bias towards one of the closely related classes. While many of the previously reported performance drops are explained by multi-label annotations, our analysis of class confusions reveals other sources of accuracy degradation. We show that simple class-conditional augmentation strategies informed by our framework improve performance on the negatively affected classes.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "P. Kirichenko", "Mark Ibrahim", "Randall Balestriero", "Diane Bouchacourt", "Ramakrishna Vedantam", "Hamed Firooz", "Andrew Gordon Wilson" ], "externalIds": { "DBLP": "conf/nips/KirichenkoIBBVF23", "ArXiv": "2401.01764", "DOI": "10.48550/arXiv.2401.01764", "CorpusId": 266741725 }, "url": "https://www.semanticscholar.org/paper/3b99d63b05aee56ec632364d83e2c8630cb3f96a", "referenceCount": 74, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Investigating Data Contamination in Modern Benchmarks for Large Language Models", "abstract": "Recent observations have underscored a disparity between the inflated benchmark scores and the actual performance of LLMs, raising concerns about potential contamination of evaluation benchmarks. This issue is especially critical for closed-source models and certain open-source models where training data transparency is lacking. In this paper we study data contamination by proposing two methods tailored for both open-source and proprietary LLMs. We first introduce a retrieval-based system to explore potential overlaps between evaluation benchmarks and pretraining corpora. We further present a novel investigation protocol named Testset Slot Guessing (TS-Guessing), applicable to both open and proprietary models. This approach entails masking a wrong answer in a multiple-choice question and prompting the model to fill in the gap. Additionally, it involves obscuring an unlikely word in an evaluation example and asking the model to produce it. We find that certain commercial LLMs could surprisingly guess the missing option in various test sets. Specifically, in the MMLU benchmark, ChatGPT and GPT-4 demonstrated an exact match rate of 52% and 57%, respectively, in guessing the missing options in benchmark test data. We hope these results underscore the need for more robust evaluation methodologies and benchmarks in the field.", "year": 2023, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Chunyuan Deng", "Yilun Zhao", "Xiangru Tang", "Mark B. Gerstein", "Arman Cohan" ], "externalIds": { "DBLP": "journals/corr/abs-2311-09783", "ACL": "2024.naacl-long.482", "ArXiv": "2311.09783", "DOI": "10.48550/arXiv.2311.09783", "CorpusId": 265220695 }, "url": "https://www.semanticscholar.org/paper/af565483dfbe3b0fa4fe9f715170666a06bce5ac", "referenceCount": 46, "citationCount": 14, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ForecastPFN: Synthetically-Trained Zero-Shot Forecasting", "abstract": "The vast majority of time-series forecasting approaches require a substantial training dataset. However, many real-life forecasting applications have very little initial observations, sometimes just 40 or fewer. 
Thus, the applicability of most forecasting methods is restricted in data-sparse commercial applications. While there is recent work in the setting of very limited initial data (so-called `zero-shot' forecasting), its performance is inconsistent depending on the data used for pretraining. In this work, we take a different approach and devise ForecastPFN, the first zero-shot forecasting model trained purely on a novel synthetic data distribution. ForecastPFN is a prior-data fitted network, trained to approximate Bayesian inference, which can make predictions on a new time series dataset in a single forward pass. Through extensive experiments, we show that zero-shot predictions made by ForecastPFN are more accurate and faster compared to state-of-the-art forecasting methods, even when the other methods are allowed to train on hundreds of additional in-distribution data points.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Samuel Dooley", "Gurnoor Singh Khurana", "Chirag Mohapatra", "Siddartha Naidu", "Colin White" ], "externalIds": { "DBLP": "journals/corr/abs-2311-01933", "ArXiv": "2311.01933", "DOI": "10.48550/arXiv.2311.01933", "CorpusId": 265019400 }, "url": "https://www.semanticscholar.org/paper/2d2bfb068e3441aaa9743043603d00f860dd0308", "referenceCount": 63, "citationCount": 26, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Reimagining Synthetic Tabular Data Generation through Data-Centric AI: A Comprehensive Benchmark", "abstract": "Synthetic data serves as an alternative in training machine learning models, particularly when real-world data is limited or inaccessible. However, ensuring that synthetic data mirrors the complex nuances of real-world data is a challenging task. This paper addresses this issue by exploring the potential of integrating data-centric AI techniques which profile the data to guide the synthetic data generation process. Moreover, we shed light on the often ignored consequences of neglecting these data profiles during synthetic data generation -- despite seemingly high statistical fidelity. Subsequently, we propose a novel framework to evaluate the integration of data profiles to guide the creation of more representative synthetic data. In an empirical study, we evaluate the performance of five state-of-the-art models for tabular data generation on eleven distinct tabular datasets. The findings offer critical insights into the successes and limitations of current synthetic data generation techniques. Finally, we provide practical recommendations for integrating data-centric insights into the synthetic data generation process, with a specific focus on classification performance, model selection, and feature selection. This study aims to reevaluate conventional approaches to synthetic data generation and promote the application of data-centric AI techniques in improving the quality and effectiveness of synthetic data.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Lasse Hansen", "Nabeel Seedat", "M. 
Schaar", "Andrija Petrovic" ], "externalIds": { "DBLP": "journals/corr/abs-2310-16981", "ArXiv": "2310.16981", "DOI": "10.48550/arXiv.2310.16981", "CorpusId": 264490707 }, "url": "https://www.semanticscholar.org/paper/b4874b9fae1ed199c2fc19d8312d25bfe2044d4b", "referenceCount": 44, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Private Synthetic Data Meets Ensemble Learning", "abstract": "When machine learning models are trained on synthetic data and then deployed on real data, there is often a performance drop due to the distribution shift between synthetic and real data. In this paper, we introduce a new ensemble strategy for training downstream models, with the goal of enhancing their performance when used on real data. We generate multiple synthetic datasets by applying a differential privacy (DP) mechanism several times in parallel and then ensemble the downstream models trained on these datasets. While each synthetic dataset might deviate more from the real data distribution, they collectively increase sample diversity. This may enhance the robustness of downstream models against distribution shifts. Our extensive experiments reveal that while ensembling does not enhance downstream performance (compared with training a single model) for models trained on synthetic data generated by marginal-based or workload-based DP mechanisms, our proposed ensemble strategy does improve the performance for models trained using GAN-based DP mechanisms in terms of both accuracy and calibration of downstream models.", "year": 2023, "venue": "arXiv.org", "authors": [ "Haoyuan Sun", "Navid Azizan", "Akash Srivastava", "Hao Wang" ], "externalIds": { "ArXiv": "2310.09729", "DBLP": "journals/corr/abs-2310-09729", "DOI": "10.48550/arXiv.2310.09729", "CorpusId": 264146830 }, "url": "https://www.semanticscholar.org/paper/01c56d14fe41f810589849a742ac6048efcab76d", "referenceCount": 51, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Mixed-Type Tabular Data Synthesis with Score-based Diffusion in Latent Space", "abstract": "Recent advances in tabular data generation have greatly enhanced synthetic data quality. However, extending diffusion models to tabular data is challenging due to the intricately varied distributions and a blend of data types of tabular data. This paper introduces Tabsyn, a methodology that synthesizes tabular data by leveraging a diffusion model within a variational autoencoder (VAE) crafted latent space. The key advantages of the proposed Tabsyn include (1) Generality: the ability to handle a broad spectrum of data types by converting them into a single unified space and explicitly capture inter-column relations; (2) Quality: optimizing the distribution of latent embeddings to enhance the subsequent training of diffusion models, which helps generate high-quality synthetic data, (3) Speed: much fewer number of reverse steps and faster synthesis speed than existing diffusion-based methods. Extensive experiments on six datasets with five metrics demonstrate that Tabsyn outperforms existing methods. 
Specifically, it reduces the error rates by 86% and 67% for column-wise distribution and pair-wise column correlation estimations compared with the most competitive baselines.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Hengrui Zhang", "Jiani Zhang", "Balasubramaniam Srinivasan", "Zhengyuan Shen", "Xiao Qin", "Christos Faloutsos", "H. Rangwala", "G. Karypis" ], "externalIds": { "DBLP": "conf/iclr/Zhang0SSQFRK24", "ArXiv": "2310.09656", "DOI": "10.48550/arXiv.2310.09656", "CorpusId": 264146605 }, "url": "https://www.semanticscholar.org/paper/e274f9e367fb3b7fd1a777d6984df7b7f13c3411", "referenceCount": 38, "citationCount": 26, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GPFN: Prior-Data Fitted Networks for Genomic Prediction", "abstract": "Genomic Prediction (GP) methods predict the breeding value of unphenotyped individuals in order to select parental candidates in breeding populations. Among models for GP, classical linear models have remained consistently popular, while more complex nonlinear methods such as deep neural networks have shown comparable accuracy at best. In this work we propose the Genomic Prior-Data Fitted Network (GPFN), a new paradigm for GP. GPFNs perform amortized Bayesian inference by drawing hundreds of thousands or millions of synthetic breeding populations during the prior fitting phase. This allows GPFNs to be deployed without requiring any training or tuning, providing predictions in a single inference pass. On three populations of crop plants across two different crop species, GPFNs perform significantly better than the linear baseline on 13 out of 16 traits. On a challenging between-families NAM prediction task, the GPFN performs significantly better in 3 locations while only falling behind in one. GPFNs represent a completely new direction for the field of genomic prediction, and have the potential to unlock levels of selection accuracy not possible with existing methods.", "year": 2023, "venue": "bioRxiv", "authors": [ "Jordan R. Ubbens", "I. Stavness", "Andrew G. Sharpe" ], "externalIds": { "DOI": "10.1101/2023.09.20.558648", "CorpusId": 262840412 }, "url": "https://www.semanticscholar.org/paper/fb44181f174e8d0e4d2791e7bb8b214a47ef2bb9", "referenceCount": 25, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Biology" ] }, { "title": "On the Usefulness of Synthetic Tabular Data Generation", "abstract": "Despite recent advances in synthetic data generation, the scientific community still lacks a unified consensus on its usefulness. It is commonly believed that synthetic data can be used for both data exchange and boosting machine learning (ML) training. Privacy-preserving synthetic data generation can accelerate data exchange for downstream tasks, but there is not enough evidence to show how or why synthetic data can boost ML training. In this study, we benchmarked ML performance using synthetic tabular data for four use cases: data sharing, data augmentation, class balancing, and data summarization. We observed marginal improvements for the balancing use case on some datasets. 
However, we conclude that there is not enough evidence to claim that synthetic tabular data is useful for ML training.", "year": 2023, "venue": "arXiv.org", "authors": [ "Dionysis Manousakas", "Sergül Aydöre" ], "externalIds": { "ArXiv": "2306.15636", "DBLP": "journals/corr/abs-2306-15636", "DOI": "10.48550/arXiv.2306.15636", "CorpusId": 259262225 }, "url": "https://www.semanticscholar.org/paper/8237e8643eab115018fb0c9eef548ec66595eaee", "referenceCount": 25, "citationCount": 5, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ProtoGate: Prototype-based Neural Networks with Global-to-local Feature Selection for Tabular Biomedical Data", "abstract": "Tabular biomedical data poses challenges in machine learning because it is often high-dimensional and typically low-sample-size (HDLSS). Previous research has attempted to address these challenges via local feature selection, but existing approaches often fail to achieve optimal performance due to their limitation in identifying globally important features and their susceptibility to the co-adaptation problem. In this paper, we propose ProtoGate, a prototype-based neural model for feature selection on HDLSS data. ProtoGate first selects instance-wise features via adaptively balancing global and local feature selection. Furthermore, ProtoGate employs a non-parametric prototype-based prediction mechanism to tackle the co-adaptation problem, ensuring the feature selection results and predictions are consistent with underlying data clusters. We conduct comprehensive experiments to evaluate the performance and interpretability of ProtoGate on synthetic and real-world datasets. The results show that ProtoGate generally outperforms state-of-the-art methods in prediction accuracy by a clear margin while providing high-fidelity feature selection and explainable predictions. Code is available at https://github.com/SilenceX12138/ProtoGate.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Xiangjian Jiang", "Andrei Margeloiu", "Nikola Simidjievski", "M. Jamnik" ], "externalIds": { "ArXiv": "2306.12330", "DBLP": "conf/icml/JiangMSJ24", "CorpusId": 270210612 }, "url": "https://www.semanticscholar.org/paper/82cfe0390daf75a13f4cd4ab4dff0a329ae17178", "referenceCount": 83, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Statistical Foundations of Prior-Data Fitted Networks", "abstract": "Prior-data fitted networks (PFNs) were recently proposed as a new paradigm for machine learning. Instead of training the network to an observed training set, a fixed model is pre-trained offline on small, simulated training sets from a variety of tasks. The pre-trained model is then used to infer class probabilities in-context on fresh training sets with arbitrary size and distribution. Empirically, PFNs achieve state-of-the-art performance on tasks with similar size to the ones used in pre-training. Surprisingly, their accuracy further improves when passed larger data sets during inference. This article establishes a theoretical foundation for PFNs and illuminates the statistical mechanisms governing their behavior. While PFNs are motivated by Bayesian ideas, a purely frequentistic interpretation of PFNs as pre-tuned, but untrained predictors explains their behavior. 
A predictor's variance vanishes if its sensitivity to individual training samples does and the bias vanishes only if it is appropriately localized around the test feature. The transformer architecture used in current PFN implementations ensures only the former. These findings shall prove useful for designing architectures with favorable empirical behavior.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "T. Nagler" ], "externalIds": { "DBLP": "journals/corr/abs-2305-11097", "ArXiv": "2305.11097", "DOI": "10.48550/arXiv.2305.11097", "CorpusId": 258762307 }, "url": "https://www.semanticscholar.org/paper/00fa84ae6e881a0090c7c1a9241e06d4bbead3fe", "referenceCount": 26, "citationCount": 8, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Synthetic data, real errors: how (not) to publish and use synthetic data", "abstract": "Generating synthetic data through generative models is gaining interest in the ML community and beyond, promising a future where datasets can be tailored to individual needs. Unfortunately, synthetic data is usually not perfect, resulting in potential errors in downstream tasks. In this work we explore how the generative process affects the downstream ML task. We show that the naive synthetic data approach -- using synthetic data as if it is real -- leads to downstream models and analyses that do not generalize well to real data. As a first step towards better ML in the synthetic data regime, we introduce Deep Generative Ensemble (DGE) -- a framework inspired by Deep Ensembles that aims to implicitly approximate the posterior distribution over the generative process model parameters. DGE improves downstream model training, evaluation, and uncertainty quantification, vastly outperforming the naive approach on average. The largest improvements are achieved for minority classes and low-density regions of the original data, for which the generative uncertainty is largest.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "B. V. Breugel", "Zhaozhi Qian", "M. Schaar" ], "externalIds": { "ArXiv": "2305.09235", "DBLP": "journals/corr/abs-2305-09235", "DOI": "10.48550/arXiv.2305.09235", "CorpusId": 258714748 }, "url": "https://www.semanticscholar.org/paper/5deaacd4c1a3ae6691a7ae9f4442bc8e3c09b6b2", "referenceCount": 62, "citationCount": 19, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "When Do Neural Nets Outperform Boosted Trees on Tabular Data?", "abstract": "Tabular data is one of the most commonly used types of data in machine learning. Despite recent advances in neural nets (NNs) for tabular data, there is still an active discussion on whether or not NNs generally outperform gradient-boosted decision trees (GBDTs) on tabular data, with several recent works arguing either that GBDTs consistently outperform NNs on tabular data, or vice versa. In this work, we take a step back and question the importance of this debate. To this end, we conduct the largest tabular data analysis to date, comparing 19 algorithms across 176 datasets, and we find that the 'NN vs. GBDT' debate is overemphasized: for a surprisingly high number of datasets, either the performance difference between GBDTs and NNs is negligible, or light hyperparameter tuning on a GBDT is more important than choosing between NNs and GBDTs. 
A remarkable exception is the recently-proposed prior-data fitted network, TabPFN: although it is effectively limited to training sets of size 3000, we find that it outperforms all other algorithms on average, even when randomly sampling 3000 training datapoints. Next, we analyze dozens of metafeatures to determine what properties of a dataset make NNs or GBDTs better-suited to perform well. For example, we find that GBDTs are much better than NNs at handling skewed or heavy-tailed feature distributions and other forms of dataset irregularities. Our insights act as a guide for practitioners to determine which techniques may work best on their dataset. Finally, with the goal of accelerating tabular data research, we release the TabZilla Benchmark Suite: a collection of the 36 'hardest' of the datasets we study. Our benchmark suite, codebase, and all raw results are available at https://github.com/naszilla/tabzilla.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Duncan C. McElfresh", "Sujay Khandagale", "Jonathan Valverde", "C. VishakPrasad", "Ben Feuer", "Chinmay Hegde", "Ganesh Ramakrishnan", "Micah Goldblum", "Colin White" ], "externalIds": { "ArXiv": "2305.02997", "DBLP": "conf/nips/McElfreshKVCRGW23", "DOI": "10.48550/arXiv.2305.02997", "CorpusId": 258479721 }, "url": "https://www.semanticscholar.org/paper/5e4125b3a2ec91e866d970498f8a138c5a5cc89b", "referenceCount": 73, "citationCount": 72, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Class-Balancing Diffusion Models", "abstract": "Diffusion-based models have shown the merits of generating high-quality visual data while preserving better diversity in recent studies. However, such observation is only justified with curated data distribution, where the data samples are nicely pre-processed to be uniformly distributed in terms of their labels. In practice, a long-tailed data distribution appears more common and how diffusion models perform on such classimbalanced data remains unknown. In this work, we first investigate this problem and observe significant degradation in both diversity and fidelity when the diffusion model is trained on datasets with classimbalanced distributions. Especially in tail classes, the generations largely lose diversity and we observe severe mode-collapse issues. To tackle this problem, we set from the hypothesis that the data distribution is not class-balanced, and propose Class-Balancing Diffusion Models (CBDM) that are trained with a distribution adjustment regularizer as a solution. Experiments show that images generated by CBDM exhibit higher diversity and quality in both quantitative and qualitative ways. 
Our method benchmarked the generation results on CIFAR100/CIFAR100LT dataset and shows out-standing performance on the downstream recognition task.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Yiming Qin", "Huangjie Zheng", "Jiangchao Yao", "Mingyuan Zhou", "Ya Zhang" ], "externalIds": { "DBLP": "journals/corr/abs-2305-00562", "ArXiv": "2305.00562", "DOI": "10.1109/CVPR52729.2023.01768", "CorpusId": 258427128 }, "url": "https://www.semanticscholar.org/paper/735e1cb91342e03eee3791afb13eb0e9669f9759", "referenceCount": 55, "citationCount": 22, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "CoDi: Co-evolving Contrastive Diffusion Models for Mixed-type Tabular Synthesis", "abstract": "With growing attention to tabular data these days, the attempt to apply a synthetic table to various tasks has been expanded toward various scenarios. Owing to the recent advances in generative modeling, fake data generated by tabular data synthesis models become sophisticated and realistic. However, there still exists a difficulty in modeling discrete variables (columns) of tabular data. In this work, we propose to process continuous and discrete variables separately (but being conditioned on each other) by two diffusion models. The two diffusion models are co-evolved during training by reading conditions from each other. In order to further bind the diffusion models, moreover, we introduce a contrastive learning method with a negative sampling method. In our experiments with 11 real-world tabular datasets and 8 baseline methods, we prove the efficacy of the proposed method, called CoDi.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "C. Lee", "Jayoung Kim", "Noseong Park" ], "externalIds": { "DBLP": "conf/icml/Lee0P23", "ArXiv": "2304.12654", "DOI": "10.48550/arXiv.2304.12654", "CorpusId": 258309242 }, "url": "https://www.semanticscholar.org/paper/630c35f495697ccf4194453b6a4bfe7fc69b75e3", "referenceCount": 57, "citationCount": 29, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Synthcity: facilitating innovative use cases of synthetic data in different data modalities", "abstract": "Synthcity is an open-source software package for innovative use cases of synthetic data in ML fairness, privacy and augmentation across diverse tabular data modalities, including static data, regular and irregular time series, data with censoring, multi-source data, composite data, and more. Synthcity provides the practitioners with a single access point to cutting edge research and tools in synthetic data. It also offers the community a playground for rapid experimentation and prototyping, a one-stop-shop for SOTA benchmarks, and an opportunity for extending research impact. The library can be accessed on GitHub (https://github.com/vanderschaarlab/synthcity) and pip (https://pypi.org/project/synthcity/). We warmly invite the community to join the development effort by providing feedback, reporting bugs, and contributing code.", "year": 2023, "venue": "arXiv.org", "authors": [ "Zhaozhi Qian", "B. Cebere", "M. 
Schaar" ], "externalIds": { "DBLP": "journals/corr/abs-2301-07573", "ArXiv": "2301.07573", "DOI": "10.48550/arXiv.2301.07573", "CorpusId": 255998576 }, "url": "https://www.semanticscholar.org/paper/f96e0a6b39259f1b07815fc88f0e8e7b411183aa", "referenceCount": 37, "citationCount": 37, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Weight Predictor Network with Feature Selection for Small Sample Tabular Biomedical Data", "abstract": "Tabular biomedical data is often high-dimensional but with a very small number of samples. Although recent work showed that well-regularised simple neural networks could outperform more sophisticated architectures on tabular data, they are still prone to overfitting on tiny datasets with many potentially irrelevant features. To combat these issues, we propose Weight Predictor Network with Feature Selection (WPFS) for learning neural networks from high-dimensional and small sample data by reducing the number of learnable parameters and simultaneously performing feature selection. In addition to the classification network, WPFS uses two small auxiliary networks that together output the weights of the first layer of the classification model. We evaluate on nine real-world biomedical datasets and demonstrate that WPFS outperforms other standard as well as more recent methods typically applied to tabular data. Furthermore, we investigate the proposed feature selection mechanism and show that it improves performance while providing useful insights into the learning task.", "year": 2022, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Andrei Margeloiu", "Nikola Simidjievski", "Pietro Lio'", "M. Jamnik" ], "externalIds": { "DBLP": "journals/corr/abs-2211-15616", "ArXiv": "2211.15616", "DOI": "10.48550/arXiv.2211.15616", "CorpusId": 254044330 }, "url": "https://www.semanticscholar.org/paper/830f264dd6d04542abdbcc54488ef491d9bd358a", "referenceCount": 48, "citationCount": 9, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Data augmentation: A comprehensive survey of modern approaches", "abstract": null, "year": 2022, "venue": "Array", "authors": [ "A. Mumuni", "F. Mumuni" ], "externalIds": { "DBLP": "journals/array/MumuniM22", "DOI": "10.1016/j.array.2022.100258", "CorpusId": 253570504 }, "url": "https://www.semanticscholar.org/paper/3128ca0aabab35cf66275d7b9ec12bb3a2462677", "referenceCount": 139, "citationCount": 182, "influentialCitationCount": 8, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Artificial intelligence for strengthening healthcare systems in low- and middle-income countries: a systematic scoping review", "abstract": null, "year": 2022, "venue": "npj Digital Medicine", "authors": [ "Tadeusz Ciecierski-Holmes", "Ritvij Singh", "Miriam Axt", "S. Brenner", "Sandra Barteit" ], "externalIds": { "DBLP": "journals/npjdm/Ciecierski-Holmes22", "PubMedCentral": "9614192", "DOI": "10.1038/s41746-022-00700-y", "CorpusId": 253162927, "PubMed": "36307479" }, "url": "https://www.semanticscholar.org/paper/ba9fbfdfdefbf1afabc42a095ef959fa0d410d73", "referenceCount": 104, "citationCount": 59, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Language Models are Realistic Tabular Data Generators", "abstract": "Tabular data is among the oldest and most ubiquitous forms of data. 
However, the generation of synthetic samples with the original data's characteristics remains a significant challenge for tabular data. While many generative models from the computer vision domain, such as variational autoencoders or generative adversarial networks, have been adapted for tabular data generation, less research has been directed towards recent transformer-based large language models (LLMs), which are also generative in nature. To this end, we propose GReaT (Generation of Realistic Tabular data), which exploits an auto-regressive generative LLM to sample synthetic and yet highly realistic tabular data. Furthermore, GReaT can model tabular data distributions by conditioning on any subset of features; the remaining features are sampled without additional overhead. We demonstrate the effectiveness of the proposed approach in a series of experiments that quantify the validity and quality of the produced data samples from multiple angles. We find that GReaT maintains state-of-the-art performance across numerous real-world and synthetic data sets with heterogeneous feature types coming in various sizes.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "V. Borisov", "Kathrin Seßler", "Tobias Leemann", "Martin Pawelczyk", "Gjergji Kasneci" ], "externalIds": { "ArXiv": "2210.06280", "DBLP": "conf/iclr/BorisovSLPK23", "DOI": "10.48550/arXiv.2210.06280", "CorpusId": 252846328 }, "url": "https://www.semanticscholar.org/paper/394bd431e522b86581086bcb5cd9be161cf1cdf4", "referenceCount": 58, "citationCount": 135, "influentialCitationCount": 19, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "STaSy: Score-based Tabular data Synthesis", "abstract": "Tabular data synthesis is a long-standing research topic in machine learning. Many different methods have been proposed over the past decades, ranging from statistical methods to deep generative methods. However, it has not always been successful due to the complicated nature of real-world tabular data. In this paper, we present a new model named Score-based Tabular data Synthesis (STaSy) and its training strategy based on the paradigm of score-based generative modeling. Despite the fact that score-based generative models have resolved many issues in generative models, there still exists room for improvement in tabular data synthesis. Our proposed training strategy includes a self-paced learning technique and a fine-tuning strategy, which further increases the sampling quality and diversity by stabilizing the denoising score matching training. Furthermore, we also conduct rigorous experimental studies in terms of the generative task trilemma: sampling quality, diversity, and time. In our experiments with 15 benchmark tabular datasets and 7 baselines, our method outperforms existing methods in terms of task-dependant evaluations and diversity. Code is available at https://github.com/JayoungKim408/STaSy.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Jayoung Kim", "C. 
Lee", "Noseong Park" ], "externalIds": { "DBLP": "conf/iclr/0002LP23", "ArXiv": "2210.04018", "DOI": "10.48550/arXiv.2210.04018", "CorpusId": 252780718 }, "url": "https://www.semanticscholar.org/paper/6349332c9226c14561f9eb82162a198142cb2965", "referenceCount": 64, "citationCount": 34, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "TabDDPM: Modelling Tabular Data with Diffusion Models", "abstract": "Denoising diffusion probabilistic models are currently becoming the leading paradigm of generative modeling for many important data modalities. Being the most prevalent in the computer vision community, diffusion models have also recently gained some attention in other domains, including speech, NLP, and graph-like data. In this work, we investigate if the framework of diffusion models can be advantageous for general tabular problems, where datapoints are typically represented by vectors of heterogeneous features. The inherent heterogeneity of tabular data makes it quite challenging for accurate modeling, since the individual features can be of completely different nature, i.e., some of them can be continuous and some of them can be discrete. To address such data types, we introduce TabDDPM -- a diffusion model that can be universally applied to any tabular dataset and handles any type of feature. We extensively evaluate TabDDPM on a wide set of benchmarks and demonstrate its superiority over existing GAN/VAE alternatives, which is consistent with the advantage of diffusion models in other fields. Additionally, we show that TabDDPM is eligible for privacy-oriented setups, where the original datapoints cannot be publicly shared.", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "Akim Kotelnikov", "Dmitry Baranchuk", "Ivan Rubachev", "Artem Babenko" ], "externalIds": { "ArXiv": "2209.15421", "DBLP": "conf/icml/KotelnikovBRB23", "DOI": "10.48550/arXiv.2209.15421", "CorpusId": 252668788 }, "url": "https://www.semanticscholar.org/paper/25d3a4e048d0020ba9cffc6442ebd4e7bb548a55", "referenceCount": 57, "citationCount": 131, "influentialCitationCount": 35, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Towards overcoming data scarcity in materials science: unifying models and datasets with a mixture of experts framework", "abstract": null, "year": 2022, "venue": "npj Computational Materials", "authors": [ "Rees Chang", "Yu-Xiong Wang", "E. Ertekin" ], "externalIds": { "ArXiv": "2207.13880", "DOI": "10.1038/s41524-022-00929-x", "CorpusId": 251134953 }, "url": "https://www.semanticscholar.org/paper/d183f55b912fc84ed7df99fae837581ad305a8df", "referenceCount": 63, "citationCount": 18, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Physics" ] }, { "title": "TabPFN: A Transformer That Solves Small Tabular Classification Problems in a Second", "abstract": "We present TabPFN, a trained Transformer that can do supervised classification for small tabular datasets in less than a second, needs no hyperparameter tuning and is competitive with state-of-the-art classification methods. TabPFN performs in-context learning (ICL), it learns to make predictions using sequences of labeled examples (x, f(x)) given in the input, without requiring further parameter updates. TabPFN is fully entailed in the weights of our network, which accepts training and test samples as a set-valued input and yields predictions for the entire test set in a single forward pass. 
TabPFN is a Prior-Data Fitted Network (PFN) and is trained offline once, to approximate Bayesian inference on synthetic datasets drawn from our prior. This prior incorporates ideas from causal reasoning: It entails a large space of structural causal models with a preference for simple structures. On the 18 datasets in the OpenML-CC18 suite that contain up to 1 000 training data points, up to 100 purely numerical features without missing values, and up to 10 classes, we show that our method clearly outperforms boosted trees and performs on par with complex state-of-the-art AutoML systems with up to 230$\\times$ speedup. This increases to a 5 700$\\times$ speedup when using a GPU. We also validate these results on an additional 67 small numerical datasets from OpenML. We provide all our code, the trained TabPFN, an interactive browser demo and a Colab notebook at https://github.com/automl/TabPFN.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Noah Hollmann", "Samuel G. Müller", "Katharina Eggensperger", "F. Hutter" ], "externalIds": { "ArXiv": "2207.01848", "DBLP": "conf/iclr/Hollmann0EH23", "CorpusId": 252683429 }, "url": "https://www.semanticscholar.org/paper/4c4f0fcf1ce04f12290d8c876abfbe57817de430", "referenceCount": 72, "citationCount": 154, "influentialCitationCount": 38, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Transfer Learning with Deep Tabular Models", "abstract": "Recent work on deep learning for tabular data demonstrates the strong performance of deep tabular models, often bridging the gap between gradient boosted decision trees and neural networks. Accuracy aside, a major advantage of neural models is that they learn reusable features and are easily fine-tuned in new domains. This property is often exploited in computer vision and natural language applications, where transfer learning is indispensable when task-specific training data is scarce. In this work, we demonstrate that upstream data gives tabular neural networks a decisive advantage over widely used GBDT models. We propose a realistic medical diagnosis benchmark for tabular transfer learning, and we present a how-to guide for using upstream data to boost performance with a variety of tabular neural network architectures. Finally, we propose a pseudo-feature method for cases where the upstream and downstream feature sets differ, a tabular-specific problem widespread in real-world applications. Our code is available at https://github.com/LevinRoman/tabular-transfer-learning .", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Roman Levin", "Valeriia Cherepanova", "Avi Schwarzschild", "Arpit Bansal", "C. B. Bruss", "T. Goldstein", "A. Wilson", "Micah Goldblum" ], "externalIds": { "DBLP": "journals/corr/abs-2206-15306", "ArXiv": "2206.15306", "DOI": "10.48550/arXiv.2206.15306", "CorpusId": 250144560 }, "url": "https://www.semanticscholar.org/paper/96e22af70f9ca575ebfe648677aced03c6c8803d", "referenceCount": 103, "citationCount": 52, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Adversarial Random Forests for Density Estimation and Generative Modeling", "abstract": "We propose methods for density estimation and data synthesis using a novel form of unsupervised random forests. 
Inspired by generative adversarial networks, we implement a recursive procedure in which trees gradually learn structural properties of the data through alternating rounds of generation and discrimination. The method is provably consistent under minimal assumptions. Unlike classic tree-based alternatives, our approach provides smooth (un)conditional densities and allows for fully synthetic data generation. We achieve comparable or superior performance to state-of-the-art probabilistic circuits and deep learning models on various tabular data benchmarks while executing about two orders of magnitude faster on average. An accompanying $\\texttt{R}$ package, $\\texttt{arf}$, is available on $\\texttt{CRAN}$.", "year": 2022, "venue": "International Conference on Artificial Intelligence and Statistics", "authors": [ "David Watson", "Kristin Blesch", "Jan Kapar", "Marvin N. Wright" ], "externalIds": { "DBLP": "conf/aistats/WatsonBKW23", "ArXiv": "2205.09435", "CorpusId": 252846366 }, "url": "https://www.semanticscholar.org/paper/0134ba108f074d11a7a8512d1c7237240a6dbd11", "referenceCount": 139, "citationCount": 12, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "The Effects of Regularization and Data Augmentation are Class Dependent", "abstract": "Regularization is a fundamental technique to prevent over-fitting and to improve generalization performances by constraining a model's complexity. Current Deep Networks heavily rely on regularizers such as Data-Augmentation (DA) or weight-decay, and employ structural risk minimization, i.e. cross-validation, to select the optimal regularization hyper-parameters. In this study, we demonstrate that techniques such as DA or weight decay produce a model with a reduced complexity that is unfair across classes. The optimal amount of DA or weight decay found from cross-validation leads to disastrous model performances on some classes e.g. on Imagenet with a resnet50, the\"barn spider\"classification test accuracy falls from $68\\%$ to $46\\%$ only by introducing random crop DA during training. Even more surprising, such performance drop also appears when introducing uninformative regularization techniques such as weight decay. Those results demonstrate that our search for ever increasing generalization performance -- averaged over all classes and samples -- has left us with models and regularizers that silently sacrifice performances on some classes. This scenario can become dangerous when deploying a model on downstream tasks e.g. an Imagenet pre-trained resnet50 deployed on INaturalist sees its performances fall from $70\\%$ to $30\\%$ on class \\#8889 when introducing random crop DA during the Imagenet pre-training phase. Those results demonstrate that designing novel regularizers without class-dependent bias remains an open research question.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Randall Balestriero", "L. 
Bottou", "Yann LeCun" ], "externalIds": { "DBLP": "journals/corr/abs-2204-03632", "ArXiv": "2204.03632", "DOI": "10.48550/arXiv.2204.03632", "CorpusId": 248006086 }, "url": "https://www.semanticscholar.org/paper/dcda4897113ed03c920e2e94a90ee33e09781759", "referenceCount": 64, "citationCount": 78, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Why the search for a privacy-preserving data sharing mechanism is failing", "abstract": null, "year": 2022, "venue": "Nature Computational Science", "authors": [ "Theresa Stadler", "C. Troncoso" ], "externalIds": { "DBLP": "journals/ncs/StadlerT22", "DOI": "10.1038/s43588-022-00236-x", "CorpusId": 248335865, "PubMed": "38177545" }, "url": "https://www.semanticscholar.org/paper/f8fccef7c5cfae1fb13de92be0290d47bcd39a2f", "referenceCount": 14, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Synthetic data generation for tabular health records: A systematic review", "abstract": null, "year": 2022, "venue": "Neurocomputing", "authors": [ "Mikel Hernandez", "Gorka Epelde", "Ane Alberdi", "Rodrigo Cilla", "Debbie Rankin" ], "externalIds": { "DBLP": "journals/ijon/HernandezEACR22", "DOI": "10.1016/j.neucom.2022.04.053", "CorpusId": 248162973 }, "url": "https://www.semanticscholar.org/paper/7dad9fe143f8bc90056b30de27358e1954144886", "referenceCount": 71, "citationCount": 114, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Data Contamination: From Memorization to Exploitation", "abstract": "Pretrained language models are typically trained on massive web-based datasets, which are often “contaminated” with downstream test sets. It is not clear to what extent models exploit the contaminated data for downstream tasks. We present a principled method to study this question. We pretrain BERT models on joint corpora of Wikipedia and labeled downstream datasets, and fine-tune them on the relevant task. Comparing performance between samples seen and unseen during pretraining enables us to define and quantify levels of memorization and exploitation.Experiments with two models and three downstream tasks show that exploitation exists in some cases, but in others the models memorize the contaminated data, but do not exploit it. We show that these two measures are affected by different factors such as the number of duplications of the contaminated data and the model size. Our results highlight the importance of analyzing massive web-scale datasets to verify that progress in NLP is obtained by better language understanding and not better data exploitation.", "year": 2022, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Inbal Magar", "Roy Schwartz" ], "externalIds": { "DBLP": "journals/corr/abs-2203-08242", "ArXiv": "2203.08242", "ACL": "2022.acl-short.18", "DOI": "10.48550/arXiv.2203.08242", "CorpusId": 247475929 }, "url": "https://www.semanticscholar.org/paper/c37d0b258386293097fa3f71f971dc5dfceb4684", "referenceCount": 34, "citationCount": 123, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Systematic Review on Data Scarcity Problem in Deep Learning: Solution and Applications", "abstract": "Recent advancements in deep learning architecture have increased its utility in real-life applications. 
Deep learning models require a large amount of data to train the model. In many application domains, there is a limited set of data available for training neural networks as collecting new data is either not feasible or requires more resources such as in marketing, computer vision, and medical science. These models require a large amount of data to avoid the problem of overfitting. One of the data space solutions to the problem of limited data is data augmentation. The purpose of this study focuses on various data augmentation techniques that can be used to further improve the accuracy of a neural network. This saves the cost and time consumption required to collect new data for the training of deep neural networks by augmenting available data. This also regularizes the model and improves its capability of generalization. The need for large datasets in different fields such as computer vision, natural language processing, security, and healthcare is also covered in this survey paper. The goal of this paper is to provide a comprehensive survey of recent advancements in data augmentation techniques and their application in various domains.", "year": 2022, "venue": "ACM Computing Surveys", "authors": [ "Ms. Aayushi Bansal", "Dr. Rewa Sharma", "Dr. Mamta Kathuria" ], "externalIds": { "DBLP": "journals/csur/BansalSK22", "DOI": "10.1145/3502287", "CorpusId": 245772675 }, "url": "https://www.semanticscholar.org/paper/5c9cb718ae91ebdcf07e05a026db469dc46aaed4", "referenceCount": 126, "citationCount": 94, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Transformers Can Do Bayesian Inference", "abstract": "Currently, it is hard to reap the benefits of deep learning for Bayesian methods, which allow the explicit specification of prior knowledge and accurately capture model uncertainty. We present Prior-Data Fitted Networks (PFNs). PFNs leverage in-context learning in large-scale machine learning techniques to approximate a large set of posteriors. The only requirement for PFNs to work is the ability to sample from a prior distribution over supervised learning tasks (or functions). Our method restates the objective of posterior approximation as a supervised classification problem with a set-valued input: it repeatedly draws a task (or function) from the prior, draws a set of data points and their labels from it, masks one of the labels and learns to make probabilistic predictions for it based on the set-valued input of the rest of the data points. Presented with a set of samples from a new supervised learning task as input, PFNs make probabilistic predictions for arbitrary other data points in a single forward propagation, having learned to approximate Bayesian inference. We demonstrate that PFNs can near-perfectly mimic Gaussian processes and also enable efficient Bayesian inference for intractable problems, with over 200-fold speedups in multiple setups compared to current methods. We obtain strong results in very diverse areas such as Gaussian process regression, Bayesian neural networks, classification for small tabular data sets, and few-shot image classification, demonstrating the generality of PFNs. Code and trained PFNs are released at https://github.com/automl/TransformersCanDoBayesianInference.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Samuel Muller", "Noah Hollmann", "Sebastian Pineda Arango", "Josif Grabocka", "F. 
Hutter" ], "externalIds": { "DBLP": "conf/iclr/0005HPGH22", "ArXiv": "2112.10510", "CorpusId": 245334722 }, "url": "https://www.semanticscholar.org/paper/d88a5ae1673f2009704186acf2890163e6ddf4ca", "referenceCount": 68, "citationCount": 94, "influentialCitationCount": 11, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Deep Neural Networks and Tabular Data: A Survey", "abstract": "Heterogeneous tabular data are the most commonly used form of data and are essential for numerous critical and computationally demanding applications. On homogeneous datasets, deep neural networks have repeatedly shown excellent performance and have therefore been widely adopted. However, their adaptation to tabular data for inference or data generation tasks remains highly challenging. To facilitate further progress in the field, this work provides an overview of state-of-the-art deep learning methods for tabular data. We categorize these methods into three groups: data transformations, specialized architectures, and regularization models. For each of these groups, our work offers a comprehensive overview of the main approaches. Moreover, we discuss deep learning approaches for generating tabular data and also provide an overview over strategies for explaining deep models on tabular data. Thus, our first contribution is to address the main research streams and existing methodologies in the mentioned areas while highlighting relevant challenges and open research questions. Our second contribution is to provide an empirical comparison of traditional machine learning methods with 11 deep learning approaches across five popular real-world tabular datasets of different sizes and with different learning objectives. Our results, which we have made publicly available as competitive benchmarks, indicate that algorithms based on gradient-boosted tree ensembles still mostly outperform deep learning models on supervised learning tasks, suggesting that the research progress on competitive deep learning models for tabular data is stagnating. To the best of our knowledge, this is the first in-depth overview of deep learning approaches for tabular data; as such, this work can serve as a valuable starting point to guide researchers and practitioners interested in deep learning with tabular data.", "year": 2021, "venue": "IEEE Transactions on Neural Networks and Learning Systems", "authors": [ "V. Borisov", "Tobias Leemann", "Kathrin Seßler", "Johannes Haug", "Martin Pawelczyk", "Gjergji Kasneci" ], "externalIds": { "DBLP": "journals/corr/abs-2110-01889", "ArXiv": "2110.01889", "DOI": "10.1109/TNNLS.2022.3229161", "CorpusId": 238353897, "PubMed": "37015381" }, "url": "https://www.semanticscholar.org/paper/3acff13163f51765bb36147f6107967765509d9b", "referenceCount": 250, "citationCount": 476, "influentialCitationCount": 46, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Text Data Augmentation for Deep Learning", "abstract": null, "year": 2021, "venue": "Journal of Big Data", "authors": [ "Connor Shorten", "T. Khoshgoftaar", "B. 
Furht" ], "externalIds": { "DBLP": "journals/jbd/ShortenKF21a", "PubMedCentral": "8287113", "DOI": "10.1186/s40537-021-00492-0", "CorpusId": 236096559, "PubMed": "34306963" }, "url": "https://www.semanticscholar.org/paper/31852f9fc732c0868af12d631c72693702d80521", "referenceCount": 144, "citationCount": 328, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Revisiting Deep Learning Models for Tabular Data", "abstract": "The existing literature on deep learning for tabular data proposes a wide range of novel architectures and reports competitive results on various datasets. However, the proposed models are usually not properly compared to each other and existing works often use different benchmarks and experiment protocols. As a result, it is unclear for both researchers and practitioners what models perform best. Additionally, the field still lacks effective baselines, that is, the easy-to-use models that provide competitive performance across different problems. In this work, we perform an overview of the main families of DL architectures for tabular data and raise the bar of baselines in tabular DL by identifying two simple and powerful deep architectures. The first one is a ResNet-like architecture which turns out to be a strong baseline that is often missing in prior works. The second model is our simple adaptation of the Transformer architecture for tabular data, which outperforms other solutions on most tasks. Both models are compared to many existing architectures on a diverse set of tasks under the same training and tuning protocols. We also compare the best DL models with Gradient Boosted Decision Trees and conclude that there is still no universally superior solution.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Yu. V. Gorishniy", "Ivan Rubachev", "Valentin Khrulkov", "Artem Babenko" ], "externalIds": { "DBLP": "conf/nips/GorishniyRKB21", "ArXiv": "2106.11959", "CorpusId": 235593213 }, "url": "https://www.semanticscholar.org/paper/5fa06d856ba6ae9cd1366888f8134d7fd0db75b9", "referenceCount": 63, "citationCount": 503, "influentialCitationCount": 109, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Tabular Data: Deep Learning is Not All You Need", "abstract": null, "year": 2021, "venue": "Information Fusion", "authors": [ "Ravid Shwartz-Ziv", "Amitai Armon" ], "externalIds": { "ArXiv": "2106.03253", "DBLP": "journals/inffus/Shwartz-ZivA22", "DOI": "10.1016/j.inffus.2021.11.011", "CorpusId": 260435623 }, "url": "https://www.semanticscholar.org/paper/009560d2710138a446e6e254d8ddcb65eaa0e687", "referenceCount": 59, "citationCount": 873, "influentialCitationCount": 32, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Survey of Data Augmentation Approaches for NLP", "abstract": "Data augmentation has recently seen increased interest in NLP due to more work in low-resource domains, new tasks, and the popularity of large-scale neural networks that require large amounts of training data. Despite this recent upsurge, this area is still relatively underexplored, perhaps due to the challenges posed by the discrete nature of language data. In this paper, we present a comprehensive and unifying survey of data augmentation for NLP by summarizing the literature in a structured manner. We first introduce and motivate data augmentation for NLP, and then discuss major methodologically representative approaches. 
Next, we highlight techniques that are used for popular NLP applications and tasks. We conclude by outlining current challenges and directions for future research. Overall, our paper aims to clarify the landscape of existing literature in data augmentation for NLP and motivate additional work in this area. We also present a GitHub repository with a paper list that will be continuously updated at https://github.com/styfeng/DataAug4NLP", "year": 2021, "venue": "Findings", "authors": [ "Steven Y. Feng", "Varun Gangal", "Jason Wei", "Sarath Chandar", "Soroush Vosoughi", "T. Mitamura", "E. Hovy" ], "externalIds": { "ACL": "2021.findings-acl.84", "ArXiv": "2105.03075", "DBLP": "journals/corr/abs-2105-03075", "DOI": "10.18653/v1/2021.findings-acl.84", "CorpusId": 234093015 }, "url": "https://www.semanticscholar.org/paper/63d8426ba1f51a8525dd19fd8ec92934ec71aea5", "referenceCount": 193, "citationCount": 665, "influentialCitationCount": 44, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "CTAB-GAN: Effective Table Data Synthesizing", "abstract": "While data sharing is crucial for knowledge development, privacy concerns and strict regulation (e.g., European General Data Protection Regulation (GDPR)) unfortunately limit its full effectiveness. Synthetic tabular data emerges as an alternative to enable data sharing while fulfilling regulatory and privacy constraints. The state-of-the-art tabular data synthesizers draw methodologies from generative Adversarial Networks (GAN) and address two main data types in the industry, i.e., continuous and categorical. In this paper, we develop CTAB-GAN, a novel conditional table GAN architecture that can effectively model diverse data types, including a mix of continuous and categorical variables. Moreover, we address data imbalance and long-tail issues, i.e., certain variables have drastic frequency differences across large values. To achieve those aims, we first introduce the information loss and classification loss to the conditional GAN. Secondly, we design a novel conditional vector, which efficiently encodes the mixed data type and skewed distribution of data variable. We extensively evaluate CTAB-GAN with the state of the art GANs that generate synthetic tables, in terms of data similarity and analysis utility. The results on five datasets show that the synthetic data of CTAB-GAN remarkably resembles the real data for all three types of variables and results into higher accuracy for five machine learning algorithms, by up to 17%.", "year": 2021, "venue": "Asian Conference on Machine Learning", "authors": [ "Zilong Zhao", "A. Kunar", "H. V. D. Scheer", "R. Birke", "L. Chen" ], "externalIds": { "ArXiv": "2102.08369", "DBLP": "journals/corr/abs-2102-08369", "CorpusId": 231934078 }, "url": "https://www.semanticscholar.org/paper/0173de47cce7b5e616aa8198b270b34abbd68645", "referenceCount": 30, "citationCount": 144, "influentialCitationCount": 21, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Artificial Intelligence in Low- and Middle-Income Countries: Innovating Global Health Radiology.", "abstract": "Scarce or absent radiology resources impede adoption of artificial intelligence (AI) for medical imaging by resource-poor health institutions. They face limitations in local equipment, personnel expertise, infrastructure, data-rights frameworks, and public policies. 
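As an illustrative aside to the conditional tabular GAN entries in this list (CTAB-GAN above, CTGAN further below): a hedged sketch of the usual workflow with the `ctgan` package, declaring the categorical columns, fitting, then sampling synthetic rows. The class name and arguments are assumptions; older releases expose `CTGANSynthesizer` instead of `CTGAN`, and CTAB-GAN itself ships its own codebase.

```python
# Sketch: fit a conditional tabular GAN on mixed continuous/categorical data
# and sample synthetic rows. Assumes the `ctgan` package; toy data only.
import numpy as np
import pandas as pd
from ctgan import CTGAN  # assumed import path

rng = np.random.default_rng(0)
n = 1000
real = pd.DataFrame({
    "age": rng.integers(18, 70, size=n),
    "income": rng.normal(55000, 15000, size=n).round(2),
    "job": rng.choice(["clerk", "engineer", "nurse", "manager"], size=n),
})

model = CTGAN(epochs=5)                    # tiny run, illustration only
model.fit(real, discrete_columns=["job"])  # categorical columns must be declared
synthetic = model.sample(100)              # 100 synthetic rows, same schema
print(synthetic.head())
```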
The trustworthiness of AI for medical decision making in global health and low-resource settings is hampered by insufficient data diversity, nontransparent AI algorithms, and resource-poor health institutions' limited participation in AI production and validation. RAD-AID's three-pronged integrated strategy for AI adoption in resource-poor health institutions is presented, which includes clinical radiology education, infrastructure implementation, and phased AI introduction. This strategy derives from RAD-AID's more-than-a-decade experience as a nonprofit organization developing radiology in resource-poor health institutions, both in the United States and in low- and middle-income countries. The three components synergistically provide the foundation to address health care disparities. Local radiology personnel expertise is augmented through comprehensive education. Software, hardware, and radiologic and networking infrastructure enables radiology workflows incorporating AI. These educational and infrastructure developments occur while RAD-AID delivers phased introduction, testing, and scaling of AI via global health collaborations.", "year": 2020, "venue": "Radiology", "authors": [ "D. Mollura", "M. Culp", "E. Pollack", "Gillian Battino", "J. Scheel", "Victoria L. Mango", "A. Elahi", "A. Schweitzer", "Farouk Dako" ], "externalIds": { "MAG": "3092635044", "DOI": "10.1148/radiol.2020201434", "CorpusId": 222182046, "PubMed": "33021895" }, "url": "https://www.semanticscholar.org/paper/3239e46950c3df06ae49e63ee160a4f2dd98c41c", "referenceCount": 34, "citationCount": 82, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "A survey on generative adversarial networks for imbalance problems in computer vision tasks", "abstract": null, "year": 2020, "venue": "Journal of Big Data", "authors": [ "Vignesh Sampath", "I. Maurtua", "Juan José Aguilar Martín", "Aitor Gutierrez" ], "externalIds": { "PubMedCentral": "7845583", "DBLP": "journals/jbd/SampathMMG21", "DOI": "10.1186/s40537-021-00414-0", "CorpusId": 231631243, "PubMed": "33552840" }, "url": "https://www.semanticscholar.org/paper/95e74beb9f54e0312f7356391ba7c699b05ebdb0", "referenceCount": 240, "citationCount": 148, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Transfer Learning for Drug Discovery.", "abstract": "The data volume of in silico drug discovery studies is often small. Therefore, the availability of labeled data is a major limitation in artificial intelligence-assisted drug discovery. One solution to the problem is to develop algorithms that can cope with relatively heterogeneous and scarce data. Transfer learning is a kind of machine learning that can leverage existing knowledge from other related tasks to a specific task with insufficient data. Deep transfer learning is the most commonly used method and has attained much progress in the field of drug discovery. This perspective aims to provide an overview of transfer learning and related applications in drug discovery and give outlooks as to future development and application of transfer learning for drug discovery.", "year": 2020, "venue": "Journal of Medicinal Chemistry", "authors": [ "Chenjing Cai", "Shiwei Wang", "Youjun Xu", "Weilin Zhang", "K. Tang", "Ouyang Qi", "L. 
Lai", "Jianfeng Pei" ], "externalIds": { "MAG": "3042826782", "DOI": "10.1021/acs.jmedchem.9b02147", "CorpusId": 220606408, "PubMed": "32672961" }, "url": "https://www.semanticscholar.org/paper/7d5d93af1518a36039b214c0b30ca2f7a5bf4e4e", "referenceCount": 68, "citationCount": 204, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Chemistry", "Medicine" ] }, { "title": "Artificial intelligence in health care: laying the Foundation for Responsible, sustainable, and inclusive innovation in low- and middle-income countries", "abstract": null, "year": 2020, "venue": "Globalization and Health", "authors": [ "H. Alami", "L. Rivard", "P. Lehoux", "S. Hoffman", "S. B. Cadeddu", "M. Savoldelli", "M. A. Samri", "M. A. Ag Ahmed", "R. Fleet", "J. Fortin" ], "externalIds": { "PubMedCentral": "7315549", "MAG": "3037647569", "DOI": "10.1186/s12992-020-00584-1", "CorpusId": 219988180, "PubMed": "32580741" }, "url": "https://www.semanticscholar.org/paper/b460805f7717925953bc5c17554f736c3c6d58aa", "referenceCount": 61, "citationCount": 99, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Business" ] }, { "title": "Privacy-Preserved Data Sharing Towards Multiple Parties in Industrial IoTs", "abstract": "The effective physical data sharing has been facilitating the functionality of Industrial IoTs, which is believed to be one primary basis for Industry 4.0. These physical data, while providing pivotal information for multiple components of a production system, also bring in severe privacy issues for both workers and manufacturers, thus aggravating the challenges for data sharing. Current designs tend to simplify the behaviors of participants for better theoretical analysis, and they cannot properly handle the challenges in IIoTs where the behaviors are more complicated and correlated. Therefore, this paper proposes a privacy-preserved data sharing framework for IIoTs, where multiple competing data consumers exist in different stages of the system. The framework allows data contributors to share their contents upon requests. The uploaded contents will be perturbed to preserve the sensitive status of contributors. The differential privacy is adopted in the perturbation to guarantee the privacy preservation. Then the data collector will process and relay contents with subsequent data consumers. This data collector will gain both its own data utility and extra profits in data relay. Two algorithms are proposed for data sharing in different scenarios, based on whether the service provider will further process the contents to retain its exclusive utility. This work also provides for both algorithms a comprehensive consideration on privacy, data utility, bandwidth efficiency, payment, and rationality for data sharing. 
Finally, the evaluation on real-world datasets demonstrates the effectiveness of proposed methods, together with clues for data sharing towards Industry 4.0.", "year": 2020, "venue": "IEEE Journal on Selected Areas in Communications", "authors": [ "Xu Zheng", "Zhipeng Cai" ], "externalIds": { "MAG": "3012539654", "DBLP": "journals/jsac/ZhengC20", "DOI": "10.1109/JSAC.2020.2980802", "CorpusId": 215835652 }, "url": "https://www.semanticscholar.org/paper/1ee96c89a357b2892f2f08b5e938e401c2d5a357", "referenceCount": 44, "citationCount": 294, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Your Classifier is Secretly an Energy Based Model and You Should Treat it Like One", "abstract": "We propose to reinterpret a standard discriminative classifier of p(y|x) as an energy based model for the joint distribution p(x,y). In this setting, the standard class probabilities can be easily computed as well as unnormalized values of p(x) and p(x|y). Within this framework, standard discriminative architectures may be used and the model can also be trained on unlabeled data. We demonstrate that energy based training of the joint distribution improves calibration, robustness, and out-of-distribution detection while also enabling our models to generate samples rivaling the quality of recent GAN approaches. We improve upon recently proposed techniques for scaling up the training of energy based models and present an approach which adds little overhead compared to standard classification training. Our approach is the first to achieve performance rivaling the state-of-the-art in both generative and discriminative learning within one hybrid model.", "year": 2019, "venue": "International Conference on Learning Representations", "authors": [ "Will Grathwohl", "Kuan-Chieh Jackson Wang", "J. Jacobsen", "D. Duvenaud", "Mohammad Norouzi", "Kevin Swersky" ], "externalIds": { "ArXiv": "1912.03263", "DBLP": "conf/iclr/GrathwohlWJD0S20", "MAG": "2994434574", "CorpusId": 208857409 }, "url": "https://www.semanticscholar.org/paper/97cd86d8d8c0f27cd3e64c6ca5cfdeb957ee39f4", "referenceCount": 52, "citationCount": 481, "influentialCitationCount": 92, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "PyTorch: An Imperative Style, High-Performance Deep Learning Library", "abstract": "Deep learning frameworks have often focused on either usability or speed, but not both. PyTorch is a machine learning library that shows that these two goals are in fact compatible: it was designed from first principles to support an imperative and Pythonic programming style that supports code as a model, makes debugging easy and is consistent with other popular scientific computing libraries, while remaining efficient and supporting hardware accelerators such as GPUs. In this paper, we detail the principles that drove the implementation of PyTorch and how they are reflected in its architecture. We emphasize that every aspect of PyTorch is a regular Python program under the full control of its user. We also explain how the careful and pragmatic implementation of the key components of its runtime enables them to work together to achieve compelling performance.
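As an illustrative aside to the "Your Classifier is Secretly an Energy Based Model" entry above: the reinterpretation amounts to reading LogSumExp over the class logits as an unnormalized log p(x), while softmax of the same logits still gives p(y|x). The PyTorch sketch below shows that readout; the two-layer network is a hypothetical stand-in.

```python
# Sketch of the energy-based reading of a classifier: for logits f(x)[y],
# p(y|x) = softmax(f(x)) as usual, while logsumexp_y f(x)[y] is treated as
# an unnormalized log p(x). Only the readout follows the cited formulation;
# the small network here is a stand-in.
import torch
import torch.nn as nn

classifier = nn.Sequential(nn.Linear(10, 64), nn.ReLU(), nn.Linear(64, 3))

x = torch.randn(8, 10)                                # batch of 8 inputs
logits = classifier(x)                                # shape (8, 3)

log_p_y_given_x = torch.log_softmax(logits, dim=1)    # standard class probabilities
unnorm_log_p_x = torch.logsumexp(logits, dim=1)       # energy-based log p(x) up to a constant

print(log_p_y_given_x.shape, unnorm_log_p_x.shape)
```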
We demonstrate the efficiency of individual subsystems, as well as the overall speed of PyTorch on several commonly used benchmarks.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Adam Paszke", "Sam Gross", "Francisco Massa", "Adam Lerer", "James Bradbury", "Gregory Chanan", "Trevor Killeen", "Zeming Lin", "N. Gimelshein", "L. Antiga", "Alban Desmaison", "Andreas Köpf", "E. Yang", "Zach DeVito", "Martin Raison", "Alykhan Tejani", "Sasank Chilamkurthy", "Benoit Steiner", "Lu Fang", "Junjie Bai", "Soumith Chintala" ], "externalIds": { "MAG": "2970971581", "DBLP": "journals/corr/abs-1912-01703", "ArXiv": "1912.01703", "CorpusId": 202786778 }, "url": "https://www.semanticscholar.org/paper/3c8a456509e6c0805354bd40a35e3f2dbf8069b1", "referenceCount": 39, "citationCount": 36158, "influentialCitationCount": 3694, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Enhancing materials property prediction by leveraging computational and experimental data using deep transfer learning", "abstract": null, "year": 2019, "venue": "Nature Communications", "authors": [ "Dipendra Jha", "K. Choudhary", "F. Tavazza", "W. Liao", "A. Choudhary", "C. Campbell", "Ankit Agrawal" ], "externalIds": { "PubMedCentral": "6874674", "MAG": "2990015413", "DOI": "10.1038/s41467-019-13297-w", "CorpusId": 208211021, "PubMed": "31757948" }, "url": "https://www.semanticscholar.org/paper/08a68192cedf80f36d802584e58923926e9041dc", "referenceCount": 68, "citationCount": 196, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "A survey on Image Data Augmentation for Deep Learning", "abstract": null, "year": 2019, "venue": "Journal of Big Data", "authors": [ "Connor Shorten", "T. Khoshgoftaar" ], "externalIds": { "DBLP": "journals/jbd/ShortenK19", "MAG": "2954996726", "DOI": "10.1186/s40537-019-0197-0", "CorpusId": 195811894 }, "url": "https://www.semanticscholar.org/paper/3813b88a4ec3c63919df47e9694b577f4691f7e5", "referenceCount": 142, "citationCount": 7440, "influentialCitationCount": 187, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Modeling Tabular data using Conditional GAN", "abstract": "Modeling the probability distribution of rows in tabular data and generating realistic synthetic data is a non-trivial task. Tabular data usually contains a mix of discrete and continuous columns. Continuous columns may have multiple modes whereas discrete columns are sometimes imbalanced making the modeling difficult. Existing statistical and deep neural network models fail to properly model this type of data. We design TGAN, which uses a conditional generative adversarial network to address these challenges. To aid in a fair and thorough comparison, we design a benchmark with 7 simulated and 8 real datasets and several Bayesian network baselines. TGAN outperforms Bayesian methods on most of the real datasets whereas other deep learning methods could not.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Lei Xu", "Maria Skoularidou", "Alfredo Cuesta-Infante", "K. 
Veeramachaneni" ], "externalIds": { "MAG": "2970533824", "ArXiv": "1907.00503", "DBLP": "conf/nips/XuSCV19", "CorpusId": 195767064 }, "url": "https://www.semanticscholar.org/paper/e7f3478fd8aac6940a4bf4f5eb60ac38f6b0b85b", "referenceCount": 32, "citationCount": 888, "influentialCitationCount": 210, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Neural Spline Flows", "abstract": "A normalizing flow models a complex probability density as an invertible transformation of a simple base density. Flows based on either coupling or autoregressive transforms both offer exact density evaluation and sampling, but rely on the parameterization of an easily invertible elementwise transformation, whose choice determines the flexibility of these models. Building upon recent work, we propose a fully-differentiable module based on monotonic rational-quadratic splines, which enhances the flexibility of both coupling and autoregressive transforms while retaining analytic invertibility. We demonstrate that neural spline flows improve density estimation, variational inference, and generative modeling of images.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Conor Durkan", "Artur Bekasov", "Iain Murray", "G. Papamakarios" ], "externalIds": { "MAG": "2948659923", "DBLP": "conf/nips/DurkanB0P19", "ArXiv": "1906.04032", "CorpusId": 182952446 }, "url": "https://www.semanticscholar.org/paper/1eeb265595e250cf66751ef9032524386d7a9b32", "referenceCount": 66, "citationCount": 646, "influentialCitationCount": 105, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "A Review of Secure and Privacy-Preserving Medical Data Sharing", "abstract": "In the digital healthcare era, it is of the utmost importance to harness medical information scattered across healthcare institutions to support in-depth data analysis and achieve personalized healthcare. However, the cyberinfrastructure boundaries of healthcare organizations and privacy leakage threats place obstacles on the sharing of medical records. Blockchain, as a public ledger characterized by its transparency, tamper-evidence, trustlessness, and decentralization, can help build a secure medical data exchange network. This paper surveys the state-of-the-art schemes on secure and privacy-preserving medical data sharing of the past decade with a focus on blockchain-based approaches. We classify them into permissionlessblockchain-based approaches and permissioned blockchain-based approaches and analyze their advantagesand disadvantages. We also discuss potential research topics on blockchain-based medical data sharing.", "year": 2019, "venue": "IEEE Access", "authors": [ "Hao Jin", "Yan Luo", "Peilong Li", "J. Mathew" ], "externalIds": { "DBLP": "journals/access/JinLLM19", "MAG": "2946102439", "DOI": "10.1109/ACCESS.2019.2916503", "CorpusId": 167222332 }, "url": "https://www.semanticscholar.org/paper/1b64dacf21f67de09953024105eca33a309fca2a", "referenceCount": 74, "citationCount": 137, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Model Evaluation, Model Selection, and Algorithm Selection in Machine Learning", "abstract": "The correct use of model evaluation, model selection, and algorithm selection techniques is vital in academic machine learning research as well as in many industrial settings. 
This article reviews different techniques that can be used for each of these three subtasks and discusses the main advantages and disadvantages of each technique with references to theoretical and empirical studies. Further, recommendations are given to encourage best yet feasible practices in research and applications of machine learning. Common methods such as the holdout method for model evaluation and selection are covered, which are not recommended when working with small datasets. Different flavors of the bootstrap technique are introduced for estimating the uncertainty of performance estimates, as an alternative to confidence intervals via normal approximation if bootstrapping is computationally feasible. Common cross-validation techniques such as leave-one-out cross-validation and k-fold cross-validation are reviewed, the bias-variance trade-off for choosing k is discussed, and practical tips for the optimal choice of k are given based on empirical evidence. Different statistical tests for algorithm comparisons are presented, and strategies for dealing with multiple comparisons such as omnibus tests and multiple-comparison corrections are discussed. Finally, alternative methods for algorithm selection, such as the combined F-test 5x2 cross-validation and nested cross-validation, are recommended for comparing machine learning algorithms when datasets are small.", "year": 2018, "venue": "arXiv.org", "authors": [ "S. Raschka" ], "externalIds": { "MAG": "2902240649", "ArXiv": "1811.12808", "DBLP": "journals/corr/abs-1811-12808", "CorpusId": 49529756 }, "url": "https://www.semanticscholar.org/paper/eef183687fab4d762a381f2e80e357e08e923f0a", "referenceCount": 35, "citationCount": 673, "influentialCitationCount": 31, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Towards Secure and Privacy-Preserving Data Sharing in e-Health Systems via Consortium Blockchain", "abstract": null, "year": 2018, "venue": "Journal of medical systems", "authors": [ "Aiqing Zhang", "Xiaodong Lin" ], "externalIds": { "DBLP": "journals/jms/ZhangL18", "MAG": "2811112224", "DOI": "10.1007/s10916-018-0995-5", "CorpusId": 49527054, "PubMed": "29956061" }, "url": "https://www.semanticscholar.org/paper/56cd71e02772d6bd7adead9aa876a862ee0537c2", "referenceCount": 34, "citationCount": 401, "influentialCitationCount": 19, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Data Synthesis based on Generative Adversarial Networks", "abstract": "\n Privacy is an important concern for our society where sharing data with partners or releasing data to the public is a frequent occurrence. Some of the techniques that are being used to achieve privacy are to remove identifiers, alter quasi-identifiers, and perturb values. Unfortunately, these approaches suffer from two limitations. First, it has been shown that private information can still be leaked if attackers possess some background knowledge or other information sources. Second, they do not take into account the adverse impact these methods will have on the utility of the released data. In this paper, we propose a method that meets both requirements. Our method, called\n table-GAN\n , uses generative adversarial networks (GANs) to synthesize fake tables that are statistically similar to the original table yet do not incur information leakage. 
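As an illustrative aside to the model-evaluation survey above, which recommends nested cross-validation when comparing algorithms on small datasets: the scikit-learn sketch below shows the standard pattern, with an inner loop for hyperparameter tuning and an outer loop for performance estimation. The SVC estimator and the parameter grid are placeholders.

```python
# Nested cross-validation sketch: the inner split tunes hyperparameters,
# the outer split estimates generalization. Estimator and grid are placeholders.
from sklearn.datasets import load_iris
from sklearn.model_selection import GridSearchCV, KFold, cross_val_score
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)

inner = KFold(n_splits=3, shuffle=True, random_state=0)
outer = KFold(n_splits=5, shuffle=True, random_state=0)

search = GridSearchCV(SVC(), {"C": [0.1, 1, 10]}, cv=inner)
scores = cross_val_score(search, X, y, cv=outer)   # the search is refit per outer fold
print("nested CV accuracy: %.3f +/- %.3f" % (scores.mean(), scores.std()))
```

Wrapping the grid search inside the outer loop keeps the tuning data and the evaluation data separated, which is the point of the nested protocol on small samples.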
We show that the machine learning models trained using our synthetic tables exhibit performance that is similar to that of models trained using the original table for unknown testing cases. We call this property\n model compatibility\n . We believe that anonymization/perturbation/synthesis methods without model compatibility are of little value. We used four real-world datasets from four different domains for our experiments and conducted indepth comparisons with state-of-the-art anonymization, perturbation, and generation techniques. Throughout our experiments, only our method consistently shows balance between privacy level and model compatibility.\n", "year": 2018, "venue": "Proceedings of the VLDB Endowment", "authors": [ "Noseong Park", "Mahmoud Mohammadi", "Kshitij Gorde", "S. Jajodia", "Hongkyu Park", "Youngmin Kim" ], "externalIds": { "MAG": "2806276686", "ArXiv": "1806.03384", "DBLP": "journals/pvldb/ParkMGJPK18", "DOI": "10.14778/3231751.3231757", "CorpusId": 47017667 }, "url": "https://www.semanticscholar.org/paper/631b7acaba06c42e6ba918ee7a284ba5e5ab0078", "referenceCount": 31, "citationCount": 368, "influentialCitationCount": 39, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Data Augmentation Generative Adversarial Networks", "abstract": "Effective training of neural networks requires much data. In the low-data regime, parameters are underdetermined, and learnt networks generalise poorly. Data Augmentation alleviates this by using existing data more effectively. However standard data augmentation produces only limited plausible alternative data. Given there is potential to generate a much broader set of augmentations, we design and train a generative model to do data augmentation. The model, based on image conditional Generative Adversarial Networks, takes data from a source domain and learns to take any data item and generalise it to generate other within-class data items. As this generative process does not depend on the classes themselves, it can be applied to novel unseen classes of data. We show that a Data Augmentation Generative Adversarial Network (DAGAN) augments standard vanilla classifiers well. We also show a DAGAN can enhance few-shot learning systems such as Matching Networks. We demonstrate these approaches on Omniglot, on EMNIST having learnt the DAGAN on Omniglot, and VGG-Face data. In our experiments we can see over 13% increase in accuracy in the low-data regime experiments in Omniglot (from 69% to 82%), EMNIST (73.9% to 76%) and VGG-Face (4.5% to 12%); in Matching Networks for Omniglot we observe an increase of 0.5% (from 96.9% to 97.4%) and an increase of 1.8% in EMNIST (from 59.5% to 61.3%).", "year": 2017, "venue": "International Conference on Learning Representations", "authors": [ "Antreas Antoniou", "A. Storkey", "Harrison Edwards" ], "externalIds": { "MAG": "2951224694", "ArXiv": "1711.04340", "DBLP": "journals/corr/abs-1711-04340", "CorpusId": 4117071 }, "url": "https://www.semanticscholar.org/paper/fe9cd683c3b8ebdfd8efd1109a857cdbf9edc364", "referenceCount": 41, "citationCount": 1005, "influentialCitationCount": 72, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "OpenML Benchmarking Suites", "abstract": "Machine learning research depends on objectively interpretable, comparable, and reproducible algorithm benchmarks. Therefore, we advocate the use of curated, comprehensive suites of machine learning tasks to standardize the setup, execution, and reporting of benchmarks. 
We enable this through software tools that help to create and leverage these benchmarking suites. These are seamlessly integrated into the OpenML platform, and accessible through interfaces in Python, Java, and R. OpenML benchmarking suites are (a) easy to use through standardized data formats, APIs, and client libraries; (b) machine-readable, with extensive meta-information on the included datasets; and (c) allow benchmarks to be shared and reused in future studies. We also present a first, carefully curated and practical benchmarking suite for classification: the OpenML Curated Classification benchmarking suite 2018 (OpenML-CC18).", "year": 2017, "venue": "NeurIPS Datasets and Benchmarks", "authors": [ "B. Bischl", "Giuseppe Casalicchio", "Matthias Feurer", "F. Hutter", "Michel Lang", "R. G. Mantovani", "J. N. Rijn", "J. Vanschoren" ], "externalIds": { "DBLP": "conf/nips/BischlCFGHLMRV21", "ArXiv": "1708.03731", "MAG": "2976734616", "CorpusId": 203691173 }, "url": "https://www.semanticscholar.org/paper/d03e451db07f764a0294d44aec13a5993d927790", "referenceCount": 76, "citationCount": 127, "influentialCitationCount": 18, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Presence", "abstract": null, "year": 2017, "venue": "The Lancet", "authors": [ "A. Kleinman" ], "externalIds": { "DOI": "10.1016/S0140-6736(17)31620-3", "CorpusId": 208794041 }, "url": "https://www.semanticscholar.org/paper/435ea4f1fd3e748baa9d93990f0af8cae4803d2b", "referenceCount": 0, "citationCount": 98, "influentialCitationCount": 9, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "CatBoost: unbiased boosting with categorical features", "abstract": "This paper presents the key algorithmic techniques behind CatBoost, a new gradient boosting toolkit. Their combination leads to CatBoost outperforming other publicly available boosting implementations in terms of quality on a variety of datasets. Two critical algorithmic advances introduced in CatBoost are the implementation of ordered boosting, a permutation-driven alternative to the classic algorithm, and an innovative algorithm for processing categorical features. Both techniques were created to fight a prediction shift caused by a special kind of target leakage present in all currently existing implementations of gradient boosting algorithms. In this paper, we provide a detailed analysis of this problem and demonstrate that proposed algorithms solve it effectively, leading to excellent empirical results.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "L. Ostroumova", "Gleb Gusev", "A. Vorobev", "Anna Veronika Dorogush", "Andrey Gulin" ], "externalIds": { "DBLP": "conf/nips/ProkhorenkovaGV18", "MAG": "2964022491", "CorpusId": 5044218 }, "url": "https://www.semanticscholar.org/paper/ee0a0f04d45f86bf50b24d7258e884725fcaa621", "referenceCount": 34, "citationCount": 2557, "influentialCitationCount": 280, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Imbalanced-learn: A Python Toolbox to Tackle the Curse of Imbalanced Datasets in Machine Learning", "abstract": "Imbalanced-learn is an open-source python toolbox aiming at providing a wide range of methods to cope with the problem of imbalanced dataset frequently encountered in machine learning and pattern recognition. 
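As an illustrative aside to the CatBoost entry above, whose main advance is native handling of categorical features via ordered statistics: the sketch below passes raw categorical columns through the `cat_features` argument. The toy data and iteration count are arbitrary.

```python
# Sketch: CatBoost consumes raw categorical columns directly via cat_features;
# the ordered target statistics described in the entry happen internally.
import pandas as pd
from catboost import CatBoostClassifier

X = pd.DataFrame({
    "city": ["paris", "lyon", "paris", "nice", "lyon", "nice"],
    "plan": ["basic", "pro", "pro", "basic", "basic", "pro"],
    "usage": [12.0, 40.5, 33.1, 8.7, 15.2, 51.0],
})
y = [0, 1, 1, 0, 0, 1]

model = CatBoostClassifier(iterations=50, verbose=False)
model.fit(X, y, cat_features=["city", "plan"])
print(model.predict(X))
```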
The implemented state-of-the-art methods can be categorized into 4 groups: (i) under-sampling, (ii) over-sampling, (iii) combination of over- and under-sampling, and (iv) ensemble learning methods. The proposed toolbox only depends on numpy, scipy, and scikit-learn and is distributed under MIT license. Furthermore, it is fully compatible with scikit-learn and is part of the scikit-learn-contrib supported project. Documentation, unit tests as well as integration tests are provided to ease usage and contribution. The toolbox is publicly available in GitHub: this https URL.", "year": 2016, "venue": "Journal of machine learning research", "authors": [ "G. Lemaître", "Fernando Nogueira", "Christos K. Aridas" ], "externalIds": { "MAG": "2949824731", "DBLP": "journals/jmlr/LemaitreNA17", "ArXiv": "1609.06570", "CorpusId": 1426815 }, "url": "https://www.semanticscholar.org/paper/05c5b732fb92546c7d6eeabfadb5c14610d07373", "referenceCount": 23, "citationCount": 1853, "influentialCitationCount": 116, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Failed trials for central nervous system disorders do not necessarily invalidate preclinical models and drug targets", "abstract": null, "year": 2016, "venue": "Nature reviews. Drug discovery", "authors": [ "A. Bespalov", "T. Steckler", "B. Altevogt", "E. Koustova", "P. Skolnick", "D. Deaver", "M. Millan", "J. Bastlund", "D. Doller", "J. Witkin", "P. Moser", "P. O’Donnell", "U. Ebert", "M. Geyer", "E. Prinssen", "T. Ballard", "Malcolm Macleod" ], "externalIds": { "MAG": "2430666023", "DOI": "10.1038/nrd.2016.88", "CorpusId": 13729602, "PubMed": "27312728" }, "url": "https://www.semanticscholar.org/paper/555b605787ae77935b0c001a89b4a9c0baba3fac", "referenceCount": 10, "citationCount": 62, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "XGBoost: A Scalable Tree Boosting System", "abstract": "Tree boosting is a highly effective and widely used machine learning method. In this paper, we describe a scalable end-to-end tree boosting system called XGBoost, which is used widely by data scientists to achieve state-of-the-art results on many machine learning challenges. We propose a novel sparsity-aware algorithm for sparse data and weighted quantile sketch for approximate tree learning. More importantly, we provide insights on cache access patterns, data compression and sharding to build a scalable tree boosting system. By combining these insights, XGBoost scales beyond billions of examples using far fewer resources than existing systems.", "year": 2016, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Tianqi Chen", "Carlos Guestrin" ], "externalIds": { "ArXiv": "1603.02754", "DBLP": "conf/kdd/ChenG16", "MAG": "3102476541", "DOI": "10.1145/2939672.2939785", "CorpusId": 4650265 }, "url": "https://www.semanticscholar.org/paper/26bc9195c6343e4d7f434dd65b4ad67efe2be27a", "referenceCount": 26, "citationCount": 30771, "influentialCitationCount": 2876, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Chi-square test of independence", "abstract": "The Chi-square statistic is a non-parametric (distribution free) tool designed to analyze group differences when the dependent variable is measured at a nominal level. Like all non-parametric statistics, the Chi-square is robust with respect to the distribution of the data. Specifically, it does not require equality of variances among the study groups or homoscedasticity in the data. 
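As an illustrative aside tying together the imbalanced-learn and XGBoost entries above: the sketch below oversamples the minority class with SMOTE and then fits a gradient-boosted classifier on the resampled data. The synthetic dataset and class weights are arbitrary.

```python
# Sketch: rebalance a skewed binary problem with SMOTE (imbalanced-learn),
# then fit an XGBoost classifier on the resampled data.
import numpy as np
from imblearn.over_sampling import SMOTE
from sklearn.datasets import make_classification
from xgboost import XGBClassifier

X, y = make_classification(n_samples=1000, weights=[0.9, 0.1], random_state=0)

X_res, y_res = SMOTE(random_state=0).fit_resample(X, y)   # minority class upsampled
print("class counts after SMOTE:", np.bincount(y_res))

clf = XGBClassifier(n_estimators=100)
clf.fit(X_res, y_res)
print("training accuracy:", clf.score(X_res, y_res))
```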
It permits evaluation of both dichotomous independent variables, and of multiple group studies. Unlike many other non-parametric and some parametric statistics, the calculations needed to compute the Chi-square provide considerable information about how each of the groups performed in the study. This richness of detail allows the researcher to understand the results and thus to derive more detailed information from this statistic than from many others. The Chi-square is a significance statistic, and should be followed with a strength statistic. The Cramer’s V is the most common strength test used to test the data when a significant Chi-square result has been obtained. Advantages of the Chi-square include its robustness with respect to distribution of the data, its ease of computation, the detailed information that can be derived from the test, its use in studies for which parametric assumptions cannot be met, and its flexibility in handling data from both two group and multiple group studies. Limitations include its sample size requirements, difficulty of interpretation when there are large numbers of categories (20 or more) in the independent or dependent variables, and tendency of the Cramer’s V to produce relative low correlation measures, even for highly significant results.", "year": 2013, "venue": "Biochemia Medica", "authors": [ "Mary L. McHugh" ], "externalIds": { "PubMedCentral": "3900058", "MAG": "2151570219", "DOI": "10.11613/BM.2013.018", "CorpusId": 18099903, "PubMed": "23894860" }, "url": "https://www.semanticscholar.org/paper/61b6fb4ae3879598b8bf281f9ed57c608806d8e9", "referenceCount": 4, "citationCount": 1749, "influentialCitationCount": 110, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Mathematics" ] }, { "title": "Privacy Preserving Data Sharing With Anonymous ID Assignment", "abstract": "An algorithm for anonymous sharing of private data among N parties is developed. This technique is used iteratively to assign these nodes ID numbers ranging from 1 to N. This assignment is anonymous in that the identities received are unknown to the other members of the group. Resistance to collusion among other members is verified in an information theoretic sense when private communication channels are used. This assignment of serial numbers allows more complex data to be shared and has applications to other problems in privacy preserving data mining, collision avoidance in communications and distributed database access. The required computations are distributed without using a trusted central authority. Existing and new algorithms for assigning anonymous IDs are examined with respect to trade-offs between communication and computational requirements. The new algorithms are built on top of a secure sum data mining operation using Newton's identities and Sturm's theorem. An algorithm for distributed solution of certain polynomials over finite fields enhances the scalability of the algorithms. Markov chain representations are used to find statistics on the number of iterations required, and computer algebra gives closed form results for the completion rates.", "year": 2013, "venue": "IEEE Transactions on Information Forensics and Security", "authors": [ "L. 
Dunning", "Ray Kresman" ], "externalIds": { "DBLP": "journals/tifs/DunningK13", "MAG": "2017815160", "DOI": "10.1109/TIFS.2012.2235831", "CorpusId": 1796878 }, "url": "https://www.semanticscholar.org/paper/b32836684d504afc8e33dcc41d77336e51e27fc5", "referenceCount": 45, "citationCount": 92, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Preclinical safety evaluations supporting pediatric drug development with biopharmaceuticals: strategy, challenges, current practices.", "abstract": "Evaluation of pharmaceutical agents in children is now conducted earlier in the drug development process. An important consideration for this pediatric use is how to assess and support its safety. This article is a collaborative effort of industry toxicologists to review strategies, challenges, and current practice regarding preclinical safety evaluations supporting pediatric drug development with biopharmaceuticals. Biopharmaceuticals include a diverse group of molecular, cell-based or gene therapeutics derived from biological sources or complex biotechnological processes. The principles of preclinical support of pediatric drug development for biopharmaceuticals are similar to those for small molecule pharmaceuticals and in general follow the same regulatory guidances outlined by the Food and Drug Administration and European Medicines Agency. However, many biopharmaceuticals are also inherently different, with limited species specificity or immunogenic potential which may impact the approach taken. This article discusses several key areas to aid in the support of pediatric clinical use, study design considerations for juvenile toxicity studies when they are needed, and current practices to support pediatric drug development based on surveys specifically targeting biopharmaceutical development.", "year": 2011, "venue": "Birth defects research. Part B. Developmental and reproductice toxicology", "authors": [ "L. Morford", "C. Bowman", "D. Blanset", "I. B. Bøgh", "G. Chellman", "W. Halpern", "G. Weinbauer", "T. P. Coogan" ], "externalIds": { "MAG": "2023261089", "DOI": "10.1002/bdrb.20305", "CorpusId": 22693996, "PubMed": "21770023" }, "url": "https://www.semanticscholar.org/paper/e952daefa2c2bf88d043bf335dfd3e275aaebe28", "referenceCount": 88, "citationCount": 34, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Bayesian Learning via Stochastic Gradient Langevin Dynamics", "abstract": "In this paper we propose a new framework for learning from large scale datasets based on iterative learning from small mini-batches. By adding the right amount of noise to a standard stochastic gradient optimization algorithm we show that the iterates will converge to samples from the true posterior distribution as we anneal the stepsize. This seamless transition between optimization and Bayesian posterior sampling provides an inbuilt protection against overfitting. We also propose a practical method for Monte Carlo estimates of posterior statistics which monitors a \"sampling threshold\" and collects samples after it has been surpassed. We apply the method to three models: a mixture of Gaussians, logistic regression and ICA with natural gradients.", "year": 2011, "venue": "International Conference on Machine Learning", "authors": [ "M. Welling", "Y. 
Teh" ], "externalIds": { "MAG": "2167433878", "DBLP": "conf/icml/WellingT11", "CorpusId": 2178983 }, "url": "https://www.semanticscholar.org/paper/aeed631d6a84100b5e9a021ec1914095c66de415", "referenceCount": 14, "citationCount": 2429, "influentialCitationCount": 384, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Scikit-learn: Machine Learning in Python", "abstract": "Scikit-learn is a Python module integrating a wide range of state-of-the-art machine learning algorithms for medium-scale supervised and unsupervised problems. This package focuses on bringing machine learning to non-specialists using a general-purpose high-level language. Emphasis is put on ease of use, performance, documentation, and API consistency. It has minimal dependencies and is distributed under the simplified BSD license, encouraging its use in both academic and commercial settings. Source code, binaries, and documentation can be downloaded from http://scikit-learn.sourceforge.net.", "year": 2011, "venue": "Journal of machine learning research", "authors": [ "Fabian Pedregosa", "G. Varoquaux", "Alexandre Gramfort", "V. Michel", "B. Thirion", "O. Grisel", "Mathieu Blondel", "Gilles Louppe", "P. Prettenhofer", "Ron Weiss", "Ron J. Weiss", "J. Vanderplas", "Alexandre Passos", "D. Cournapeau", "M. Brucher", "M. Perrot", "E. Duchesnay" ], "externalIds": { "MAG": "2950511172", "DBLP": "journals/corr/abs-1201-0490", "ArXiv": "1201.0490", "DOI": "10.5555/1953048.2078195", "CorpusId": 10659969 }, "url": "https://www.semanticscholar.org/paper/168f28ac3c8c7ea63bf7ed25f2288e8b67e2fe74", "referenceCount": 18, "citationCount": 69127, "influentialCitationCount": 5372, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Matplotlib: A 2D Graphics Environment", "abstract": "Matplotlib is a 2D graphics package used for Python for application development, interactive scripting,and publication-quality image generation across user interfaces and operating systems", "year": 2007, "venue": "Computing in science & engineering (Print)", "authors": [ "John D. Hunter" ], "externalIds": { "MAG": "2011301426", "DBLP": "journals/cse/Hunter07", "DOI": "10.1109/MCSE.2007.55", "CorpusId": 37016120 }, "url": "https://www.semanticscholar.org/paper/412a0bb5a3baa91b62053d82c562bc172df0439f", "referenceCount": 1, "citationCount": 18925, "influentialCitationCount": 1542, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A preprocessing scheme for high-cardinality categorical attributes in classification and prediction problems", "abstract": "Categorical data fields characterized by a large number of distinct values represent a serious challenge for many classification and regression algorithms that require numerical inputs. On the other hand, these types of data fields are quite common in real-world data mining applications and often contain potentially relevant information that is difficult to represent for modeling purposes.This paper presents a simple preprocessing scheme for high-cardinality categorical data that allows this class of attributes to be used in predictive models such as neural networks, linear and logistic regression. The proposed method is based on a well-established statistical method (empirical Bayes) that is straightforward to implement as an in-database procedure. 
Furthermore, for categorical attributes with an inherent hierarchical structure, like ZIP codes, the preprocessing scheme can directly leverage the hierarchy by blending statistics at the various levels of aggregation.While the statistical methods discussed in this paper were first introduced in the mid 1950's, the use of these methods as a preprocessing step for complex models, like neural networks, has not been previously discussed in any literature.", "year": 2001, "venue": "SKDD", "authors": [ "D. Micci-Barreca" ], "externalIds": { "MAG": "1990836268", "DBLP": "journals/sigkdd/Micci-Barreca01", "DOI": "10.1145/507533.507538", "CorpusId": 207706862 }, "url": "https://www.semanticscholar.org/paper/f3f3054b04b4554f00a25a698393625c4f784bb5", "referenceCount": 14, "citationCount": 202, "influentialCitationCount": 14, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Art of Data Augmentation", "abstract": "The term data augmentation refers to methods for constructing iterative optimization or sampling algorithms via the introduction of unobserved data or latent variables. For deterministic algorithms, the method was popularized in the general statistical community by the seminal article by Dempster, Laird, and Rubin on the EM algorithm for maximizing a likelihood function or, more generally, a posterior density. For stochastic algorithms, the method was popularized in the statistical literature by Tanner and Wong's Data Augmentation algorithm for posterior sampling and in the physics literature by Swendsen and Wang's algorithm for sampling from the Ising and Potts models and their generalizations; in the physics literature, the method of data augmentation is referred to as the method of auxiliary variables. Data augmentation schemes were used by Tanner and Wong to make simulation feasible and simple, while auxiliary variables were adopted by Swendsen and Wang to improve the speed of iterative simulation. In general, however, constructing data augmentation schemes that result in both simple and fast algorithms is a matter of art in that successful strategies vary greatly with the (observed-data) models being considered. After an overview of data augmentation/auxiliary variables and some recent developments in methods for constructing such efficient data augmentation schemes, we introduce an effective search strategy that combines the ideas of marginal augmentation and conditional augmentation, together with a deterministic approximation method for selecting good augmentation schemes. We then apply this strategy to three common classes of models (specifically, multivariate t, probit regression, and mixed-effects models) to obtain efficient Markov chain Monte Carlo algorithms for posterior sampling. We provide theoretical and empirical evidence that the resulting algorithms, while requiring similar programming effort, can show dramatic improvement over the Gibbs samplers commonly used for these models in practice. A key feature of all these new algorithms is that they are positive recurrent subchains of nonpositive recurrent Markov chains constructed in larger spaces.", "year": 2001, "venue": "", "authors": [ "D. V. van Dyk", "X. 
Meng" ], "externalIds": { "MAG": "2025183033", "DOI": "10.1198/10618600152418584", "CorpusId": 121929631 }, "url": "https://www.semanticscholar.org/paper/2ed57da0adc1909c8cba41b077ff780de6c34363", "referenceCount": 68, "citationCount": 985, "influentialCitationCount": 27, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Discriminatory Analysis - Nonparametric Discrimination: Consistency Properties", "abstract": "Abstract : The discrimination problem (two population case) may be defined as follows: e random variable Z, of observed value z, is distributed over some space (say, p-dimensional) either according to distribution F, or according to distribution G. The problem is to decide, on the basis of z, which of the two distributions Z has.", "year": 1989, "venue": "", "authors": [ "E. Fix", "J. Hodges" ], "externalIds": { "MAG": "1967320885", "DOI": "10.2307/1403797", "CorpusId": 120323383 }, "url": "https://www.semanticscholar.org/paper/088a6ae9149aa83d22b7cca1c542ce938f27cfe7", "referenceCount": 3, "citationCount": 3000, "influentialCitationCount": 132, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "$I$-Divergence Geometry of Probability Distributions and Minimization Problems", "abstract": "JSTOR is a not-for-profit service that helps scholars, researchers, and students discover, use, and build upon a wide range of content in a trusted digital archive. We use information technology and tools to increase productivity and facilitate new forms of scholarship. For more information about JSTOR, please contact support@jstor.org.. Institute of Mathematical Statistics is collaborating with JSTOR to digitize, preserve and extend access to The Annals of Probability. Some geometric properties of PD's are established, Kullback's I-divergence playing the role of squared Euclidean distance. The minimum discrimination information problem is viewed as that of projecting a PD onto a convex set of PD's and useful existence theorems for and characterizations of the minimizing PD are arrived at. A natural generalization of known iterative algorithms converging to the minimizing PD in special situations is given; even for those special cases, our convergence proof is more generally valid than those previously published. As corollaries of independent interest, generalizations of known results on the existence of PD's or nonnegative matrices of a certain form are obtained. The Lagrange multiplier technique is not used.", "year": 1975, "venue": "", "authors": [ "I. Csiszár" ], "externalIds": { "MAG": "2160709761", "DOI": "10.1214/AOP/1176996454", "CorpusId": 18053591 }, "url": "https://www.semanticscholar.org/paper/72b2aeeb76dbff312321ccbcc58e85009e0b57ae", "referenceCount": 25, "citationCount": 1681, "influentialCitationCount": 216, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Handbook of Methods of Applied Statistics:@@@Volume I: Techniques of Computation, Descriptive Methods, and Statistical Inference@@@Volume II: Planning of Surveys and Experiments.", "abstract": null, "year": 1968, "venue": "", "authors": [ "M. Karson", "I. Chakravarti", "R. Laha", "J. 
Roy" ], "externalIds": { "MAG": "2314046074", "DOI": "10.2307/2283899", "CorpusId": 123937589 }, "url": "https://www.semanticscholar.org/paper/922ee337c39b7f0423a383ba36dd73d136bdb508", "referenceCount": 0, "citationCount": 168, "influentialCitationCount": 15, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Regression Analysis of Binary Sequences", "abstract": null, "year": 1958, "venue": "", "authors": [ "D. Cox" ], "externalIds": { "MAG": "2903950532", "DOI": "10.1111/J.2517-6161.1958.TB00292.X", "CorpusId": 125694386 }, "url": "https://www.semanticscholar.org/paper/7bd431157c5c5af038df34b8c28cc793cecdbb77", "referenceCount": 31, "citationCount": 1782, "influentialCitationCount": 98, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "GOGGLE: Generative Modelling for Tabular Data by Learning Relational Structure", "abstract": "Deep generative models learn highly complex and non-linear representations to generate realistic synthetic data. While they have achieved notable success in computer vision and natural language processing, similar advances have been less demonstrable in the tabular domain. This is partially because generative modelling of tabular data entails a particular set of challenges, including heterogeneous relationships, limited number of samples, and difficulties in incorporating prior knowledge. Additionally, unlike their counterparts in image and sequence domain, deep generative models for tabular data almost exclusively employ fully-connected layers, which encode weak inductive biases about relationships between inputs. Real-world data generating processes can often be represented using relational structures, which encode sparse, heterogeneous relationships between variables. In this work, we learn and exploit relational structure underlying tabular data (where typical dimensionality d < 100) to better model variable dependence, and as a natural means to introduce regularization on relationships and include prior knowledge. Specifically, we introduce GOGGLE, an end-to-end message passing scheme that jointly learns the relational structure and corresponding functional relationships as the basis of generating synthetic samples. Using real-world datasets, we provide empirical evidence that the proposed method is effective in generating realistic synthetic data and exploiting domain knowledge for downstream tasks.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Tennison Liu", "Zhaozhi Qian", "Jeroen Berrevoets", "M. Schaar" ], "externalIds": { "DBLP": "conf/iclr/LiuQBS23", "CorpusId": 259298730 }, "url": "https://www.semanticscholar.org/paper/d41c2ecc159e545ef02dac198a6be7b066d9563d", "referenceCount": 87, "citationCount": 30, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Synthcity: a benchmark framework for diverse use cases of tabular synthetic data", "abstract": "Accessible high-quality data is the bread and butter of machine learning research, 1 and the demand for data has exploded as larger and more advanced ML models are 2 built across different domains. Yet, real data often contain sensitive information, 3 subject to various biases, and are costly to acquire, which compromise their quality 4 and accessibility. Synthetic data have thus emerged as a complement, sometimes 5 even a replacement, to real data for ML training. 
However, the landscape of synthetic data research has been fragmented due to the large number of data", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Zhaozhi Qian", "Robert Davis", "M. Schaar" ], "externalIds": { "DBLP": "conf/nips/QianDS23", "CorpusId": 268030698 }, "url": "https://www.semanticscholar.org/paper/ac13ab1880e8a3d4fe21c20cf29d1c5c8ef3258b", "referenceCount": 78, "citationCount": 8, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Why do tree-based models still outperform deep learning on typical tabular data?", "abstract": "While deep learning has enabled tremendous progress on text and image datasets, its superiority on tabular data is not clear. We contribute extensive benchmarks of standard and novel deep learning methods as well as tree-based models such as XGBoost and Random Forests, across a large number of datasets and hyperparameter combinations. We define a standard set of 45 datasets from varied domains with clear characteristics of tabular data and a benchmarking methodology accounting for both fitting models and finding good hyperparameters. Results show that tree-based models remain state-of-the-art on medium-sized data (∼10K samples) even without accounting for their superior speed. To understand this gap, we conduct an empirical investigation into the differing inductive biases of tree-based models and Neural Networks (NNs). This leads to a series of challenges which should guide researchers aiming to build tabular-specific NNs: 1. be robust to uninformative features, 2. preserve the orientation of the data, and 3. be able to easily learn irregular functions. To stimulate research on tabular architectures, we contribute a standard benchmark and raw data for baselines: every point of a 20 000 compute hours hyperparameter search for each learner. Looking at the results as a function of random search time rather than random search iterations makes the superiority of tree-based models even more striking. Neural networks and tree-based models were close for some benchmarks after a small number of iterations, but for the same amount of time spent on random search, tree-based model scores are always well above neural networks.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Léo Grinsztajn", "Edouard Oyallon", "G. Varoquaux" ], "externalIds": { "DBLP": "conf/nips/GrinsztajnOV22", "CorpusId": 252697222 }, "url": "https://www.semanticscholar.org/paper/5a00b32876f7d4869bce980500d4ccc978389315", "referenceCount": 72, "citationCount": 444, "influentialCitationCount": 47, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Effective data generation for imbalanced learning using conditional generative adversarial networks", "abstract": null, "year": 2018, "venue": "Expert systems with applications", "authors": [ "G. Douzas", "F. Bação" ], "externalIds": { "MAG": "2756182389", "DBLP": "journals/eswa/DouzasB18", "DOI": "10.1016/J.ESWA.2017.09.030", "CorpusId": 27011880 }, "url": "https://www.semanticscholar.org/paper/4f8c4fd0174c75cafc31b66812da61840c6454f2", "referenceCount": 40, "citationCount": 425, "influentialCitationCount": 16, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Biomedical data privacy: problems, perspectives, and recent advances", "abstract": "The notion of privacy in the healthcare domain is at least as old as the ancient Greeks.
Several decades ago, as electronic medical record (EMR) systems began to take hold, the necessity of patient privacy was recognized as a core principle, or even a right, that must be upheld.1,2 This belief was re-enforced as computers and EMRs became more common in clinical environments.3–5 However, the arrival of ultra-cheap data collection and processing technologies is fundamentally changing the face of healthcare. The traditional boundaries of primary and tertiary care environments are breaking down and health information is increasingly collected through mobile devices,6 in personal domains (eg, in one's home7), and from sensors attached on or in the human body (eg, body area networks8–10). At the same time, the detail and diversity of information collected in the context of healthcare and biomedical research is increasing at an unprecedented rate, with clinical and administrative health data being complemented with a range of *omics data, where genomics11 and proteomics12 are currently leading the charge, with other types of molecular data on the horizon.13 Healthcare organizations (HCOs) are adopting and adapting information technologies to support an expanding array of activities designed to derive value from these growing data archives, in terms of enhanced health outcomes.14\n\nThe ready availability of such large volumes of detailed data has also been accompanied by privacy invasions. Recent breach notification laws at the US federal and state levels have brought to the public's attention the scope and frequency of these invasions. For example, there are cases of healthcare provider snooping on the medical records of famous people, family, and friends, use of personal information for identity fraud, and millions of records disclosed through lost and …", "year": 2013, "venue": "J. Am. Medical Informatics Assoc.", "authors": [ "B. Malin", "K. Emam", "C. O'Keefe" ], "externalIds": { "MAG": "2115883395", "DBLP": "journals/jamia/MalinEO13", "DOI": "10.1136/amiajnl-2012-001509", "CorpusId": 16169848, "PubMed": "23221359" }, "url": "https://www.semanticscholar.org/paper/ce1974e7c1abd4c4143c7a87512674bdcbf9e772", "referenceCount": 73, "citationCount": 137, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "A Tutorial on Energy-Based Learning", "abstract": "Energy-Based Models (EBMs) capture dependencies between variables by associating a scalar energy to each configuration of the variables. Inference consists in clamping the value of observed variables and finding configurations of the remaining variables that minimize the energy. Learning consists in finding an energy function in which observed configurations of the variables are given lower energies than unobserved ones. The EBM approach provides a common theoretical framework for many learning models, including traditional discriminative and generative approaches, as well as graph-transformer networks, conditional random fields, maximum margin Markov networks, and several manifold learning methods. Probabilistic models must be properly normalized, which sometimes requires evaluating intractable integrals over the space of all possible variable configurations. Since EBMs have no requirement for proper normalization, this problem is naturally circumvented.
EBMs can be viewed as a form of non-probabilistic factor graphs, and they provide considerably more flexibility in the design of architectures and training criteria than probabilistic approaches.", "year": 2006, "venue": "", "authors": [ "Yann LeCun", "S. Chopra", "R. Hadsell", "Aurelio Ranzato", "Fu Jie Huang" ], "externalIds": { "MAG": "2161914416", "CorpusId": 8531544 }, "url": "https://www.semanticscholar.org/paper/7fc604e1a3e45cd2d2742f96d62741930a363efa", "referenceCount": 73, "citationCount": 1345, "influentialCitationCount": 118, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "SMOTE: Synthetic Minority Over-sampling Technique", "abstract": "An approach to the construction of classifiers from imbalanced datasets is described. A dataset is imbalanced if the classification categories are not approximately equally represented. Often real-world data sets are predominately composed of \"normal\" examples with only a small percentage of \"abnormal\" or \"interesting\" examples. It is also the case that the cost of misclassifying an abnormal (interesting) example as a normal example is often much higher than the cost of the reverse error. Under-sampling of the majority (normal) class has been proposed as a good means of increasing the sensitivity of a classifier to the minority class. This paper shows that a combination of our method of oversampling the minority (abnormal) class and under-sampling the majority (normal) class can achieve better classifier performance (in ROC space) than only under-sampling the majority class. This paper also shows that a combination of our method of over-sampling the minority class and under-sampling the majority class can achieve better classifier performance (in ROC space) than varying the loss ratios in Ripper or class priors in Naive Bayes. Our method of over-sampling the minority class involves creating synthetic minority class examples. Experiments are performed using C4.5, Ripper and a Naive Bayes classifier. The method is evaluated using the area under the Receiver Operating Characteristic curve (AUC) and the ROC convex hull strategy.", "year": 2002, "venue": "Journal of Artificial Intelligence Research", "authors": [ "N. Chawla", "K. Bowyer", "L. Hall", "W. Kegelmeyer" ], "externalIds": { "ArXiv": "1106.1813", "DBLP": "journals/corr/abs-1106-1813", "MAG": "3100785508", "DOI": "10.1613/jair.953", "CorpusId": 1554582 }, "url": "https://www.semanticscholar.org/paper/8cb44f06586f609a29d9b496cc752ec01475dffe", "referenceCount": 42, "citationCount": 22523, "influentialCitationCount": 2431, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Random Forests", "abstract": null, "year": 2001, "venue": "Machine-mediated learning", "authors": [ "L.
Breiman" ], "externalIds": { "MAG": "2911964244", "DBLP": "reference/ml/X17sy", "DOI": "10.1023/A:1010933404324", "CorpusId": 89141 }, "url": "https://www.semanticscholar.org/paper/8e0be569ea77b8cb29bb0e8b031887630fe7a96c", "referenceCount": 25, "citationCount": 89809, "influentialCitationCount": 5836, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "What exactly has tabpfn learned to do?", "abstract": null, "year": null, "venue": "ICLR Blogposts", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Refereeing the Referees: Evaluating Two-Sample Tests for Validating Generators in Precision Sciences": { "paper_title": "Refereeing the Referees: Evaluating Two-Sample Tests for Validating Generators in Precision Sciences", "arxiv_id": "2409.16336v1", "keyword": "generative model", "authors": [ "Samuele Grossi", "Marco Letizia", "Riccardo Torre" ], "references": [ { "title": "JetNet: A Python package for accessing open datasets and benchmarking machine learning methods in high energy physics", "abstract": null, "year": 2023, "venue": "Journal of Open Source Software", "authors": [ "R. Kansal", "Carlos Pareja", "Zichun Hao", "J. Duarte" ], "externalIds": { "DBLP": "journals/jossw/KansalPHD23", "DOI": "10.21105/joss.05789", "CorpusId": 264827225 }, "url": "https://www.semanticscholar.org/paper/efa5f75ffc6c5061bfffcfa7367ceadc928e54b6", "referenceCount": 20, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Goodness of fit by Neyman-Pearson testing", "abstract": "The Neyman-Pearson strategy for hypothesis testing can be employed for goodness of fit if the alternative hypothesis is selected from data by exploring a rich parametrised family of models, while controlling the impact of statistical fluctuations. The New Physics Learning Machine (NPLM) methodology has been developed as a concrete implementation of this idea, to target the detection of new physical effects in the context of high energy physics collider experiments. In this paper we conduct a comparison of this approach to goodness of fit with others, in particular with classifier-based strategies that share strong similarities with NPLM. From our comparison, NPLM emerges as the more sensitive test to small departures of the data from the expected distribution and not biased towards detecting specific types of anomalies. These features make it suited for agnostic searches for new physics at collider experiments. Its deployment in other scientific and industrial scenarios should be investigated.", "year": 2023, "venue": "SciPost Physics", "authors": [ "G. Grosso", "M. Letizia", "M. Pierini", "A. Wulzer" ], "externalIds": { "ArXiv": "2305.14137", "DOI": "10.21468/SciPostPhys.16.5.123", "CorpusId": 258841643 }, "url": "https://www.semanticscholar.org/paper/29ad4357374ca7ad106fccd9a4fafd8b90078e1f", "referenceCount": 26, "citationCount": 5, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Mathematics" ] }, { "title": "Evaluating generative models in high energy physics", "abstract": "There has been a recent explosion in research into machine-learning-based generative modeling to tackle computational challenges for simulations in high energy physics (HEP). 
In order to use such alternative simulators in practice, we need well-defined metrics to compare different generative models and evaluate their discrepancy from the true distributions. We present the first systematic review and investigation into evaluation metrics and their sensitivity to failure modes of generative models, using the framework of two-sample goodness-of-fit testing, and their relevance and viability for HEP. Inspired by previous work in both physics and computer vision, we propose two new metrics, the Fr\\'echet and kernel physics distances (FPD and KPD, respectively), and perform a variety of experiments measuring their performance on simple Gaussian-distributed, and simulated high energy jet datasets. We find FPD, in particular, to be the most sensitive metric to all alternative jet distributions tested and recommend its adoption, along with the KPD and Wasserstein distances between individual feature distributions, for evaluating generative models in HEP. We finally demonstrate the efficacy of these proposed metrics in evaluating and comparing a novel attention-based generative adversarial particle transformer to the state-of-the-art message-passing generative adversarial network jet simulation model. The code for our proposed metrics is provided in the open source JetNet Python library.", "year": 2022, "venue": "Physical Review D", "authors": [ "R. Kansal", "Anni Li", "Javier Mauricio Duarte", "N. Chernyavskaya", "M. Pierini", "B. Orzari", "T. Tomei" ], "externalIds": { "ArXiv": "2211.10295", "DOI": "10.1103/PhysRevD.107.076017", "CorpusId": 258280799 }, "url": "https://www.semanticscholar.org/paper/f3bed1872353534260c5cacfdcec13d5a6e08f64", "referenceCount": 75, "citationCount": 28, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Computer Science", "Mathematics" ] }, { "title": "Learning new physics efficiently with nonparametric methods", "abstract": null, "year": 2022, "venue": "The European Physical Journal C", "authors": [ "M. Letizia", "Gianvito Losapio", "Marco Rando", "G. Grosso", "A. Wulzer", "M. Pierini", "M. Zanetti", "L. Rosasco" ], "externalIds": { "DBLP": "journals/corr/abs-2204-02317", "ArXiv": "2204.02317", "PubMedCentral": "9534824", "DOI": "10.1140/epjc/s10052-022-10830-y", "CorpusId": 247957947, "PubMed": "36212113" }, "url": "https://www.semanticscholar.org/paper/bd094fa5823db9ca5f20faa75b253d228f6dc803", "referenceCount": 65, "citationCount": 25, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Computer Science", "Medicine" ] }, { "title": "Testing the boundaries: Normalizing Flows for higher dimensional data sets", "abstract": "Normalizing Flows (NFs) are emerging as a powerful class of generative models, as they not only allow for efficient sampling, but also deliver, by construction, density estimation. They are of great potential usage in High Energy Physics (HEP), where complex high dimensional data and probability distributions are everyday’s meal. However, in order to fully leverage the potential of NFs it is crucial to explore their robustness as data dimensionality increases. Thus, in this contribution, we discuss the performances of some of the most popular types of NFs on the market, on some toy data sets with increasing number of dimensions.", "year": 2022, "venue": "Journal of Physics: Conference Series", "authors": [ "H. Reyes-González", "R. 
Torre" ], "externalIds": { "ArXiv": "2202.09188", "DBLP": "journals/corr/abs-2202-09188", "DOI": "10.1088/1742-6596/2438/1/012155", "CorpusId": 246996840 }, "url": "https://www.semanticscholar.org/paper/bbbdc687f15b1bc14ba8a39e7aeb3fd6cdb2052e", "referenceCount": 4, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science", "Physics" ] }, { "title": "Pros and Cons of GAN Evaluation Measures: New Developments", "abstract": null, "year": 2021, "venue": "Computer Vision and Image Understanding", "authors": [ "A. Borji" ], "externalIds": { "DBLP": "journals/cviu/Borji22", "ArXiv": "2103.09396", "DOI": "10.1016/j.cviu.2021.103329", "CorpusId": 232257836 }, "url": "https://www.semanticscholar.org/paper/4a3ea0abfb4988a44638c534416fd66c6d15e9d3", "referenceCount": 93, "citationCount": 225, "influentialCitationCount": 12, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ATLAS HL-LHC Computing Conceptual Design Report", "abstract": null, "year": 2020, "venue": "", "authors": [ "P. Calafiura", "J. Catmore", "D. Costanzo", "A. Girolamo" ], "externalIds": { "MAG": "3100726162", "CorpusId": 229002932 }, "url": "https://www.semanticscholar.org/paper/cf01eb4cbdd556866d0eac3a26759742728f4321", "referenceCount": 76, "citationCount": 42, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning multivariate new physics", "abstract": null, "year": 2019, "venue": "The European Physical Journal C", "authors": [ "R. D’Agnolo", "G. Grosso", "M. Pierini", "A. Wulzer", "M. Zanetti" ], "externalIds": { "ArXiv": "1912.12155", "MAG": "2997822613", "DOI": "10.1140/epjc/s10052-021-08853-y", "CorpusId": 209500635 }, "url": "https://www.semanticscholar.org/paper/a52f4b412fa728516038b52e9be8c2b870c5b13e", "referenceCount": 91, "citationCount": 74, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Physics" ] }, { "title": "Effectively Unbiased FID and Inception Score and Where to Find Them", "abstract": "This paper shows that two commonly used evaluation metrics for generative models, the Fréchet Inception Distance (FID) and the Inception Score (IS), are biased -- the expected value of the score computed for a finite sample set is not the true value of the score. Worse, the paper shows that the bias term depends on the particular model being evaluated, so model A may get a better score than model B simply because model A's bias term is smaller. This effect cannot be fixed by evaluating at a fixed number of samples. This means all comparisons using FID or IS as currently computed are unreliable. We then show how to extrapolate the score to obtain an effectively bias-free estimate of scores computed with an infinite number of samples, which we term FID Infinity and IS Infinity. In turn, this effectively bias-free estimate requires good estimates of scores with a finite number of samples. We show that using Quasi-Monte Carlo integration notably improves estimates of FID and IS for finite sample sets. Our extrapolated scores are simple, drop-in replacements for the finite sample scores. Additionally, we show that using low discrepancy sequence in GAN training offers small improvements in the resulting generator.", "year": 2019, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Min Jin Chong", "D. 
Forsyth" ], "externalIds": { "MAG": "2985516053", "DBLP": "conf/cvpr/ChongF20", "ArXiv": "1911.07023", "DOI": "10.1109/cvpr42600.2020.00611", "CorpusId": 208138613 }, "url": "https://www.semanticscholar.org/paper/d7015a7c12550b5138181f4d054ea7dbe9022bed", "referenceCount": 48, "citationCount": 158, "influentialCitationCount": 18, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "The DNNLikelihood: enhancing likelihood distribution with Deep Learning", "abstract": null, "year": 2019, "venue": "The European Physical Journal C", "authors": [ "A. Coccaro", "M. Pierini", "L. Silvestrini", "R. Torre" ], "externalIds": { "MAG": "3044273297", "ArXiv": "1911.03305", "DOI": "10.1140/epjc/s10052-020-8230-1", "CorpusId": 207847917 }, "url": "https://www.semanticscholar.org/paper/702d633d1496b96a42a80f9997cf3a5e1883b440", "referenceCount": 81, "citationCount": 18, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Physics" ] }, { "title": "A Style-Based Generator Architecture for Generative Adversarial Networks", "abstract": "We propose an alternative generator architecture for generative adversarial networks, borrowing from style transfer literature. The new architecture leads to an automatically learned, unsupervised separation of high-level attributes (e.g., pose and identity when trained on human faces) and stochastic variation in the generated images (e.g., freckles, hair), and it enables intuitive, scale-specific control of the synthesis. The new generator improves the state-of-the-art in terms of traditional distribution quality metrics, leads to demonstrably better interpolation properties, and also better disentangles the latent factors of variation. To quantify interpolation quality and disentanglement, we propose two new, automated methods that are applicable to any generator architecture. Finally, we introduce a new, highly varied and high-quality dataset of human faces.", "year": 2018, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Tero Karras", "S. Laine", "Timo Aila" ], "externalIds": { "DBLP": "journals/corr/abs-1812-04948", "MAG": "2904367110", "ArXiv": "1812.04948", "DOI": "10.1109/CVPR.2019.00453", "CorpusId": 54482423 }, "url": "https://www.semanticscholar.org/paper/ceb2ebef0b41e31c1a21b28c2734123900c005e2", "referenceCount": 65, "citationCount": 8967, "influentialCitationCount": 1761, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Learning new physics from a machine", "abstract": "We propose using neural networks to detect data departures from a given reference model, with no prior bias on the nature of the new physics responsible for the discrepancy. The virtues of neural networks as unbiased function approximants make them particularly suited for this task. An algorithm that implements this idea is constructed, as a straightforward application of the likelihood-ratio hypothesis test. The algorithm compares observations with an auxiliary set of reference-distributed events, possibly obtained with a Monte Carlo event generator. It returns a $p$ value, which measures the compatibility of the reference model with the data. It also identifies the most discrepant phase-space region of the data set, to be selected for further investigation. 
The most interesting potential applications are model-independent new physics searches, although our approach could also be used to compare the theoretical predictions of different Monte Carlo event generators, or for data validation algorithms. In this work we study the performance of our algorithm on a few simple examples. The results confirm the model independence of the approach, namely that it displays good sensitivity to a variety of putative signals. Furthermore, we show that the reach does not depend much on whether a favorable signal region is selected based on prior expectations. We identify directions for improvement towards applications to real experimental data sets.", "year": 2018, "venue": "Physical Review D", "authors": [ "R. D’Agnolo", "A. Wulzer" ], "externalIds": { "ArXiv": "1806.02350", "MAG": "2950029982", "DOI": "10.1103/PhysRevD.99.015014", "CorpusId": 50807342 }, "url": "https://www.semanticscholar.org/paper/c71f9dbd705b606b96c0b92aaf9c07c93beecf20", "referenceCount": 79, "citationCount": 155, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Physics" ] }, { "title": "Computational Optimal Transport", "abstract": "Optimal transport (OT) theory can be informally described using the words of the French mathematician Gaspard Monge (1746-1818): A worker with a shovel in hand has to move a large pile of sand lying on a construction site. The goal of the worker is to erect with all that sand a target pile with a prescribed shape (for example, that of a giant sand castle). Naturally, the worker wishes to minimize her total effort, quantified for instance as the total distance or time spent carrying shovelfuls of sand. Mathematicians interested in OT cast that problem as that of comparing two probability distributions, two different piles of sand of the same volume. They consider all of the many possible ways to morph, transport or reshape the first pile into the second, and associate a \"global\" cost to every such transport, using the \"local\" consideration of how much it costs to move a grain of sand from one place to another. Recent years have witnessed the spread of OT in several fields, thanks to the emergence of approximate solvers that can scale to sizes and dimensions that are relevant to data sciences. Thanks to this newfound scalability, OT is being increasingly used to unlock various problems in imaging sciences (such as color or texture processing), computer vision and graphics (for shape manipulation) or machine learning (for regression, classification and density fitting). This short book reviews OT with a bias toward numerical methods and their applications in data sciences, and sheds lights on the theoretical properties of OT that make it particularly useful for some of these applications.", "year": 2018, "venue": "Found. Trends Mach. Learn.", "authors": [ "G. Peyré", "Marco Cuturi" ], "externalIds": { "MAG": "2963820353", "ArXiv": "1803.00567", "DBLP": "journals/ftml/PeyreC19", "DOI": "10.1561/2200000073", "CorpusId": 73725148 }, "url": "https://www.semanticscholar.org/paper/8e51d68250db5637cd6bc1de98a99396441399b2", "referenceCount": 445, "citationCount": 1881, "influentialCitationCount": 199, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium", "abstract": "Generative Adversarial Networks (GANs) excel at creating realistic images with complex models for which maximum likelihood is infeasible. 
However, the convergence of GAN training has still not been proved. We propose a two time-scale update rule (TTUR) for training GANs with stochastic gradient descent on arbitrary GAN loss functions. TTUR has an individual learning rate for both the discriminator and the generator. Using the theory of stochastic approximation, we prove that the TTUR converges under mild assumptions to a stationary local Nash equilibrium. The convergence carries over to the popular Adam optimization, for which we prove that it follows the dynamics of a heavy ball with friction and thus prefers flat minima in the objective landscape. For the evaluation of the performance of GANs at image generation, we introduce the \"Frechet Inception Distance\" (FID) which captures the similarity of generated images to real ones better than the Inception Score. In experiments, TTUR improves learning for DCGANs and Improved Wasserstein GANs (WGAN-GP) outperforming conventional GAN training on CelebA, CIFAR-10, SVHN, LSUN Bedrooms, and the One Billion Word Benchmark.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "M. Heusel", "Hubert Ramsauer", "Thomas Unterthiner", "Bernhard Nessler", "Sepp Hochreiter" ], "externalIds": { "MAG": "2963981733", "DBLP": "conf/nips/HeuselRUNH17", "CorpusId": 326772 }, "url": "https://www.semanticscholar.org/paper/231af7dc01a166cac3b5b01ca05778238f796e41", "referenceCount": 62, "citationCount": 10842, "influentialCitationCount": 2301, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Sliced and Radon Wasserstein Barycenters of Measures", "abstract": null, "year": 2014, "venue": "Journal of Mathematical Imaging and Vision", "authors": [ "Nicolas Bonneel", "J. Rabin", "G. Peyré", "H. Pfister" ], "externalIds": { "DBLP": "journals/jmiv/BonneelRPP15", "MAG": "2019106840", "DOI": "10.1007/s10851-014-0506-3", "CorpusId": 1907942 }, "url": "https://www.semanticscholar.org/paper/d60efe595605a9fcc41f7b705d46b27b0caccbce", "referenceCount": 36, "citationCount": 476, "influentialCitationCount": 55, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Kernel Two-Sample Test", "abstract": "We propose a framework for analyzing and comparing distributions, which we use to construct statistical tests to determine if two samples are drawn from different distributions. Our test statistic is the largest difference in expectations over functions in the unit ball of a reproducing kernel Hilbert space (RKHS), and is called the maximum mean discrepancy (MMD).We present two distribution free tests based on large deviation bounds for the MMD, and a third test based on the asymptotic distribution of this statistic. The MMD can be computed in quadratic time, although efficient linear time approximations are available. Our statistic is an instance of an integral probability metric, and various classical metrics on distributions are obtained when alternative function classes are used in place of an RKHS. We apply our two-sample tests to a variety of problems, including attribute matching for databases using the Hungarian marriage method, where they perform strongly. Excellent performance is also obtained when comparing distributions over graphs, for which these are the first such tests.", "year": 2012, "venue": "Journal of machine learning research", "authors": [ "A. Gretton", "Karsten M. Borgwardt", "M. Rasch", "B. 
Scholkopf", "Alex Smola" ], "externalIds": { "MAG": "2212660284", "DBLP": "journals/jmlr/GrettonBRSS12", "DOI": "10.5555/2503308.2188410", "CorpusId": 10742222 }, "url": "https://www.semanticscholar.org/paper/225f78ae8a44723c136646044fd5c5d7f1d3d15a", "referenceCount": 96, "citationCount": 4560, "influentialCitationCount": 977, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Wasserstein Barycenter and Its Application to Texture Mixing", "abstract": null, "year": 2011, "venue": "Scale Space and Variational Methods in Computer Vision", "authors": [ "J. Rabin", "G. Peyré", "J. Delon", "M. Bernot" ], "externalIds": { "MAG": "1639961155", "DBLP": "conf/scalespace/RabinPDB11", "DOI": "10.1007/978-3-642-24785-9_37", "CorpusId": 3571438 }, "url": "https://www.semanticscholar.org/paper/9b208891d1287ebb5b84ac801b41c3313d7e3303", "referenceCount": 38, "citationCount": 624, "influentialCitationCount": 68, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Asymptotic formulae for likelihood-based tests of new physics", "abstract": null, "year": 2010, "venue": "", "authors": [ "G. Cowan", "K. Cranmer", "E. Gross", "O. Vitells" ], "externalIds": { "MAG": "2952098880", "ArXiv": "1007.1727", "DOI": "10.1140/epjc/s10052-011-1554-0", "CorpusId": 17033719 }, "url": "https://www.semanticscholar.org/paper/c9c01a9d6c683399392550227b2b28640dda0375", "referenceCount": 28, "citationCount": 3820, "influentialCitationCount": 199, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Physics" ] }, { "title": "Hilbert Space Embeddings and Metrics on Probability Measures", "abstract": "A Hilbert space embedding for probability measures has recently been proposed, with applications including dimensionality reduction, homogeneity testing, and independence testing. This embedding represents any probability measure as a mean element in a reproducing kernel Hilbert space (RKHS). A pseudometric on the space of probability measures can be defined as the distance between distribution embeddings: we denote this as γk, indexed by the kernel function k that defines the inner product in the RKHS. \n \nWe present three theoretical properties of γk. First, we consider the question of determining the conditions on the kernel k for which γk is a metric: such k are denoted characteristic kernels. Unlike pseudometrics, a metric is zero only when two distributions coincide, thus ensuring the RKHS embedding maps all distributions uniquely (i.e., the embedding is injective). While previously published conditions may apply only in restricted circumstances (e.g., on compact domains), and are difficult to check, our conditions are straightforward and intuitive: integrally strictly positive definite kernels are characteristic. Alternatively, if a bounded continuous kernel is translation-invariant on ℜd, then it is characteristic if and only if the support of its Fourier transform is the entire ℜd. Second, we show that the distance between distributions under γk results from an interplay between the properties of the kernel and the distributions, by demonstrating that distributions are close in the embedding space when their differences occur at higher frequencies. Third, to understand the nature of the topology induced by γk, we relate γk to other popular metrics on probability measures, and present conditions on the kernel k under which γk metrizes the weak topology.", "year": 2009, "venue": "Journal of machine learning research", "authors": [ "Bharath K. 
Sriperumbudur", "A. Gretton", "K. Fukumizu", "B. Scholkopf", "Gert R. G. Lanckriet" ], "externalIds": { "MAG": "2124331852", "DBLP": "journals/jmlr/SriperumbudurGFSL10", "ArXiv": "0907.5309", "DOI": "10.5555/1756006.1859901", "CorpusId": 6107563 }, "url": "https://www.semanticscholar.org/paper/f2fdb43f594b9ae0c32e1d52cf2d6b82dfe46dc3", "referenceCount": 65, "citationCount": 700, "influentialCitationCount": 73, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "A Kernel Method for the Two-Sample-Problem", "abstract": "We propose two statistical tests to determine if two samples are from different distributions. Our test statistic is in both cases the distance between the means of the two samples mapped into a reproducing kernel Hilbert space (RKHS). The first test is based on a large deviation bound for the test statistic, while the second is based on the asymptotic distribution of this statistic. The test statistic can be computed in O(m2) time. We apply our approach to a variety of problems, including attribute matching for databases using the Hungarian marriage method, where our test performs strongly. We also demonstrate excellent performance when comparing distributions over graphs, for which no alternative tests currently exist.", "year": 2006, "venue": "Neural Information Processing Systems", "authors": [ "A. Gretton", "Karsten M. Borgwardt", "M. Rasch", "B. Scholkopf", "Alex Smola" ], "externalIds": { "MAG": "2950536412", "DBLP": "journals/corr/abs-0805-2368", "ArXiv": "0805.2368", "DOI": "10.7551/mitpress/7503.003.0069", "CorpusId": 1993257 }, "url": "https://www.semanticscholar.org/paper/9bca4d7b932e0854c3325f1578cfd17341dd8ea8", "referenceCount": 87, "citationCount": 2192, "influentialCitationCount": 386, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "All of Statistics: A Concise Course in Statistical Inference", "abstract": "WINNER OF THE 2005 DEGROOT PRIZE! This book is for people who want to learn probability and statistics quickly. It brings together many of the main ideas in modern statistics in one place. The book is suitable for students and researchers in statistics, computer science, data mining and machine learning. This book covers a much wider range of topics than a typical introductory text on mathematical statistics. It includes modern topics like nonparametric curve estimation, bootstrapping and classification, topics that are usually relegated to follow-up courses. The reader is assumed to know calculus and a little linear algebra. No previous knowledge of probability and statistics is required. The text can be used at the advanced undergraduate and graduate level.", "year": 2004, "venue": "", "authors": [ "L. 
Wasserman" ], "externalIds": { "MAG": "1496357020", "CorpusId": 60826655 }, "url": "https://www.semanticscholar.org/paper/b3f8348133c1d2f76f1dc1272f748a0b28874d80", "referenceCount": 109, "citationCount": 1929, "influentialCitationCount": 195, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Topics in Optimal Transportation", "abstract": "Introduction The Kantorovich duality Geometry of optimal transportation Brenier's polar factorization theorem The Monge-Ampere equation Displacement interpolation and displacement convexity Geometric and Gaussian inequalities The metric side of optimal transportation A differential point of view on optimal transportation Entropy production and transportation inequalities Problems Bibliography Table of short statements Index.", "year": 2003, "venue": "", "authors": [ "C. Villani" ], "externalIds": { "MAG": "1585160083", "DOI": "10.1090/gsm/058", "CorpusId": 118448577 }, "url": "https://www.semanticscholar.org/paper/59f8451586fba8e9a7c00daace8f8fc62d795b1e", "referenceCount": 0, "citationCount": 4896, "influentialCitationCount": 723, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "A multivariate Kolmogorov-Smirnov test of goodness of fit", "abstract": null, "year": 1997, "venue": "", "authors": [ "A. Justel", "D. Peña", "R. Zamar" ], "externalIds": { "MAG": "1972571116", "DOI": "10.1016/S0167-7152(97)00020-5", "CorpusId": 53506767 }, "url": "https://www.semanticscholar.org/paper/97fbceb3be4503b535770f3a2fd2f62cf71eb624", "referenceCount": 22, "citationCount": 481, "influentialCitationCount": 29, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "A Distribution Free Version of the Smirnov Two Sample Test in the $p$-Variate Case", "abstract": null, "year": 1969, "venue": "", "authors": [ "P. Bickel" ], "externalIds": { "MAG": "2002190927", "DOI": "10.1214/AOMS/1177697800", "CorpusId": 120515991 }, "url": "https://www.semanticscholar.org/paper/0e4524b3d869d8818a74d9aee889bc4e4584c336", "referenceCount": 18, "citationCount": 157, "influentialCitationCount": 15, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "The Kolmogorov-Smirnov Test for Goodness of Fit", "abstract": "Abstract The test is based on the maximum difference between an empirical and a hypothetical cumulative distribution. Percentage points are tabled, and a lower bound to the power function is charted. Confidence limits for a cumulative distribution are described. Examples are given. Indications that the test is superior to the chi-square test are cited.", "year": 1951, "venue": "", "authors": [ "F. Massey" ], "externalIds": { "MAG": "2166481425", "DOI": "10.2307/2280095", "CorpusId": 209842899 }, "url": "https://www.semanticscholar.org/paper/c57a214bfbaaf4b56844ccebf31694f3e2564829", "referenceCount": 11, "citationCount": 5972, "influentialCitationCount": 293, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Table for Estimating the Goodness of Fit of Empirical Distributions", "abstract": null, "year": 1948, "venue": "", "authors": [ "N. 
Smirnov" ], "externalIds": { "MAG": "1974971448", "DOI": "10.1214/AOMS/1177730256", "CorpusId": 120842954 }, "url": "https://www.semanticscholar.org/paper/91e455e20865eba0e93a12fd8080ad640584b133", "referenceCount": 0, "citationCount": 1771, "influentialCitationCount": 125, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "The Large-Sample Distribution of the Likelihood Ratio for Testing Composite Hypotheses", "abstract": null, "year": 1938, "venue": "", "authors": [ "S. S. Wilks" ], "externalIds": { "MAG": "2019841176", "DOI": "10.1214/AOMS/1177732360", "CorpusId": 121129475 }, "url": "https://www.semanticscholar.org/paper/44c28138fb1b4f6ab76a72cca59e78e8b1a2e269", "referenceCount": 0, "citationCount": 3151, "influentialCitationCount": 200, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Optimal Transport for Applied Mathematicians", "abstract": null, "year": 2015, "venue": "", "authors": [ "F. Santambrogio" ], "externalIds": { "MAG": "2502030283", "DOI": "10.1007/978-3-319-20828-2", "CorpusId": 123894440 }, "url": "https://www.semanticscholar.org/paper/adc6dcb0c9ed016fb804ed3572460d312962d8d3", "referenceCount": 0, "citationCount": 1660, "influentialCitationCount": 273, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An Introduction to the Bootstrap", "abstract": "15 Empirical Bayes Method, 2nd edition J.S. Maritz and T. Lwin (1989) Symmetric Multivariate and Related Distributions K.-T. Fang, S. Kotz and K. Ng (1989) Ieneralized Linear Models, 2nd edition P. McCullagh and J.A. Neider (1989) 38 Cyclic Designs J.A. John (1987) 39 Analog Estimation Methods in Econometrics C.F. Manski (1988) 40 Subset Selection in Regression A.J. Miller (1990) 41 Analysis of Repeated Measures M. Crowder and D .J. Hand (1990) 42 Statistical Reasoning with Imprecise Probabilities P. Walley (1990) ~3 Generalized Additive Models T.J. Hastie and R.J. Tibshirani (1990) lnspection Errors for Attributes in Quality Control N.L. Johnson, S. Kotz and x. Wu (1991) ·5 The Analysis of Contingency Tables, 2nd edition B.S. Everitt (1992) 46 The Analysis of Quantal Response Data B.f.T. Morgan (1992) 47 Longitudinal Data with Serial Correlation: A State-Space Approach R.H. Jones (1993) : Differential Geometry and Statistics M.K. Murray and f. W. Rice (1993) 49 Markov Models and Optimization M.H.A. Davies (1993) 50 Chaos and Networks: Statistical and Probabilistic Aspects Edited by O. Barndorff-Nielsen et al. (1993) Number Theoretic Methods in Statistics K.-T. Fang and W. Yuan (1993) 2 Inference and Asymptotics O. Barndorff-Nielsen and D.R. Cox (1993) ;3 Practical Risk Theory for Actuaries C.D. Daykin, T. Pentikainen and M. Pesonen (1993) 54 Statistical Concepts and Applications in Medicine f. Aitchison and I.f. Lauder (1994) 55 Predictive Inference S. Geisser (1993) 56 Model-Free Curve Estimation M. Tarter and M. Lock (1993) 57 An Introduction to the Bootstrap B. Efron and R . Tibshirani (1993) (Full details concerning this series are available from the Publishers.) An Introduction to the Bootstrap", "year": 2007, "venue": "", "authors": [], "externalIds": { "CorpusId": 19878149 }, "url": "https://www.semanticscholar.org/paper/75f8a4d7ed6a0f32fa098cac967de247938d9ce5", "referenceCount": 0, "citationCount": 23655, "influentialCitationCount": 1718, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "“ Randomization tests (4th ed.) 
”", "abstract": null, "year": 2007, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "On the Problem of the Most Efficient Tests of Statistical Hypotheses", "abstract": null, "year": 1933, "venue": "", "authors": [ "J. Neyman", "E. S. Pearson" ], "externalIds": { "MAG": "2166843037", "DOI": "10.1007/978-1-4612-0919-5_6", "CorpusId": 85550403 }, "url": "https://www.semanticscholar.org/paper/a05f5a5c9fe1d8a44f5960571cc6f4fbb75d0d36", "referenceCount": 1, "citationCount": 3310, "influentialCitationCount": 160, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Physics", "Psychology" ] }, { "title": "Sulla determinazione empírica di uma legge di distribuzione", "abstract": null, "year": 1933, "venue": "", "authors": [ "AN Kolmogorov-Smirnov", "A. Kolmogorov", "M. Kolmogorov" ], "externalIds": { "MAG": "3008021358", "CorpusId": 222427298 }, "url": "https://www.semanticscholar.org/paper/c859d88a2b4386e0be5d4b5c91c8dc8996ef6d64", "referenceCount": 0, "citationCount": 1708, "influentialCitationCount": 119, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "“ High-Luminosity Large Hadron Collider (HL-LHC): Technical design report ”", "abstract": null, "year": null, "venue": "CERN Yellow Reports: Monographs. CERN, Geneva, 2020 [ CDS ]", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Metrics for two-sample tests: results on Mixture of Gaussians and Correlated Gaussians models", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“CMS Phase-2 Computing Model: Update Document”", "abstract": null, "year": null, "venue": "tech. 
rep., CERN, Geneva", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "A TensorFlow2 implementation of the metrics discussed in the paper, 2024", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“ On wasserstein two-sample", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "GenerativeModelsMetrics : code and results for the two-sample tests with Correlated Gaussians and Mixture of Gaussians models, 2024", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“ Comparative Study of Coupling and Autoregressive Flows through Robust Statistical Tests ”", "abstract": null, "year": null, "venue": "Symmetry", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“ Fast and accurate simulations of calorimeter showers with normalizing flows ”", "abstract": null, "year": null, "venue": "Phys. Rev. D", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“ Demystifying MMD", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Improvements to SDXL in NovelAI Diffusion V3": { "paper_title": "Improvements to SDXL in NovelAI Diffusion V3", "arxiv_id": "2409.15997v2", "keyword": "generative model", "authors": [ "Juan Ossa", "Eren Doğan", "Alex Birch", "F. Johnson" ], "references": [ { "title": "MegaFusion: Extend Diffusion Models towards Higher-resolution Image Generation without Further Tuning", "abstract": "Diffusion models have emerged as frontrunners in text-to-image generation, however, their fixed image resolution during training often leads to challenges in high-resolution image generation, such as semantic deviations and object replication. This paper introduces MegaFusion, a novel approach that extends existing diffusion-based text-to-image generation models towards efficient higher-resolution generation without additional fine-tuning or extra adaptation. Specifically, we employ an innovative truncate and relay strategy to bridge the denoising processes across different resolutions, allowing for high-resolution image generation in a coarse-to-fine manner. Moreover, by integrating dilated convolutions and noise re-scheduling, we further adapt the model's priors for higher resolution. The versatility and efficacy of MegaFusion make it universally applicable to both latent-space and pixel-space diffusion models, along with other derivative models. 
Extensive experiments confirm that MegaFusion significantly boosts the capability of existing models to produce images of megapixels and various aspect ratios, while only requiring about 40% of the original computational cost.", "year": 2024, "venue": "arXiv.org", "authors": [ "Haoning Wu", "Shaocheng Shen", "Qiang Hu", "Xiaoyun Zhang", "Ya Zhang", "Yanfeng Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2408-11001", "ArXiv": "2408.11001", "DOI": "10.48550/arXiv.2408.11001", "CorpusId": 271909716 }, "url": "https://www.semanticscholar.org/paper/dac5c5546829868a39c9e98a0a22843a6b60403f", "referenceCount": 54, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "FouriScale: A Frequency Perspective on Training-Free High-Resolution Image Synthesis", "abstract": "In this study, we delve into the generation of high-resolution images from pre-trained diffusion models, addressing persistent challenges, such as repetitive patterns and structural distortions, that emerge when models are applied beyond their trained resolutions. To address this issue, we introduce an innovative, training-free approach FouriScale from the perspective of frequency domain analysis. We replace the original convolutional layers in pre-trained diffusion models by incorporating a dilation technique along with a low-pass operation, intending to achieve structural consistency and scale consistency across resolutions, respectively. Further enhanced by a padding-then-crop strategy, our method can flexibly handle text-to-image generation of various aspect ratios. By using the FouriScale as guidance, our method successfully balances the structural integrity and fidelity of generated images, achieving an astonishing capacity of arbitrary-size, high-resolution, and high-quality generation. With its simplicity and compatibility, our method can provide valuable insights for future explorations into the synthesis of ultra-high-resolution images. The code will be released at https://github.com/LeonHLJ/FouriScale.", "year": 2024, "venue": "arXiv.org", "authors": [ "Linjiang Huang", "Rongyao Fang", "Aiping Zhang", "Guanglu Song", "Si Liu", "Yu Liu", "Hongsheng Li" ], "externalIds": { "DBLP": "journals/corr/abs-2403-12963", "ArXiv": "2403.12963", "DOI": "10.48550/arXiv.2403.12963", "CorpusId": 268532384 }, "url": "https://www.semanticscholar.org/paper/5c80f5947fed7c5c2b58a4aebfb7c3be4b3ce35d", "referenceCount": 47, "citationCount": 9, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Analyzing and Improving the Training Dynamics of Diffusion Models", "abstract": "Diffusion models currently dominate the field of data- driven image synthesis with their unparalleled scaling to large datasets. In this paper, we identify and rectify several causes for uneven and ineffective training in the popular ADM diffusion model architecture, without altering its high- level structure. Observing uncontrolled magnitude changes and imbalances in both the network activations and weights over the course of training, we redesign the network layers to preserve activation, weight, and update magnitudes on ex- pectation. We find that systematic application of this philoso- phy eliminates the observed drifts and imbalances, resulting in considerably better networks at equal computational com- plexity. 
Our modifications improve the previous record FID of 2.41 in ImageNet-512 synthesis to 1.81, achieved using fast deterministic sampling. As an independent contribution, we present a method for setting the exponential moving average (EMA) parameters post-hoc, i.e., after completing the training run. This allows precise tuning of EMA length without the cost of performing several training runs, and reveals its surprising interactions with network architecture, training time, and guidance.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Tero Karras", "M. Aittala", "J. Lehtinen", "Janne Hellsten", "Timo Aila", "S. Laine" ], "externalIds": { "DBLP": "journals/corr/abs-2312-02696", "ArXiv": "2312.02696", "DOI": "10.1109/CVPR52733.2024.02282", "CorpusId": 265659032 }, "url": "https://www.semanticscholar.org/paper/bb857c72e7c75fc3d4b44b1dcaa66c62ea10a2e1", "referenceCount": 97, "citationCount": 41, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "ScaleCrafter: Tuning-free Higher-Resolution Visual Generation with Diffusion Models", "abstract": "In this work, we investigate the capability of generating images from pre-trained diffusion models at much higher resolutions than the training image sizes. In addition, the generated images should have arbitrary image aspect ratios. When generating images directly at a higher resolution, 1024 x 1024, with the pre-trained Stable Diffusion using training images of resolution 512 x 512, we observe persistent problems of object repetition and unreasonable object structures. Existing works for higher-resolution generation, such as attention-based and joint-diffusion approaches, cannot well address these issues. As a new perspective, we examine the structural components of the U-Net in diffusion models and identify the crucial cause as the limited perception field of convolutional kernels. Based on this key observation, we propose a simple yet effective re-dilation that can dynamically adjust the convolutional perception field during inference. We further propose the dispersed convolution and noise-damped classifier-free guidance, which can enable ultra-high-resolution image generation (e.g., 4096 x 4096). Notably, our approach does not require any training or optimization. Extensive experiments demonstrate that our approach can address the repetition issue well and achieve state-of-the-art performance on higher-resolution image synthesis, especially in texture details. 
Our work also suggests that a pre-trained diffusion model trained on low-resolution images can be directly used for high-resolution visual generation without further tuning, which may provide insights for future research on ultra-high-resolution image and video synthesis.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Yin-Yin He", "Shaoshu Yang", "Haoxin Chen", "Xiaodong Cun", "Menghan Xia", "Yong Zhang", "Xintao Wang", "Ran He", "Qifeng Chen", "Ying Shan" ], "externalIds": { "DBLP": "journals/corr/abs-2310-07702", "ArXiv": "2310.07702", "DOI": "10.48550/arXiv.2310.07702", "CorpusId": 263834845 }, "url": "https://www.semanticscholar.org/paper/b02d8250bf26701f0e300bb6d6c3cff7558b2754", "referenceCount": 32, "citationCount": 38, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Unmasking Bias in Diffusion Model Training", "abstract": "Denoising diffusion models have emerged as a dominant approach for image generation, however they still suffer from slow convergence in training and color shift issues in sampling. In this paper, we identify that these obstacles can be largely attributed to bias and suboptimality inherent in the default training paradigm of diffusion models. Specifically, we offer theoretical insights that the prevailing constant loss weight strategy in $\\epsilon$-prediction of diffusion models leads to biased estimation during the training phase, hindering accurate estimations of original images. To address the issue, we propose a simple but effective weighting strategy derived from the unlocked biased part. Furthermore, we conduct a comprehensive and systematic exploration, unraveling the inherent bias problem in terms of its existence, impact and underlying reasons. These analyses contribute to advancing the understanding of diffusion models. Empirical results demonstrate that our method remarkably elevates sample quality and displays improved efficiency in both training and sampling processes, by only adjusting loss weighting strategy. The code is released publicly at \\url{https://github.com/yuhuUSTC/Debias}", "year": 2023, "venue": "", "authors": [ "Huikang Yu", "Li Shen", "Jie Huang", "Hongsheng Li", "Feng Zhao" ], "externalIds": { "ArXiv": "2310.08442", "CorpusId": 271709931 }, "url": "https://www.semanticscholar.org/paper/3b02551a802153f738b3008d8da962723264ada5", "referenceCount": 43, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Training-free Diffusion Model Adaptation for Variable-Sized Text-to-Image Synthesis", "abstract": "Diffusion models (DMs) have recently gained attention with state-of-the-art performance in text-to-image synthesis. Abiding by the tradition in deep learning, DMs are trained and evaluated on the images with fixed sizes. However, users are demanding for various images with specific sizes and various aspect ratio. This paper focuses on adapting text-to-image diffusion models to handle such variety while maintaining visual fidelity. First we observe that, during the synthesis, lower resolution images suffer from incomplete object portrayal, while higher resolution images exhibit repetitively disordered presentation. Next, we establish a statistical relationship indicating that attention entropy changes with token quantity, suggesting that models aggregate spatial information in proportion to image resolution. 
The subsequent interpretation on our observations is that objects are incompletely depicted due to limited spatial information for low resolutions, while repetitively disorganized presentation arises from redundant spatial information for high resolutions. From this perspective, we propose a scaling factor to alleviate the change of attention entropy and mitigate the defective pattern observed. Extensive experimental results validate the efficacy of the proposed scaling factor, enabling models to achieve better visual effects, image quality, and text alignment. Notably, these improvements are achieved without additional training or fine-tuning techniques.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Zhiyu Jin", "Xuli Shen", "Bin Li", "Xiangyang Xue" ], "externalIds": { "ArXiv": "2306.08645", "DBLP": "journals/corr/abs-2306-08645", "DOI": "10.48550/arXiv.2306.08645", "CorpusId": 259164394 }, "url": "https://www.semanticscholar.org/paper/166b8c2ee52794c46615c5c52d0390d896b79794", "referenceCount": 61, "citationCount": 20, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Common Diffusion Noise Schedules and Sample Steps are Flawed", "abstract": "We discover that common diffusion noise schedules do not enforce the last timestep to have zero signal-to-noise ratio (SNR), and some implementations of diffusion samplers do not start from the last timestep. Such designs are flawed and do not reflect the fact that the model is given pure Gaussian noise at inference, creating a discrepancy between training and inference. We show that the flawed design causes real problems in existing implementations. In Stable Diffusion, it severely limits the model to only generate images with medium brightness and prevents it from generating very bright and dark samples. We propose a few simple fixes: (1) rescale the noise schedule to enforce zero terminal SNR; (2) train the model with v prediction; (3) change the sampler to always start from the last timestep; (4) rescale classifier-free guidance to prevent over-exposure. These simple changes ensure the diffusion process is congruent between training and inference and allow the model to generate samples more faithful to the original data distribution.", "year": 2023, "venue": "IEEE Workshop/Winter Conference on Applications of Computer Vision", "authors": [ "Shanchuan Lin", "Bingchen Liu", "Jiashi Li", "Xiao Yang" ], "externalIds": { "ArXiv": "2305.08891", "DBLP": "journals/corr/abs-2305-08891", "DOI": "10.1109/WACV57701.2024.00532", "CorpusId": 258714883 }, "url": "https://www.semanticscholar.org/paper/5003fdf35af631d4cb17fd3c1ce2469f665064f1", "referenceCount": 16, "citationCount": 118, "influentialCitationCount": 17, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Efficient Diffusion Training via Min-SNR Weighting Strategy", "abstract": "Denoising diffusion models have been a mainstream approach for image generation, however, training these models often suffers from slow convergence. In this paper, we discovered that the slow convergence is partly due to conflicting optimization directions between timesteps. To address this issue, we treat the diffusion training as a multi-task learning problem, and introduce a simple yet effective approach referred to as Min-SNR-γ. This method adapts loss weights of timesteps based on clamped signal-to-noise ratios, which effectively balances the conflicts among timesteps. 
Our results demonstrate a significant improvement in converging speed, 3.4× faster than previous weighting strategies. It is also more effective, achieving a new record FID score of 2.06 on the ImageNet 256 × 256 benchmark using smaller architectures than that employed in previous state-of-the-art. The code is available at https://github.com/TiankaiHang/Min-SNR-Diffusion-Training.", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Tiankai Hang", "Shuyang Gu", "Chen Li", "Jianmin Bao", "Dong Chen", "Han Hu", "Xin Geng", "B. Guo" ], "externalIds": { "DBLP": "conf/iccv/HangGLB00GG23", "ArXiv": "2303.09556", "DOI": "10.1109/ICCV51070.2023.00684", "CorpusId": 257557255 }, "url": "https://www.semanticscholar.org/paper/d37ee0c440e36df133722093f6a824f4964e8ae0", "referenceCount": 61, "citationCount": 91, "influentialCitationCount": 16, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "simple diffusion: End-to-end diffusion for high resolution images", "abstract": "Currently, applying diffusion models in pixel space of high resolution images is difficult. Instead, existing approaches focus on diffusion in lower dimensional spaces (latent diffusion), or have multiple super-resolution levels of generation referred to as cascades. The downside is that these approaches add additional complexity to the diffusion framework. This paper aims to improve denoising diffusion for high resolution images while keeping the model as simple as possible. The paper is centered around the research question: How can one train a standard denoising diffusion models on high resolution images, and still obtain performance comparable to these alternate approaches? The four main findings are: 1) the noise schedule should be adjusted for high resolution images, 2) It is sufficient to scale only a particular part of the architecture, 3) dropout should be added at specific locations in the architecture, and 4) downsampling is an effective strategy to avoid high resolution feature maps. Combining these simple yet effective techniques, we achieve state-of-the-art on image generation among diffusion models without sampling modifiers on ImageNet.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Emiel Hoogeboom", "J. Heek", "Tim Salimans" ], "externalIds": { "DBLP": "journals/corr/abs-2301-11093", "ArXiv": "2301.11093", "DOI": "10.48550/arXiv.2301.11093", "CorpusId": 256274516 }, "url": "https://www.semanticscholar.org/paper/6e3a3b7a8a0376d867cad72eedf2f9b746f29a33", "referenceCount": 30, "citationCount": 147, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "DPM-Solver++: Fast Solver for Guided Sampling of Diffusion Probabilistic Models", "abstract": "Diffusion probabilistic models (DPMs) have achieved impressive success in high-resolution image synthesis, especially in recent large-scale text-to-image generation applications. An essential technique for improving the sample quality of DPMs is guided sampling, which usually needs a large guidance scale to obtain the best sample quality. The commonly-used fast sampler for guided sampling is DDIM, a first-order diffusion ODE solver that generally needs 100 to 250 steps for high-quality samples. Although recent works propose dedicated high-order solvers and achieve a further speedup for sampling without guidance, their effectiveness for guided sampling has not been well-tested before. 
In this work, we demonstrate that previous high-order fast samplers suffer from instability issues, and they even become slower than DDIM when the guidance scale grows large. To further speed up guided sampling, we propose DPM-Solver++, a high-order solver for the guided sampling of DPMs. DPM-Solver++ solves the diffusion ODE with the data prediction model and adopts thresholding methods to keep the solution matches training data distribution. We further propose a multistep variant of DPM-Solver++ to address the instability issue by reducing the effective step size. Experiments show that DPM-Solver++ can generate high-quality samples within only 15 to 20 steps for guided sampling by pixel-space and latent-space DPMs.", "year": 2022, "venue": "arXiv.org", "authors": [ "Cheng Lu", "Yuhao Zhou", "Fan Bao", "Jianfei Chen", "Chongxuan Li", "Jun Zhu" ], "externalIds": { "ArXiv": "2211.01095", "DBLP": "journals/corr/abs-2211-01095", "DOI": "10.48550/arXiv.2211.01095", "CorpusId": 253254916 }, "url": "https://www.semanticscholar.org/paper/baa4f95081e9663fb045d145acc70049ace16ac9", "referenceCount": 51, "citationCount": 340, "influentialCitationCount": 51, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Imagen Video: High Definition Video Generation with Diffusion Models", "abstract": "We present Imagen Video, a text-conditional video generation system based on a cascade of video diffusion models. Given a text prompt, Imagen Video generates high definition videos using a base video generation model and a sequence of interleaved spatial and temporal video super-resolution models. We describe how we scale up the system as a high definition text-to-video model including design decisions such as the choice of fully-convolutional temporal and spatial super-resolution models at certain resolutions, and the choice of the v-parameterization of diffusion models. In addition, we confirm and transfer findings from previous work on diffusion-based image generation to the video generation setting. Finally, we apply progressive distillation to our video models with classifier-free guidance for fast, high quality sampling. We find Imagen Video not only capable of generating videos of high fidelity, but also having a high degree of controllability and world knowledge, including the ability to generate diverse videos and text animations in various artistic styles and with 3D object understanding. See https://imagen.research.google/video/ for samples.", "year": 2022, "venue": "arXiv.org", "authors": [ "Jonathan Ho", "William Chan", "Chitwan Saharia", "Jay Whang", "Ruiqi Gao", "A. Gritsenko", "Diederik P. Kingma", "Ben Poole", "Mohammad Norouzi", "David J. Fleet", "Tim Salimans" ], "externalIds": { "DBLP": "journals/corr/abs-2210-02303", "ArXiv": "2210.02303", "DOI": "10.48550/arXiv.2210.02303", "CorpusId": 252715883 }, "url": "https://www.semanticscholar.org/paper/498ac9b2e494601d20a3d0211c16acf2b7954a54", "referenceCount": 51, "citationCount": 1042, "influentialCitationCount": 73, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Cold Diffusion: Inverting Arbitrary Image Transforms Without Noise", "abstract": "Standard diffusion models involve an image transform -- adding Gaussian noise -- and an image restoration operator that inverts this degradation. 
We observe that the generative behavior of diffusion models is not strongly dependent on the choice of image degradation, and in fact an entire family of generative models can be constructed by varying this choice. Even when using completely deterministic degradations (e.g., blur, masking, and more), the training and test-time update rules that underlie diffusion models can be easily generalized to create generative models. The success of these fully deterministic models calls into question the community's understanding of diffusion models, which relies on noise in either gradient Langevin dynamics or variational inference, and paves the way for generalized diffusion models that invert arbitrary processes. Our code is available at https://github.com/arpitbansal297/Cold-Diffusion-Models", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Arpit Bansal", "Eitan Borgnia", "Hong-Min Chu", "Jie Li", "Hamid Kazemi", "Furong Huang", "Micah Goldblum", "Jonas Geiping", "T. Goldstein" ], "externalIds": { "ArXiv": "2208.09392", "DBLP": "conf/nips/BansalBCLKHGGG23", "DOI": "10.48550/arXiv.2208.09392", "CorpusId": 251710469 }, "url": "https://www.semanticscholar.org/paper/525f459f369032e2f2fa3eb1d60da34ab99191bc", "referenceCount": 38, "citationCount": 193, "influentialCitationCount": 27, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Classifier-Free Diffusion Guidance", "abstract": "Classifier guidance is a recently introduced method to trade off mode coverage and sample fidelity in conditional diffusion models post training, in the same spirit as low temperature sampling or truncation in other types of generative models. Classifier guidance combines the score estimate of a diffusion model with the gradient of an image classifier and thereby requires training an image classifier separate from the diffusion model. It also raises the question of whether guidance can be performed without a classifier. We show that guidance can be indeed performed by a pure generative model without such a classifier: in what we call classifier-free guidance, we jointly train a conditional and an unconditional diffusion model, and we combine the resulting conditional and unconditional score estimates to attain a trade-off between sample quality and diversity similar to that obtained using classifier guidance.", "year": 2022, "venue": "arXiv.org", "authors": [ "Jonathan Ho" ], "externalIds": { "ArXiv": "2207.12598", "DBLP": "journals/corr/abs-2207-12598", "DOI": "10.48550/arXiv.2207.12598", "CorpusId": 249145348 }, "url": "https://www.semanticscholar.org/paper/af9f365ed86614c800f082bd8eb14be76072ad16", "referenceCount": 25, "citationCount": 2396, "influentialCitationCount": 313, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Elucidating the Design Space of Diffusion-Based Generative Models", "abstract": "We argue that the theory and practice of diffusion-based generative models are currently unnecessarily convoluted and seek to remedy the situation by presenting a design space that clearly separates the concrete design choices. This lets us identify several changes to both the sampling and training processes, as well as preconditioning of the score networks. Together, our improvements yield new state-of-the-art FID of 1.79 for CIFAR-10 in a class-conditional setting and 1.97 in an unconditional setting, with much faster sampling (35 network evaluations per image) than prior designs. 
To further demonstrate their modular nature, we show that our design changes dramatically improve both the efficiency and quality obtainable with pre-trained score networks from previous work, including improving the FID of a previously trained ImageNet-64 model from 2.07 to near-SOTA 1.55, and after re-training with our proposed improvements to a new SOTA of 1.36.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Tero Karras", "M. Aittala", "Timo Aila", "S. Laine" ], "externalIds": { "DBLP": "journals/corr/abs-2206-00364", "ArXiv": "2206.00364", "DOI": "10.48550/arXiv.2206.00364", "CorpusId": 249240415 }, "url": "https://www.semanticscholar.org/paper/2f4c451922e227cbbd4f090b74298445bbd900d0", "referenceCount": 65, "citationCount": 1135, "influentialCitationCount": 301, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Overcoming a Theoretical Limitation of Self-Attention", "abstract": "Although transformers are remarkably effective for many tasks, there are some surprisingly easy-looking regular languages that they struggle with. Hahn shows that for languages where acceptance depends on a single input symbol, a transformer’s classification decisions get closer and closer to random guessing (that is, a cross-entropy of 1) as input strings get longer and longer. We examine this limitation using two languages: PARITY, the language of bit strings with an odd number of 1s, and FIRST, the language of bit strings starting with a 1. We demonstrate three ways of overcoming the limitation implied by Hahn’s lemma. First, we settle an open question by constructing a transformer that recognizes PARITY with perfect accuracy, and similarly for FIRST. Second, we use layer normalization to bring the cross-entropy of both models arbitrarily close to zero. Third, when transformers need to focus on a single position, as for FIRST, we find that they can fail to generalize to longer strings; we offer a simple remedy to this problem that also improves length generalization in machine translation.", "year": 2022, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "David Chiang", "Peter A. Cholak" ], "externalIds": { "DBLP": "journals/corr/abs-2202-12172", "ArXiv": "2202.12172", "ACL": "2022.acl-long.527", "DOI": "10.18653/v1/2022.acl-long.527", "CorpusId": 247084324 }, "url": "https://www.semanticscholar.org/paper/d3dd80269f2542cc173afb3a1df24b582a1e4af2", "referenceCount": 21, "citationCount": 51, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "High-Resolution Image Synthesis with Latent Diffusion Models", "abstract": "By decomposing the image formation process into a sequential application of denoising autoencoders, diffusion models (DMs) achieve state-of-the-art synthesis results on image data and beyond. Additionally, their formulation allows for a guiding mechanism to control the image generation process without retraining. However, since these models typically operate directly in pixel space, optimization of powerful DMs often consumes hundreds of GPU days and inference is expensive due to sequential evaluations. To enable DM training on limited computational resources while retaining their quality and flexibility, we apply them in the latent space of powerful pretrained autoencoders. 
In contrast to previous work, training diffusion models on such a representation allows for the first time to reach a near-optimal point between complexity reduction and detail preservation, greatly boosting visual fidelity. By introducing cross-attention layers into the model architecture, we turn diffusion models into powerful and flexible generators for general conditioning inputs such as text or bounding boxes and high-resolution synthesis becomes possible in a convolutional manner. Our latent diffusion models (LDMs) achieve new state of the art scores for image inpainting and class-conditional image synthesis and highly competitive performance on various tasks, including unconditional image generation, text-to-image synthesis, and super-resolution, while significantly reducing computational requirements compared to pixel-based DMs.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Robin Rombach", "A. Blattmann", "Dominik Lorenz", "Patrick Esser", "B. Ommer" ], "externalIds": { "ArXiv": "2112.10752", "DBLP": "journals/corr/abs-2112-10752", "DOI": "10.1109/CVPR52688.2022.01042", "CorpusId": 245335280 }, "url": "https://www.semanticscholar.org/paper/c10075b3746a9f3dd5811970e93c8ca3ad39b39d", "referenceCount": 110, "citationCount": 9847, "influentialCitationCount": 2744, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SDEdit: Guided Image Synthesis and Editing with Stochastic Differential Equations", "abstract": "Guided image synthesis enables everyday users to create and edit photo-realistic images with minimum effort. The key challenge is balancing faithfulness to the user input (e.g., hand-drawn colored strokes) and realism of the synthesized image. Existing GAN-based methods attempt to achieve such balance using either conditional GANs or GAN inversions, which are challenging and often require additional training data or loss functions for individual applications. To address these issues, we introduce a new image synthesis and editing method, Stochastic Differential Editing (SDEdit), based on a diffusion model generative prior, which synthesizes realistic images by iteratively denoising through a stochastic differential equation (SDE). Given an input image with user guide of any type, SDEdit first adds noise to the input, then subsequently denoises the resulting image through the SDE prior to increase its realism. SDEdit does not require task-specific training or inversions and can naturally achieve the balance between realism and faithfulness. 
SDEdit significantly outperforms state-of-the-art GAN-based methods by up to 98.09% on realism and 91.72% on overall satisfaction scores, according to a human perception study, on multiple tasks, including stroke-based image synthesis and editing as well as image compositing.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Chenlin Meng", "Yutong He", "Yang Song", "Jiaming Song", "Jiajun Wu", "Jun-Yan Zhu", "Stefano Ermon" ], "externalIds": { "DBLP": "conf/iclr/MengHSSWZE22", "ArXiv": "2108.01073", "CorpusId": 245704504 }, "url": "https://www.semanticscholar.org/paper/f671a09e3e5922e6d38cb77dda8d76d5ceac2a27", "referenceCount": 74, "citationCount": 986, "influentialCitationCount": 140, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Alias-Free Generative Adversarial Networks", "abstract": "We observe that despite their hierarchical convolutional nature, the synthesis process of typical generative adversarial networks depends on absolute pixel coordinates in an unhealthy manner. This manifests itself as, e.g., detail appearing to be glued to image coordinates instead of the surfaces of depicted objects. We trace the root cause to careless signal processing that causes aliasing in the generator network. Interpreting all signals in the network as continuous, we derive generally applicable, small architectural changes that guarantee that unwanted information cannot leak into the hierarchical synthesis process. The resulting networks match the FID of StyleGAN2 but differ dramatically in their internal representations, and they are fully equivariant to translation and rotation even at subpixel scales. Our results pave the way for generative models better suited for video and animation.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Tero Karras", "M. Aittala", "S. Laine", "Erik Härkönen", "Janne Hellsten", "J. Lehtinen", "Timo Aila" ], "externalIds": { "MAG": "3174807077", "DBLP": "conf/nips/KarrasALHHLA21", "ArXiv": "2106.12423", "CorpusId": 235606261 }, "url": "https://www.semanticscholar.org/paper/c1ff08b59f00c44f34dfdde55cd53370733a2c19", "referenceCount": 74, "citationCount": 1319, "influentialCitationCount": 140, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Learning Transferable Visual Models From Natural Language Supervision", "abstract": "State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. 
The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at https://github.com/OpenAI/CLIP.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Alec Radford", "Jong Wook Kim", "Chris Hallacy", "A. Ramesh", "Gabriel Goh", "Sandhini Agarwal", "Girish Sastry", "Amanda Askell", "Pamela Mishkin", "Jack Clark", "Gretchen Krueger", "I. Sutskever" ], "externalIds": { "ArXiv": "2103.00020", "DBLP": "conf/icml/RadfordKHRGASAM21", "CorpusId": 231591445 }, "url": "https://www.semanticscholar.org/paper/6f870f7f02a8c59c3e23f407f3ef00dd1dcf8fc4", "referenceCount": 220, "citationCount": 18886, "influentialCitationCount": 5013, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Position, Padding and Predictions: A Deeper Look at Position Information in CNNs", "abstract": null, "year": 2021, "venue": "International Journal of Computer Vision", "authors": [ "Md. Amirul Islam", "M. Kowal", "Sen Jia", "K. Derpanis", "Neil D. B. Bruce" ], "externalIds": { "ArXiv": "2101.12322", "DBLP": "journals/ijcv/IslamKJDB24", "DOI": "10.1007/s11263-024-02069-9", "CorpusId": 231728503 }, "url": "https://www.semanticscholar.org/paper/7eb2e2c086c9b6e76854ee58c92f993bcd171029", "referenceCount": 77, "citationCount": 48, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Denoising Diffusion Probabilistic Models", "abstract": "We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional CIFAR10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art FID score of 3.17. On 256x256 LSUN, we obtain sample quality similar to ProgressiveGAN. Our implementation is available at this https URL", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Jonathan Ho", "Ajay Jain", "P. Abbeel" ], "externalIds": { "ArXiv": "2006.11239", "MAG": "3100572490", "DBLP": "journals/corr/abs-2006-11239", "CorpusId": 219955663 }, "url": "https://www.semanticscholar.org/paper/5c126ae3421f05768d8edd97ecd44b1364e2c99a", "referenceCount": 73, "citationCount": 10778, "influentialCitationCount": 2337, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Note on a Method for Calculating Corrected Sums of Squares and Products", "abstract": "In many problems the \"corrected sum of squares\" of a set of values must be calculated i.e. the sum of squares of the deviations of the values about their mean. The most usual way is to calculate the sum of squares of the values (the \"crude\" sum of squares) and then to subtract a correction factor (which is the product of the total of the values and the mean of the values). 
This subtraction results in a loss of significant figures and if a large set of values is being handled by a computer, this can result in a corrected sum of squares which has many fewer, accurate significant figures than the computer uses in calculations. Various alternative schemes are available to combat this. One method is to scale the values to an arbitrary origin which is approximately equal to the mean: if successful, this will reduce the loss in significant figures. An alternative method is to first calculate the mean and then sum the powers of the deviations from the mean. This involves each value being considered twice: first in evaluating the mean and then when calculating its deviation from the mean. If the set of values is large and is being handled by a computer this can involve either storing the data in a slow speed store or reading the same data into the computer twice. A third method which is less cumbersome than either of these is outlined below. The basis of the method is an iteration formula for deriving the corrected sum of squares for n values from the corrected sum of squares for the first (n 1) of these. We are given a set of xi's (i = 1, * *, k,) for which we require the corrected sum of squares.", "year": 1962, "venue": "", "authors": [ "B. Welford" ], "externalIds": { "MAG": "1987034518", "DOI": "10.1080/00401706.1962.10490022", "CorpusId": 120126049 }, "url": "https://www.semanticscholar.org/paper/946c0b839e5dcca3fcf5a39d07d9f6f7d67887e3", "referenceCount": 1, "citationCount": 652, "influentialCitationCount": 34, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "“crowsonkb/k-diffusion: v0.1.1.post1,”", "abstract": null, "year": 2023, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“Progressive distillation for fast sampling of diffusion models,”", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Improving Image Generation with Better Captions", "abstract": "We show that prompt following abilities of text-to-image models can be substantially improved by training on highly descriptive generated image captions. Existing text-to-image models struggle to follow detailed image descriptions and often ignore words or confuse the meaning of prompts. We hypothesize that this issue stems from noisy and inaccurate image captions in the training dataset. We address this by training a bespoke image captioner and use it to recaption the training dataset. We then train several text-to-image models and find that training on these synthetic captions reliably improves prompt following ability. Finally, we use these findings to build DALL-E 3: a new text-to-image generation system, and benchmark its performance on an evaluation designed to measure prompt following, coherence, and aesthetics, finding that it compares favorably to competitors. We publish samples and code for these evaluations so that future research can continue optimizing this important aspect of text-to-image systems.", "year": null, "venue": "", "authors": [ "James Betker", "Gabriel Goh", "Li Jing", "†. TimBrooks", "Jianfeng Wang", "Linjie Li", "†. LongOuyang", "†. JuntangZhuang", "†. JoyceLee", "†. YufeiGuo", "†. WesamManassra", "†. PrafullaDhariwal", "†. 
CaseyChu", "†. YunxinJiao", "Aditya Ramesh" ], "externalIds": { "CorpusId": 264403242 }, "url": "https://www.semanticscholar.org/paper/cfee1826dd4743eab44c6e27a0cc5970effa4d80", "referenceCount": 33, "citationCount": 536, "influentialCitationCount": 56, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "“Novelai improvements on stable diffusion,”", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“Pytorch discussion: Normalization in the mnist example.”", "abstract": null, "year": null, "venue": "discuss.pytorch.org/", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“comp.ai.neural-nets faq, part 2 of 7: Learning.”", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "These statistics are dataset-dependent", "abstract": null, "year": null, "venue": "illustration datasets", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "We then select a distinct subset of 1 world_size ∗ bsz item IDs according to the global rank of the current process", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“Tensorfloat-32 in the a100 gpu accelerates ai training, hpc up to 20x.”", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Enhancing Pedestrian Trajectory Prediction with Crowd Trip Information": { "paper_title": "Enhancing Pedestrian Trajectory Prediction with Crowd Trip Information", "arxiv_id": "2409.15224v1", "keyword": "generative model", "authors": [ "Rei Tamaru", "Pei Li", "Bin Ran" ], "references": [ { "title": "On Adversarial Robustness of Trajectory Prediction for Autonomous Vehicles", "abstract": "Trajectory prediction is a critical component for autonomous vehicles (AVs) to perform safe planning and navigation. However, few studies have analyzed the adversarial robustness of trajectory prediction or investigated whether the worst-case prediction can still lead to safe planning. To bridge this gap, we study the adversarial robustness of trajectory prediction models by proposing a new adversarial attack that perturbs normal vehicle trajectories to maximize the prediction error. Our experiments on three models and three datasets show that the adversarial prediction increases the prediction error by more than 150%. Our case studies show that if an adversary drives a vehicle close to the target AV following the adversarial trajectory, the AV may make an inaccurate prediction and even make unsafe driving decisions. 
We also explore possible mitigation techniques via data augmentation and trajectory smoothing.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Qingzhao Zhang", "Shengtuo Hu", "Jiachen Sun", "Qi Alfred Chen", "Z. Morley Mao" ], "externalIds": { "DBLP": "journals/corr/abs-2201-05057", "ArXiv": "2201.05057", "DOI": "10.1109/CVPR52688.2022.01473", "CorpusId": 245906353 }, "url": "https://www.semanticscholar.org/paper/5b381a8104e7ea90b412e93f19482320c1b4b665", "referenceCount": 41, "citationCount": 103, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SSAGCN: Social Soft Attention Graph Convolution Network for Pedestrian Trajectory Prediction", "abstract": "Pedestrian trajectory prediction is an important technique of autonomous driving. In order to accurately predict the reasonable future trajectory of pedestrians, it is inevitable to consider social interactions among pedestrians and the influence of surrounding scene simultaneously, which can fully represent the complex behavior information and ensure the rationality of predicted trajectories obeyed realistic rules. In this article, we propose one new prediction model named social soft attention graph convolution network (SSAGCN), which aims to simultaneously handle social interactions among pedestrians and scene interactions between pedestrians and environments. In detail, when modeling social interaction, we propose a new social soft attention function, which fully considers various interaction factors among pedestrians. Also, it can distinguish the influence of pedestrians around the agent based on different factors under various situations. For the scene interaction, we propose one new sequential scene sharing mechanism. The influence of the scene on one agent at each moment can be shared with other neighbors through social soft attention; therefore, the influence of the scene is expanded both in spatial and temporal dimensions. With the help of these improvements, we successfully obtain socially and physically acceptable predicted trajectories. The experiments on public available datasets prove the effectiveness of SSAGCN and have achieved state-of-the-art results. The project code is available at https://github.com/WW-Tong/ssagcn_for_path_prediction", "year": 2021, "venue": "IEEE Transactions on Neural Networks and Learning Systems", "authors": [ "Pei Lv", "Wentong Wang", "Yunxin Wang", "Yuzhen Zhang", "Mingliang Xu", "Changsheng Xu" ], "externalIds": { "ArXiv": "2112.02459", "DBLP": "journals/tnn/LvWWZXX24", "DOI": "10.1109/TNNLS.2023.3250485", "CorpusId": 244909345, "PubMed": "37028327" }, "url": "https://www.semanticscholar.org/paper/43acf2392f300b286c94a8e29c4a3413cfc546d2", "referenceCount": 62, "citationCount": 20, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "ETA Prediction with Graph Neural Networks in Google Maps", "abstract": "Travel-time prediction constitutes a task of high importance in transportation networks, with web mapping services like Google Maps regularly serving vast quantities of travel time queries from users and enterprises alike. Further, such a task requires accounting for complex spatiotemporal interactions (modelling both the topological properties of the road network and anticipating events---such as rush hours---that may occur in the future). Hence, it is an ideal target for graph representation learning at scale. 
Here we present a graph neural network estimator for estimated time of arrival (ETA) which we have deployed in production at Google Maps. While our main architecture consists of standard GNN building blocks, we further detail the usage of training schedule methods such as MetaGradients in order to make our model robust and production-ready. We also provide prescriptive studies: ablating on various architectural decisions and training regimes, and qualitative analyses on real-world situations where our model provides a competitive edge. Our GNN proved powerful when deployed, significantly reducing negative ETA outcomes in several regions compared to the previous production baseline (40+% in cities like Sydney).", "year": 2021, "venue": "International Conference on Information and Knowledge Management", "authors": [ "Austin Derrow-Pinion", "Jennifer She", "David Wong", "O. Lange", "Todd Hester", "L. Perez", "Marc Nunkesser", "Seongjae Lee", "Xueying Guo", "Brett Wiltshire", "P. Battaglia", "Vishal Gupta", "Ang Li", "Zhongwen Xu", "Alvaro Sanchez-Gonzalez", "Yujia Li", "Petar Velivckovi'c" ], "externalIds": { "DBLP": "conf/cikm/Derrow-PinionSW21", "ArXiv": "2108.11482", "DOI": "10.1145/3459637.3481916", "CorpusId": 237303762 }, "url": "https://www.semanticscholar.org/paper/5822490cf59df7f7ccb92b8901f244850b867a66", "referenceCount": 40, "citationCount": 185, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DenseTNT: End-to-end Trajectory Prediction from Dense Goal Sets", "abstract": "Due to the stochasticity of human behaviors, predicting the future trajectories of road agents is challenging for autonomous driving. Recently, goal-based multi-trajectory prediction methods are proved to be effective, where they first score over-sampled goal candidates and then select a final set from them. However, these methods usually involve goal predictions based on sparse pre-defined anchors and heuristic goal selection algorithms. In this work, we propose an anchor-free and end-to-end trajectory prediction model, named DenseTNT, that directly outputs a set of trajectories from dense goal candidates. In addition, we introduce an offline optimization-based technique to provide multi-future pseudo-labels for our final online model. Experiments show that DenseTNT achieves state-of-the-art performance, ranking 1st on the Argoverse motion forecasting benchmark and being the 1st place winner of the 2021 Waymo Open Dataset Motion Prediction Challenge.", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Junru Gu", "Chen Sun", "Hang Zhao" ], "externalIds": { "DBLP": "journals/corr/abs-2108-09640", "ArXiv": "2108.09640", "DOI": "10.1109/ICCV48922.2021.01502", "CorpusId": 237267284 }, "url": "https://www.semanticscholar.org/paper/70ed76ec9bff4eec17e6e0d023f85dffe760b9c1", "referenceCount": 41, "citationCount": 325, "influentialCitationCount": 47, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LOKI: Long Term and Key Intentions for Trajectory Prediction", "abstract": "Recent advances in trajectory prediction have shown that explicit reasoning about agents’ intent is important to accurately forecast their motion. However, the current research activities are not directly applicable to intelligent and safety critical systems. 
This is mainly because very few public datasets are available, and they only consider pedestrian-specific intents for a short temporal horizon from a restricted egocentric view. To this end, we propose LOKI (LOng term and Key Intentions), a novel large-scale dataset that is designed to tackle joint trajectory and intention prediction for heterogeneous traffic agents (pedestrians and vehicles) in an autonomous driving setting. The LOKI dataset is created to discover several factors that may affect intention, including i) agent’s own will, ii) social interactions, iii) environmental constraints, and iv) contextual information. We also propose a model that jointly performs trajectory and intention prediction, showing that recurrently reasoning about intention can assist with trajectory prediction. We show our method outperforms state-of-the-art trajectory prediction methods by upto 27% and also provide a baseline for frame-wise intention estimation. The dataset is available at https://usa.honda-ri.com/loki", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Harshayu Girase", "Haiming Gang", "Srikanth Malla", "Jiachen Li", "Akira Kanehara", "K. Mangalam", "Chiho Choi" ], "externalIds": { "DBLP": "conf/iccv/GiraseGM0KMC21", "ArXiv": "2108.08236", "DOI": "10.1109/ICCV48922.2021.00966", "CorpusId": 237194689 }, "url": "https://www.semanticscholar.org/paper/2899f5a1a706028b44dd6fdeaf67367d2c3bd3c0", "referenceCount": 46, "citationCount": 58, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Pedestrian Path Prediction for Autonomous Driving at Un-Signalized Crosswalk Using W/CDM and MSFM", "abstract": "Pedestrian trajectory prediction is essential for collision avoidance in autonomous driving, which can help autonomous vehicles have a better understanding of traffic environment and perform tasks such as risk assessment in advance. In this paper, pedestrian path prediction at a time horizon of 2s for autonomous driving is systematically investigated using waiting/crossing decision model (W/CDM) and modified social force model (MSFM), and the possible conflict between pedestrians and straight-going vehicles at an un-signalized crosswalk is focused on. First of all, a W/CDM is efficiently developed to judge pedestrians’ waiting/crossing intentions when a straight-going vehicle is approaching. Then the humanoid micro-dynamic MSFM of pedestrians who have been judged to cross is characterized by taking into account the evasion with conflicting pedestrians, the collision avoidance with straight-going vehicles, and the reaction to crosswalk boundary. The influence of pedestrian heterogeneous characteristics is considered for the first time. Moreover, aerial video data of pedestrians and vehicles at an un-signalized crosswalk is collected and analyzed for model calibration. Maximum likelihood estimation (MLE) is proposed to calibrate the non-measurable parameters of the proposed models. Finally, the model validation is conducted with two cases by comparing with the existing methods. 
The result reveals that the integrated method (W/CDM-MSFM) outperforms the existing methods and accurately predicts the path of pedestrians, which can give us great confidence to use the current method to predict the path of pedestrian for autonomous driving with significant accuracy and highly improve pedestrian safety.", "year": 2021, "venue": "IEEE transactions on intelligent transportation systems (Print)", "authors": [ "Xi Zhang", "Hao Chen", "Wenyan Yang", "Wenqiang Jin", "Wangwang Zhu" ], "externalIds": { "DBLP": "journals/tits/ZhangCYJZ21", "MAG": "3011742000", "DOI": "10.1109/TITS.2020.2979231", "CorpusId": 216455190 }, "url": "https://www.semanticscholar.org/paper/d20393205a113cae771bd07a2ab1f270bf6dce8c", "referenceCount": 36, "citationCount": 30, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Predicting Pedestrian Crossing Intention With Feature Fusion and Spatio-Temporal Attention", "abstract": "Predicting vulnerableroad user behavior is an essential prerequisite for deploying Automated Driving Systems (ADS) in the real-world. Pedestrian crossing intention should be recognized in real-time, especially for urban driving. Recent works have shown the potential of using vision-based deep neural network models for this task. However, these models are not robust and certain issues still need to be resolved. First, the global spatio-temporal context that accounts for the interaction between the target pedestrian and the scene has not been properly utilized. Second, the optimal strategy for fusing different sensor data has not been thoroughly investigated. This work addresses the above limitations by introducing a novel neural network architecture to fuse inherently different spatio-temporal features for pedestrian crossing intention prediction. We fuse different phenomena such as sequences of RGB imagery, semantic segmentation masks, and ego-vehicle speed in an optimal way using attention mechanisms and a stack of recurrent neural networks. The optimal architecture was obtained through exhaustive ablation and comparison studies. Extensive comparative experiments on the JAAD and PIE pedestrian action prediction benchmarks demonstrate the effectiveness of the proposed method, where state-of-the-art performance was achieved. Our code is open-source and publicly available: https://github.com/OSU-Haolin/Pedestrian_Crossing_Intention_Prediction.", "year": 2021, "venue": "IEEE Transactions on Intelligent Vehicles", "authors": [ "Dongfang Yang", "Haolin Zhang", "Ekim Yurtsever", "K. Redmill", "U. Ozguner" ], "externalIds": { "ArXiv": "2104.05485", "DBLP": "journals/tiv/YangZYRO22", "DOI": "10.1109/TIV.2022.3162719", "CorpusId": 233210445 }, "url": "https://www.semanticscholar.org/paper/63376fea6cfe40d9291bf321bbf3b749779696ff", "referenceCount": 36, "citationCount": 80, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "From Goals, Waypoints & Paths To Long Term Human Trajectory Forecasting", "abstract": "Human trajectory forecasting is an inherently multi-modal problem. Uncertainty in future trajectories stems from two sources: (a) sources that are known to the agent but unknown to the model, such as long term goals and (b) sources that are unknown to both the agent and the model, such as the intent of other agents and irreducible randomness in decisions. We propose to factorize this uncertainty into its epistemic and aleatoric sources. 
We model the epistemic uncertainty through multimodality in long term goals and the aleatoric uncertainty through multimodality in way-points and paths. To exemplify this dichotomy, we also propose a novel long term trajectory forecasting setting, with prediction horizons up to a minute, up to an order of magnitude longer than prior works. Finally, we present Y-net, a scene compliant trajectory forecasting network that exploits the proposed epistemic and aleatoric structure for diverse trajectory predictions across long prediction horizons. Y-net significantly improves previous state-of-the-art performance on both (a) The short prediction horizon setting on the Stanford Drone (31.7% in FDE) and ETH/UCY datasets (7.4% in FDE) and (b) The proposed long horizon setting on the re-purposed Stanford Drone and Intersection Drone datasets.", "year": 2020, "venue": "IEEE International Conference on Computer Vision", "authors": [ "K. Mangalam", "Yang An", "Harshayu Girase", "J. Malik" ], "externalIds": { "MAG": "3108490973", "DBLP": "journals/corr/abs-2012-01526", "ArXiv": "2012.01526", "DOI": "10.1109/ICCV48922.2021.01495", "CorpusId": 227254503 }, "url": "https://www.semanticscholar.org/paper/78060fa493f597378a286d74c9979deb2775ff25", "referenceCount": 51, "citationCount": 201, "influentialCitationCount": 40, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Goal-driven Long-Term Trajectory Prediction", "abstract": "The prediction of humans’ short-term trajectories has advanced significantly with the use of powerful sequential modeling and rich environment feature extraction. However, long-term prediction is still a major challenge for the current methods as the errors could accumulate along the way. Indeed, consistent and stable prediction far to the end of a trajectory inherently requires deeper analysis into the overall structure of that trajectory, which is related to the pedestrian’s intention on the destination of the journey. In this work, we propose to model a hypothetical process that determines pedestrians’ goals and the impact of such process on long-term future trajectories. We design Goal-driven Trajectory Prediction model - a dual-channel neural network that realizes such intuition. The two channels of the network take their dedicated roles and collaborate to generate future trajectories. Different than conventional goal-conditioned, planning-based methods, the model architecture is designed to generalize the patterns and work across different scenes with arbitrary geometrical and semantic structures. The model is shown to outperform the state-of-the-art in various settings, especially in large prediction horizons. This result is another evidence for the effectiveness of adaptive structured representation of visual and geometrical features in human behavior analysis.", "year": 2020, "venue": "IEEE Workshop/Winter Conference on Applications of Computer Vision", "authors": [ "Hung Tran", "Vuong Le", "T. 
Tran" ], "externalIds": { "DBLP": "conf/wacv/TranL021", "MAG": "3094965135", "ArXiv": "2011.02751", "DOI": "10.1109/WACV48630.2021.00084", "CorpusId": 226254317 }, "url": "https://www.semanticscholar.org/paper/53a18d25f7730f33df54777c6c8430b11565c8da", "referenceCount": 36, "citationCount": 34, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Socially Aware Crowd Navigation with Multimodal Pedestrian Trajectory Prediction for Autonomous Vehicles", "abstract": "Seamlessly operating an autonomous vehicles in a crowded pedestrian environment is a very challenging task. This is because human movement and interactions are very hard to predict in such environments. Recent work has demonstrated that reinforcement learning-based methods have the ability to learn to drive in crowds. However, these methods can have very poor performance due to inaccurate predictions of the pedestrians’ future state as human motion prediction has a large variance. To overcome this problem, we propose a new method, SARL-SGAN-KCE, that combines a deep socially aware attentive value network with a human multimodal trajectory prediction model to help identify the optimal driving policy. We also introduce a novel technique to extend the discrete action space with minimal additional computational requirements. The kinematic constraints of the vehicle are also considered to ensure smooth and safe trajectories. We evaluate our method against the state of art methods for crowd navigation and provide an ablation study to show that our method is safer and closer to human behaviour.", "year": 2020, "venue": "2020 IEEE 23rd International Conference on Intelligent Transportation Systems (ITSC)", "authors": [ "Kunming Li", "Mao Shan", "K. Narula", "Stewart Worrall", "E. Nebot" ], "externalIds": { "DBLP": "journals/corr/abs-2011-11191", "MAG": "3109425324", "ArXiv": "2011.11191", "DOI": "10.1109/ITSC45102.2020.9294304", "CorpusId": 227127129 }, "url": "https://www.semanticscholar.org/paper/66a42c4ea3693a746daa6a9cb01e012dd909eb5c", "referenceCount": 30, "citationCount": 27, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A3T-GCN: Attention Temporal Graph Convolutional Network for Traffic Forecasting", "abstract": "Accurate real-time traffic forecasting is a core technological problem against the implementation of the intelligent transportation system. However, it remains challenging considering the complex spatial and temporal dependencies among traffic flows. In the spatial dimension, due to the connectivity of the road network, the traffic flows between linked roads are closely related. In the temporal dimension, although there exists a tendency among adjacent time points, the importance of distant time points is not necessarily less than that of recent ones, since traffic flows are also affected by external factors. In this study, an attention temporal graph convolutional network (A3T-GCN) was proposed to simultaneously capture global temporal dynamics and spatial correlations in traffic flows. The A3T-GCN model learns the short-term trend by using the gated recurrent units and learns the spatial dependence based on the topology of the road network through the graph convolutional network. Moreover, the attention mechanism was introduced to adjust the importance of different time points and assemble global temporal information to improve prediction accuracy. 
Experimental results in real-world datasets demonstrate the effectiveness and robustness of the proposed A3T-GCN. We observe the improvements in RMSE of 2.51–46.15% and 2.45–49.32% over baselines for the SZ-taxi and Los-loop, respectively. Meanwhile, the Accuracies are 0.95–89.91% and 0.26–10.37% higher than the baselines for the SZ-taxi and Los-loop, respectively.", "year": 2020, "venue": "ISPRS Int. J. Geo Inf.", "authors": [ "Jiawei Zhu", "Yujiao Song", "Ling Zhao", "Haifeng Li" ], "externalIds": { "MAG": "3036262174", "DBLP": "journals/ijgi/BaiZSZHD021", "ArXiv": "2006.11583", "DOI": "10.3390/ijgi10070485", "CorpusId": 219966767 }, "url": "https://www.semanticscholar.org/paper/6e53d907c9927df48d12f6837112c1bd36b40186", "referenceCount": 43, "citationCount": 209, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Multimodal Future Localization and Emergence Prediction for Objects in Egocentric View With a Reachability Prior", "abstract": "In this paper, we investigate the problem of anticipating future dynamics, particularly the future location of other vehicles and pedestrians, in the view of a moving vehicle. We approach two fundamental challenges: (1) the partial visibility due to the egocentric view with a single RGB camera and considerable field-of-view change due to the egomotion of the vehicle; (2) the multimodality of the distribution of future states. In contrast to many previous works, we do not assume structural knowledge from maps. We rather estimate a reachability prior for certain classes of objects from the semantic map of the present image and propagate it into the future using the planned egomotion. Experiments show that the reachability prior combined with multi-hypotheses learning improves multimodal prediction of the future location of tracked objects and, for the first time, the emergence of new objects. We also demonstrate promising zero-shot transfer to unseen datasets.", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Osama Makansi", "Özgün Çiçek", "Kevin Buchicchio", "T. Brox" ], "externalIds": { "DBLP": "journals/corr/abs-2006-04700", "ArXiv": "2006.04700", "MAG": "3034398756", "DOI": "10.1109/cvpr42600.2020.00441", "CorpusId": 219531853 }, "url": "https://www.semanticscholar.org/paper/609407e2087f1ab4cfa4172ac070db91540d0118", "referenceCount": 68, "citationCount": 32, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Spatio-Temporal Graph Transformer Networks for Pedestrian Trajectory Prediction", "abstract": null, "year": 2020, "venue": "European Conference on Computer Vision", "authors": [ "Cunjun Yu", "Xiao Ma", "Jiawei Ren", "Haiyu Zhao", "Shuai Yi" ], "externalIds": { "MAG": "3097237405", "DBLP": "journals/corr/abs-2005-08514", "ArXiv": "2005.08514", "DOI": "10.1007/978-3-030-58610-2_30", "CorpusId": 218673505 }, "url": "https://www.semanticscholar.org/paper/a5d5aaf0de62f37b23251aed9c8266de4991c33c", "referenceCount": 54, "citationCount": 380, "influentialCitationCount": 57, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "TITAN: Future Forecast Using Action Priors", "abstract": "We consider the problem of predicting the future trajectory of scene agents from egocentric views obtained from a moving platform. This problem is important in a variety of domains, particularly for autonomous systems making reactive or strategic decisions in navigation. 
In an attempt to address this problem, we introduce TITAN (Trajectory Inference using Targeted Action priors Network), a new model that incorporates prior positions, actions, and context to forecast future trajectory of agents and future ego-motion. In the absence of an appropriate dataset for this task, we created the TITAN dataset that consists of 700 labeled video-clips (with odometry) captured from a moving vehicle on highly interactive urban traffic scenes in Tokyo. Our dataset includes 50 labels including vehicle states and actions, pedestrian age groups, and targeted pedestrian action attributes that are organized hierarchically corresponding to atomic, simple/complex-contextual, transportive, and communicative actions. To evaluate our model, we conducted extensive experiments on the TITAN dataset, revealing significant performance improvement against baselines and state-of-the-art algorithms. We also report promising results from our Agent Importance Mechanism (AIM), a module which provides insight into assessment of perceived risk by calculating the relative influence of each agent on the future ego-trajectory. The dataset is available at https://usa.honda-ri.com/titan", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Srikanth Malla", "B. Dariush", "Chiho Choi" ], "externalIds": { "MAG": "3035172263", "DBLP": "conf/cvpr/MallaDC20", "ArXiv": "2003.13886", "DOI": "10.1109/cvpr42600.2020.01120", "CorpusId": 214727763 }, "url": "https://www.semanticscholar.org/paper/55303cc7773e5e0528b1dc579bcc348c0fc38569", "referenceCount": 63, "citationCount": 102, "influentialCitationCount": 12, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Social-STGCNN: A Social Spatio-Temporal Graph Convolutional Neural Network for Human Trajectory Prediction", "abstract": "Better machine understanding of pedestrian behaviors enables faster progress in modeling interactions between agents such as autonomous vehicles and humans. Pedestrian trajectories are not only influenced by the pedestrian itself but also by interaction with surrounding objects. Previous methods modeled these interactions by using a variety of aggregation methods that integrate different learned pedestrians states. We propose the Social Spatio-Temporal Graph Convolutional Neural Network (Social-STGCNN), which substitutes the need of aggregation methods by modeling the interactions as a graph. Our results show an improvement over the state of art by 20% on the Final Displacement Error (FDE) and an improvement on the Average Displacement Error (ADE) with 8.5 times less parameters and up to 48 times faster inference speed than previously reported methods. In addition, our model is data efficient, and exceeds previous state of the art on the ADE metric with only 20% of the training data. We propose a kernel function to embed the social interactions between pedestrians within the adjacency matrix. Through qualitative analysis, we show that our model inherited social behaviors that can be expected between pedestrians trajectories. Code is available at https://github.com/abduallahmohamed/Social-STGCNN.", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Abduallah A. Mohamed", "Kun Qian", "Mohamed Elhoseiny", "C. 
Claudel" ], "externalIds": { "DBLP": "journals/corr/abs-2002-11927", "MAG": "3006895970", "ArXiv": "2002.11927", "DOI": "10.1109/cvpr42600.2020.01443", "CorpusId": 211532349 }, "url": "https://www.semanticscholar.org/paper/1b24169304c4e85f60beae5e198e61038cfb3095", "referenceCount": 30, "citationCount": 591, "influentialCitationCount": 112, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PIE: A Large-Scale Dataset and Models for Pedestrian Intention Estimation and Trajectory Prediction", "abstract": "Pedestrian behavior anticipation is a key challenge in the design of assistive and autonomous driving systems suitable for urban environments. An intelligent system should be able to understand the intentions or underlying motives of pedestrians and to predict their forthcoming actions. To date, only a few public datasets were proposed for the purpose of studying pedestrian behavior prediction in the context of intelligent driving. To this end, we propose a novel large-scale dataset designed for pedestrian intention estimation (PIE). We conducted a large-scale human experiment to establish human reference data for pedestrian intention in traffic scenes. We propose models for estimating pedestrian crossing intention and predicting their future trajectory. Our intention estimation model achieves 79% accuracy and our trajectory prediction algorithm outperforms state-of-the-art by 26% on the proposed dataset. We further show that combining pedestrian intention with observed motion improves trajectory prediction. The dataset and models are available at http://data.nvision2.eecs.yorku.ca/PIE_dataset/.", "year": 2019, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Amir Rasouli", "Iuliia Kotseruba", "Toni Kunic", "John K. Tsotsos" ], "externalIds": { "MAG": "2991484432", "DBLP": "conf/iccv/RasouliKKT19", "DOI": "10.1109/ICCV.2019.00636", "CorpusId": 204959605 }, "url": "https://www.semanticscholar.org/paper/36194c76ce53be8e8fba71acbf8d235c7b39342b", "referenceCount": 47, "citationCount": 232, "influentialCitationCount": 61, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "STGAT: Modeling Spatial-Temporal Interactions for Human Trajectory Prediction", "abstract": "Human trajectory prediction is challenging and critical in various applications (e.g., autonomous vehicles and social robots). Because of the continuity and foresight of the pedestrian movements, the moving pedestrians in crowded spaces will consider both spatial and temporal interactions to avoid future collisions. However, most of the existing methods ignore the temporal correlations of interactions with other pedestrians involved in a scene. In this work, we propose a Spatial-Temporal Graph Attention network (STGAT), based on a sequence-to-sequence architecture to predict future trajectories of pedestrians. Besides the spatial interactions captured by the graph attention mechanism at each time-step, we adopt an extra LSTM to encode the temporal correlations of interactions. 
Through comparisons with state-of-the-art methods, our model achieves superior performance on two publicly available crowd datasets (ETH and UCY) and produces more \"socially\" plausible trajectories for pedestrians.", "year": 2019, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Yingfan Huang", "Huikun Bi", "Zhaoxin Li", "Tianlu Mao", "Zhaoqi Wang" ], "externalIds": { "MAG": "2991653934", "DBLP": "conf/iccv/HuangBLMW19", "DOI": "10.1109/ICCV.2019.00637", "CorpusId": 207986127 }, "url": "https://www.semanticscholar.org/paper/f2bd59f9ca31e941f63b900510109308d8ec9acf", "referenceCount": 44, "citationCount": 415, "influentialCitationCount": 86, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multiple Object Forecasting: Predicting Future Object Locations in Diverse Environments", "abstract": "This paper introduces the problem of multiple object forecasting (MOF), in which the goal is to predict future bounding boxes of tracked objects. In contrast to existing works on object trajectory forecasting which primarily consider the problem from a birds-eye perspective, we formulate the problem from an object-level perspective and call for the prediction of full object bounding boxes, rather than trajectories alone. Towards solving this task, we introduce the Citywalks dataset, which consists of over 200k high-resolution video frames. Citywalks comprises of footage recorded in 21 cities from 10 European countries in a variety of weather conditions and over 3.5k unique pedestrian trajectories. For evaluation, we adapt existing trajectory forecasting methods for MOF and confirm cross-dataset generalizability on the MOT-17 dataset without fine-tuning. Finally, we present STED, a novel encoder-decoder architecture for MOF. STED combines visual and temporal features to model both object-motion and ego-motion, and outperforms existing approaches for MOF. Code & dataset link: https://github.com/olly-styles/Multiple-Object-Forecasting", "year": 2019, "venue": "IEEE Workshop/Winter Conference on Applications of Computer Vision", "authors": [ "Olly Styles", "T. Guha", "Victor Sanchez" ], "externalIds": { "DBLP": "journals/corr/abs-1909-11944", "MAG": "2976694788", "ArXiv": "1909.11944", "DOI": "10.1109/WACV45572.2020.9093446", "CorpusId": 202889302 }, "url": "https://www.semanticscholar.org/paper/3a209c030cc42380c644d288d130cf02c41031ca", "referenceCount": 49, "citationCount": 35, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Human motion trajectory prediction: a survey", "abstract": "With growing numbers of intelligent autonomous systems in human environments, the ability of such systems to perceive, understand, and anticipate human behavior becomes increasingly important. Specifically, predicting future positions of dynamic agents and planning considering such predictions are key tasks for self-driving vehicles, service robots, and advanced surveillance systems. This article provides a survey of human motion trajectory prediction. We review, analyze, and structure a large selection of work from different communities and propose a taxonomy that categorizes existing methods based on the motion modeling approach and level of contextual information used. We provide an overview of the existing datasets and performance metrics. We discuss limitations of the state of the art and outline directions for further research.", "year": 2019, "venue": "Int. J. 
Robotics Res.", "authors": [ "Andrey Rudenko", "Luigi Palmieri", "Michael Herman", "Kris M. Kitani", "D. Gavrila", "K. Arras" ], "externalIds": { "MAG": "3033920763", "DBLP": "journals/corr/abs-1905-06113", "ArXiv": "1905.06113", "DOI": "10.1177/0278364920917446", "CorpusId": 155093065 }, "url": "https://www.semanticscholar.org/paper/7b938bffcc946df1970caccbb5f473e494b3af0c", "referenceCount": 323, "citationCount": 647, "influentialCitationCount": 27, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "SR-LSTM: State Refinement for LSTM Towards Pedestrian Trajectory Prediction", "abstract": "In crowd scenarios, reliable trajectory prediction of pedestrians requires insightful understanding of their social behaviors. These behaviors have been well investigated by plenty of studies, while it is hard to be fully expressed by hand-craft rules. Recent studies based on LSTM networks have shown great ability to learn social behaviors. However, many of these methods rely on previous neighboring hidden states but ignore the important current intention of the neighbors. In order to address this issue, we propose a data-driven state refinement module for LSTM network (SR-LSTM), which activates the utilization of the current intention of neighbors, and jointly and iteratively refines the current states of all participants in the crowd through a message passing mechanism. To effectively extract the social effect of neighbors, we further introduce a social-aware information selection mechanism consisting of an element-wise motion gate and a pedestrian-wise attention to select useful message from neighboring pedestrians. Experimental results on two public datasets, i.e. ETH and UCY, demonstrate the effectiveness of our proposed SR-LSTM and we achieve state-of-the-art results.", "year": 2019, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Pu Zhang", "Wanli Ouyang", "Pengfei Zhang", "Jianru Xue", "Nanning Zheng" ], "externalIds": { "MAG": "2963353290", "ArXiv": "1903.02793", "DBLP": "conf/cvpr/ZhangO0XZ19", "DOI": "10.1109/CVPR.2019.01236", "CorpusId": 71148538 }, "url": "https://www.semanticscholar.org/paper/7623616066783e73ad29e84885e1b56dbcf39e97", "referenceCount": 55, "citationCount": 410, "influentialCitationCount": 56, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Group LSTM: Group Trajectory Prediction in Crowded Scenarios", "abstract": null, "year": 2018, "venue": "ECCV Workshops", "authors": [ "N. Bisagno", "Bo Zhang", "N. Conci" ], "externalIds": { "DBLP": "conf/eccv/BisagnoZC18", "MAG": "2911498565", "DOI": "10.1007/978-3-030-11015-4_18", "CorpusId": 59159139 }, "url": "https://www.semanticscholar.org/paper/f7ebabc07b29084a6b46dcd956edc8f283f93aed", "referenceCount": 42, "citationCount": 96, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Social GAN: Socially Acceptable Trajectories with Generative Adversarial Networks", "abstract": "Understanding human motion behavior is critical for autonomous moving platforms (like self-driving cars and social robots) if they are to navigate human-centric environments. This is challenging because human motion is inherently multimodal: given a history of human motion paths, there are many socially plausible ways that people could move in the future. 
We tackle this problem by combining tools from sequence prediction and generative adversarial networks: a recurrent sequence-to-sequence model observes motion histories and predicts future behavior, using a novel pooling mechanism to aggregate information across people. We predict socially plausible futures by training adversarially against a recurrent discriminator, and encourage diverse predictions with a novel variety loss. Through experiments on several datasets we demonstrate that our approach outperforms prior work in terms of accuracy, variety, collision avoidance, and computational complexity.", "year": 2018, "venue": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition", "authors": [ "Agrim Gupta", "Justin Johnson", "Li Fei-Fei", "S. Savarese", "Alexandre Alahi" ], "externalIds": { "MAG": "2951873879", "ArXiv": "1803.10892", "DBLP": "conf/cvpr/GuptaJFSA18", "DOI": "10.1109/CVPR.2018.00240", "CorpusId": 4461350 }, "url": "https://www.semanticscholar.org/paper/49c076bbc21ab76720b610ab3840c15ce3dc4e6c", "referenceCount": 50, "citationCount": 1696, "influentialCitationCount": 426, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SS-LSTM: A Hierarchical LSTM Model for Pedestrian Trajectory Prediction", "abstract": "Pedestrian trajectory prediction is an extremely challenging problem because of the crowdedness and clutter of the scenes. Previous deep learning LSTM-based approaches focus on the neighbourhood influence of pedestrians but ignore the scene layouts in pedestrian trajectory prediction. In this paper, a novel hierarchical LSTM-based network is proposed to consider both the influence of social neighbourhood and scene layouts. Our SS-LSTM, which stands for Social-Scene-LSTM, uses three different LSTMs to capture person, social and scene scale information. We also use a circular shape neighbourhood setting instead of the traditional rectangular shape neighbourhood in the social scale. We evaluate our proposed method against two baseline methods and a state-of-art technique on three public datasets. The results show that our method outperforms other methods and that using circular shape neighbourhood improves the prediction accuracy.", "year": 2018, "venue": "IEEE Workshop/Winter Conference on Applications of Computer Vision", "authors": [ "Hao Xue", "D. Huynh", "Mark Reynolds" ], "externalIds": { "MAG": "2801667201", "DBLP": "conf/wacv/XueHR18", "DOI": "10.1109/WACV.2018.00135", "CorpusId": 13663350 }, "url": "https://www.semanticscholar.org/paper/498c8abcef3b709be97167ea5870e85d0c586945", "referenceCount": 30, "citationCount": 325, "influentialCitationCount": 21, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Data-driven Model for Interaction-Aware Pedestrian Motion Prediction in Object Cluttered Environments", "abstract": "This paper reports on a data-driven, interaction-aware motion prediction approach for pedestrians in environments cluttered with static obstacles. When navigating in such workspaces shared with humans, robots need accurate motion predictions of the surrounding pedestrians. Human navigation behavior is mostly influenced by their surrounding pedestrians and by the static obstacles in their vicinity. In this paper we introduce a new model based on Long-Short Term Memory (LSTM) neural networks, which is able to learn human motion behavior from demonstrated data. 
To the best of our knowledge, this is the first approach using LSTMs, that incorporates both static obstacles and surrounding pedestrians for trajectory forecasting. As part of the model, we introduce a new way of encoding surrounding pedestrians based on a 1d-grid in polar angle space. We evaluate the benefit of interaction-aware motion prediction and the added value of incorporating static obstacles on both simulation and real-world datasets by comparing with state-of-the-art approaches. The results show, that our new approach outperforms the other approaches while being very computationally efficient and that taking into account static obstacles for motion predictions significantly improves the prediction accuracy, especially in cluttered environments.", "year": 2017, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Mark Pfeiffer", "Giuseppe Paolo", "H. Sommer", "Juan I. Nieto", "R. Siegwart", "César Cadena" ], "externalIds": { "ArXiv": "1709.08528", "DBLP": "journals/corr/abs-1709-08528", "MAG": "2758542544", "DOI": "10.1109/ICRA.2018.8461157", "CorpusId": 3543712 }, "url": "https://www.semanticscholar.org/paper/6696013a9d2e5e3b3939cfba9aef6fecbb0ed8ef", "referenceCount": 34, "citationCount": 100, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Learning Social Etiquette: Human Trajectory Understanding In Crowded Scenes", "abstract": null, "year": 2016, "venue": "European Conference on Computer Vision", "authors": [ "Alexandre Robicquet", "Amir Sadeghian", "Alexandre Alahi", "S. Savarese" ], "externalIds": { "DBLP": "conf/eccv/RobicquetSAS16", "MAG": "2519586580", "DOI": "10.1007/978-3-319-46484-8_33", "CorpusId": 3150075 }, "url": "https://www.semanticscholar.org/paper/d927a7b282c8a5896a72fc1efac6ace7a0fc0b8a", "referenceCount": 47, "citationCount": 712, "influentialCitationCount": 112, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Social LSTM: Human Trajectory Prediction in Crowded Spaces", "abstract": "Pedestrians follow different trajectories to avoid obstacles and accommodate fellow pedestrians. Any autonomous vehicle navigating such a scene should be able to foresee the future positions of pedestrians and accordingly adjust its path to avoid collisions. This problem of trajectory prediction can be viewed as a sequence generation task, where we are interested in predicting the future trajectory of people based on their past positions. Following the recent success of Recurrent Neural Network (RNN) models for sequence prediction tasks, we propose an LSTM model which can learn general human movement and predict their future trajectories. This is in contrast to traditional approaches which use hand-crafted functions such as Social forces. We demonstrate the performance of our method on several public datasets. Our model outperforms state-of-the-art methods on some of these datasets. We also analyze the trajectories predicted by our model to demonstrate the motion behaviour learned by our model.", "year": 2016, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Alexandre Alahi", "Kratarth Goel", "Vignesh Ramanathan", "Alexandre Robicquet", "Li Fei-Fei", "S. 
Savarese" ], "externalIds": { "DBLP": "conf/cvpr/AlahiGRRLS16", "MAG": "2424778531", "DOI": "10.1109/CVPR.2016.110", "CorpusId": 9854676 }, "url": "https://www.semanticscholar.org/paper/e11a020f0d2942d09127daf1ce7e658d3bf67291", "referenceCount": 81, "citationCount": 2568, "influentialCitationCount": 525, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Trajectory analysis and prediction for improved pedestrian safety: Integrated framework and evaluations", "abstract": "This paper presents a monocular and purely vision based pedestrian trajectory tracking and prediction framework with integrated map-based hazard inference. In Advanced Driver Assistance systems research, a lot of effort has been put into pedestrian detection over the last decade, and several pedestrian detection systems are indeed showing impressive results. Considerably less effort has been put into processing the detections further. We present a tracking system for pedestrians, which based on detection bounding boxes tracks pedestrians and is able to predict their positions in the near future. The tracking system is combined with a module which, based on the car's GPS position acquires a map and uses the road information in the map to know where the car can drive. Then the system warns the driver about pedestrians at risk, by combining the information about hazardous areas for pedestrians with a probabilistic position prediction for all observed pedestrians.", "year": 2015, "venue": "2015 IEEE Intelligent Vehicles Symposium (IV)", "authors": [ "Andreas Møgelmose", "M. Trivedi", "T. Moeslund" ], "externalIds": { "DBLP": "conf/ivs/MogelmoseTM15", "MAG": "1584874746", "DOI": "10.1109/IVS.2015.7225707", "CorpusId": 13441612 }, "url": "https://www.semanticscholar.org/paper/8825c433910709a7ee1b2eedad4875e093f3f33c", "referenceCount": 27, "citationCount": 69, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Modelling shared space users via rule-based social force model", "abstract": null, "year": 2015, "venue": "", "authors": [ "Bani Anvari", "M. Bell", "A. Sivakumar", "W. Ochieng" ], "externalIds": { "MAG": "2087498670", "DOI": "10.1016/J.TRC.2014.10.012", "CorpusId": 3925979 }, "url": "https://www.semanticscholar.org/paper/f506d797e9f261ff61e62884270d09def3363de0", "referenceCount": 57, "citationCount": 131, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering" ] }, { "title": "Modeling and Prediction of Pedestrian Behavior based on the Sub-goal Concept", "abstract": "This study addresses a method to predict pedestrians' long term behavior in order to enable a robot to provide them services. In order to do that we want to be able to predict their final goal and the trajectory they will follow to reach it. We attain this task borrowing from human science studies the concept of sub-goals, defined as points and landmarks of the environment towards which pedestrians walk or where they take directional choices before reaching the final destination. We retrieve the position of these sub-goals from the analysis of a large set of pedestrian trajectories in a shopping mall, and model their global behavior through transition probabilities between sub-goals. The method allows us to predict the future position of pedestrians on the basis of the observation of their trajectory up to the moment. 
Keywords-component; pedestrian models; sub-goal retrieval; behavior anticipation", "year": 2012, "venue": "Robotics: Science and Systems", "authors": [ "Tetsushi Ikeda", "Yoshihiro Chigodo", "Daniel J. Rea", "F. Zanlungo", "M. Shiomi", "T. Kanda" ], "externalIds": { "MAG": "2295055159", "DBLP": "conf/rss/IkedaCRZSK12", "DOI": "10.15607/RSS.2012.VIII.018", "CorpusId": 8468053 }, "url": "https://www.semanticscholar.org/paper/ab54453a571093b5f0b46ead0dc4f7d82b9fad36", "referenceCount": 26, "citationCount": 68, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "You'll never walk alone: Modeling social behavior for multi-target tracking", "abstract": "Object tracking typically relies on a dynamic model to predict the object's location from its past trajectory. In crowded scenarios a strong dynamic model is particularly important, because more accurate predictions allow for smaller search regions, which greatly simplifies data association. Traditional dynamic models predict the location for each target solely based on its own history, without taking into account the remaining scene objects. Collisions are resolved only when they happen. Such an approach ignores important aspects of human behavior: people are driven by their future destination, take into account their environment, anticipate collisions, and adjust their trajectories at an early stage in order to avoid them. In this work, we introduce a model of dynamic social behavior, inspired by models developed for crowd simulation. The model is trained with videos recorded from birds-eye view at busy locations, and applied as a motion model for multi-people tracking from a vehicle-mounted camera. Experiments on real sequences show that accounting for social interactions and scene knowledge improves tracking performance, especially during occlusions.", "year": 2009, "venue": "IEEE International Conference on Computer Vision", "authors": [ "S. Pellegrini", "Andreas Ess", "K. Schindler", "L. Gool" ], "externalIds": { "DBLP": "conf/iccv/PellegriniESG09", "MAG": "2532516272", "DOI": "10.1109/ICCV.2009.5459260", "CorpusId": 7065547 }, "url": "https://www.semanticscholar.org/paper/7d7a98a81715e2d3747071722b2deeed8937d122", "referenceCount": 25, "citationCount": 1456, "influentialCitationCount": 224, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Crowds by Example", "abstract": "We present an example‐based crowd simulation technique. Most crowd simulation techniques assume that the behavior exhibited by each person in the crowd can be defined by a restricted set of rules. This assumption limits the behavioral complexity of the simulated agents. By learning from real‐world examples, our autonomous agents display complex natural behaviors that are often missing in crowd simulations. Examples are created from tracked video segments of real pedestrian crowds. During a simulation, autonomous agents search for examples that closely match the situation that they are facing. Trajectories taken by real people in similar situations, are copied to the simulated agents, resulting in seemingly natural behaviors.", "year": 2007, "venue": "Computer graphics forum (Print)", "authors": [ "Alon Lerner", "Y. 
Chrysanthou", "Dani Lischinski" ], "externalIds": { "MAG": "1970206276", "DBLP": "journals/cgf/LernerCL07", "DOI": "10.1111/j.1467-8659.2007.01089.x", "CorpusId": 17374844 }, "url": "https://www.semanticscholar.org/paper/3921f459a9ee26827963abc4abf013b4cc9cbd32", "referenceCount": 32, "citationCount": 1054, "influentialCitationCount": 147, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Simulating dynamical features of escape panic", "abstract": null, "year": 2000, "venue": "Nature", "authors": [ "D. Helbing", "I. Farkas", "T. Vicsek" ], "externalIds": { "ArXiv": "cond-mat/0009448", "MAG": "1888172398", "DOI": "10.1038/35035023", "CorpusId": 310346, "PubMed": "11028994" }, "url": "https://www.semanticscholar.org/paper/e009942441ac200f7950077d099de47d41d81e2b", "referenceCount": 35, "citationCount": 4540, "influentialCitationCount": 336, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Physics", "Computer Science" ] }, { "title": "Boltzmann-like and Boltzmann-Fokker-Planck equations as a foundation of behavioral models", "abstract": null, "year": 1993, "venue": "", "authors": [ "D. Helbing" ], "externalIds": { "ArXiv": "cond-mat/9805384", "MAG": "2071171834", "DOI": "10.1016/0378-4371(93)90034-2", "CorpusId": 8558414 }, "url": "https://www.semanticscholar.org/paper/69d3d136ccae439701cd075f28255fb8317aa0c6", "referenceCount": 42, "citationCount": 65, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Mathematics" ] }, { "title": "Handbook for Proxemic Research", "abstract": null, "year": 1974, "venue": "", "authors": [ "E. Hall" ], "externalIds": { "MAG": "1986800603", "DOI": "10.1111/an.1995.36.2.40", "CorpusId": 143819580 }, "url": "https://www.semanticscholar.org/paper/60edad7c990c841d05e62b227fabe4c5bfcb6d26", "referenceCount": 0, "citationCount": 126, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Sociology" ] }, { "title": "Pedestrian trajectory prediction based on deep con-22", "abstract": null, "year": 2020, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "volutional lstm network", "abstract": null, "year": null, "venue": "IEEE Transactions on Intelligent Transportation Systems", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "We integrate the pre-trained RNTransformer into trajectory prediction models, improving the accuracy of pedestrian trajectory prediction", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Pedes-trian trajectory prediction in pedestrian-vehicle mixed environments: A systematic review", "abstract": null, "year": null, "venue": "IEEE Transactions on Intelligent Transportation Systems", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "The RNTransformer shows significant improvements in prediction accuracy for various baseline models, validated through extensive experiments on a variety of datasets", "abstract": null, "year": null, "venue": "github", "authors": [], 
"externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2022. Social-implicit: Rethinking trajectory prediction evaluation and the effectiveness of implicit maximum likelihood estimation", "abstract": null, "year": null, "venue": "European Conference on Computer Vision", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2022. Scaling vision trans-formers", "abstract": null, "year": null, "venue": "Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Data-driven model discovery with Kolmogorov-Arnold networks": { "paper_title": "Data-driven model discovery with Kolmogorov-Arnold networks", "arxiv_id": "2409.15167v1", "keyword": "generative model", "authors": [ "Mohammadamin Moradi", "Shirin Panahi", "Erik M. Bollt", "Ying-Cheng Lai" ], "references": [ { "title": "Finding nonlinear system equations and complex network structures from data: A sparse optimization approach.", "abstract": "In applications of nonlinear and complex dynamical systems, a common situation is that the system can be measured, but its structure and the detailed rules of dynamical evolution are unknown. The inverse problem is to determine the system equations and structure from time series. The principle of exploiting sparse optimization to find the equations of dynamical systems from data was first articulated in 2011 by the ASU group. The basic idea is to expand the system equations into a power series or a Fourier series of a finite number of terms and then to determine the vector of the expansion coefficients based solely on data through sparse optimization. This Tutorial presents a brief review of the recent progress in this area. Issues discussed include discovering the equations of stationary or nonstationary chaotic systems to enable the prediction of critical transition and system collapse, inferring the full topology of complex oscillator networks and social networks hosting evolutionary game dynamics, and identifying partial differential equations for spatiotemporal dynamical systems. Situations where sparse optimization works or fails are pointed out. The relation with the traditional delay-coordinate embedding method is discussed, and the recent development of a model-free, data-driven prediction framework based on machine learning is mentioned.", "year": 2020, "venue": "Chaos", "authors": [ "Y. Lai" ], "externalIds": { "MAG": "3112054652", "ArXiv": "2012.04556", "DOI": "10.1063/5.0062042", "CorpusId": 227745326, "PubMed": "34470223" }, "url": "https://www.semanticscholar.org/paper/2b5477341805130c8be96405273bc1ad9452f806", "referenceCount": 148, "citationCount": 20, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Physics", "Medicine" ] }, { "title": "How Entropic Regression Beats the Outliers Problem in Nonlinear System Identification", "abstract": "In this work, we developed a nonlinear System Identification (SID) method that we called Entropic Regression. Our method adopts an information-theoretic measure for the data-driven discovery of the underlying dynamics. 
Our method shows robustness toward noise and outliers, and it outperforms many of the current state-of-the-art methods. Moreover, the method of Entropic Regression overcomes many of the major limitations of the current methods such as sloppy parameters, diverse scale, and SID in high-dimensional systems such as complex networks. The use of information-theoretic measures in entropic regression has unique advantages, due to the Asymptotic Equipartition Property of probability distributions, that outliers and other low-occurrence events are conveniently and intrinsically de-emphasized as not-typical, by definition. We provide a numerical comparison with the current state-of-the-art methods in sparse regression, and we apply the methods to different chaotic systems such as the Lorenz System, the Kuramoto-Sivashinsky equations, and the Double-Well Potential.", "year": 2019, "venue": "Chaos", "authors": [ "A. AlMomani", "Jie Sun", "E. Bollt" ], "externalIds": { "DBLP": "journals/corr/abs-1905-08061", "MAG": "2946149613", "ArXiv": "1905.08061", "DOI": "10.1063/1.5133386", "CorpusId": 53575323, "PubMed": "32013491" }, "url": "https://www.semanticscholar.org/paper/af625f009d639a4bc37feec0aecddc9bf9878531", "referenceCount": 76, "citationCount": 44, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics", "Physics", "Medicine" ] }, { "title": "Predicting tipping points in mutualistic networks through dimension reduction", "abstract": "Significance Complex systems in many fields, because of their intrinsic nonlinear dynamics, can exhibit a tipping point (point of no return) at which a total collapse of the system occurs. In ecosystems, environmental deterioration can lead to evolution toward a tipping point. To predict tipping point is an outstanding and extremely challenging problem. Using complex bipartite mutualistic networks, we articulate a dimension reduction strategy and establish its general applicability to predicting tipping points using a large number of empirical networks. Not only can our reduced model serve as a paradigm for understanding the tipping point dynamics in real world ecosystems for safeguarding pollinators, the principle can also be extended to other disciplines to address critical issues, such as resilience and sustainability. Complex networked systems ranging from ecosystems and the climate to economic, social, and infrastructure systems can exhibit a tipping point (a “point of no return”) at which a total collapse of the system occurs. To understand the dynamical mechanism of a tipping point and to predict its occurrence as a system parameter varies are of uttermost importance, tasks that are hindered by the often extremely high dimensionality of the underlying system. Using complex mutualistic networks in ecology as a prototype class of systems, we carry out a dimension reduction process to arrive at an effective 2D system with the two dynamical variables corresponding to the average pollinator and plant abundances. We show, using 59 empirical mutualistic networks extracted from real data, that our 2D model can accurately predict the occurrence of a tipping point, even in the presence of stochastic disturbances. We also find that, because of the lack of sufficient randomness in the structure of the real networks, weighted averaging is necessary in the dimension reduction process. 
Our reduced model can serve as a paradigm for understanding and predicting the tipping point dynamics in real world mutualistic networks for safeguarding pollinators, and the general principle can be extended to a broad range of disciplines to address the issues of resilience and sustainability.", "year": 2018, "venue": "Proceedings of the National Academy of Sciences of the United States of America", "authors": [ "Junjie Jiang", "Zi-Gang Huang", "T. Seager", "Wei Lin", "C. Grebogi", "A. Hastings", "Y. Lai" ], "externalIds": { "PubMedCentral": "5789925", "MAG": "2784207258", "DOI": "10.1073/pnas.1714958115", "CorpusId": 9120416, "PubMed": "29311325" }, "url": "https://www.semanticscholar.org/paper/fb8f84cf65aad5c83797dc81fe543af8f13f3064", "referenceCount": 40, "citationCount": 137, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Data-based reconstruction of complex geospatial networks, nodal positioning and detection of hidden nodes", "abstract": "Given a complex geospatial network with nodes distributed in a two-dimensional region of physical space, can the locations of the nodes be determined and their connection patterns be uncovered based solely on data? We consider the realistic situation where time series/signals can be collected from a single location. A key challenge is that the signals collected are necessarily time delayed, due to the varying physical distances from the nodes to the data collection centre. To meet this challenge, we develop a compressive-sensing-based approach enabling reconstruction of the full topology of the underlying geospatial network and more importantly, accurate estimate of the time delays. A standard triangularization algorithm can then be employed to find the physical locations of the nodes in the network. We further demonstrate successful detection of a hidden node (or a hidden source or threat), from which no signal can be obtained, through accurate detection of all its neighbouring nodes. As a geospatial network has the feature that a node tends to connect with geophysically nearby nodes, the localized region that contains the hidden node can be identified.", "year": 2016, "venue": "Royal Society Open Science", "authors": [ "Ri-Qi Su", "Wenxu Wang", "Xiao Wang", "Y. Lai" ], "externalIds": { "PubMedCentral": "4736942", "MAG": "2287231344", "DOI": "10.1098/rsos.150577", "CorpusId": 15363642, "PubMed": "26909187" }, "url": "https://www.semanticscholar.org/paper/e953a4720f0318ea95d5710a6f1fdf057af2415e", "referenceCount": 59, "citationCount": 25, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Discovering governing equations from data by sparse identification of nonlinear dynamical systems", "abstract": "Significance Understanding dynamic constraints and balances in nature has facilitated rapid development of knowledge and enabled technology, including aircraft, combustion engines, satellites, and electrical power. This work develops a novel framework to discover governing equations underlying a dynamical system simply from data measurements, leveraging advances in sparsity techniques and machine learning. The resulting models are parsimonious, balancing model complexity with descriptive ability while avoiding overfitting. 
There are many critical data-driven problems, such as understanding cognition from neural recordings, inferring climate patterns, determining stability of financial markets, predicting and suppressing the spread of disease, and controlling turbulence for greener transportation and energy. With abundant data and elusive laws, data-driven discovery of dynamics will continue to play an important role in these efforts. Extracting governing equations from data is a central challenge in many diverse areas of science and engineering. Data are abundant whereas models often remain elusive, as in climate science, neuroscience, ecology, finance, and epidemiology, to name only a few examples. In this work, we combine sparsity-promoting techniques and machine learning with nonlinear dynamical systems to discover governing equations from noisy measurement data. The only assumption about the structure of the model is that there are only a few important terms that govern the dynamics, so that the equations are sparse in the space of possible functions; this assumption holds for many physical systems in an appropriate basis. In particular, we use sparse regression to determine the fewest terms in the dynamic governing equations required to accurately represent the data. This results in parsimonious models that balance accuracy with model complexity to avoid overfitting. We demonstrate the algorithm on a wide range of problems, from simple canonical systems, including linear and nonlinear oscillators and the chaotic Lorenz system, to the fluid vortex shedding behind an obstacle. The fluid example illustrates the ability of this method to discover the underlying dynamics of a system that took experts in the community nearly 30 years to resolve. We also show that this method generalizes to parameterized systems and systems that are time-varying or have external forcing.", "year": 2015, "venue": "Proceedings of the National Academy of Sciences of the United States of America", "authors": [ "S. Brunton", "J. Proctor", "J. Kutz" ], "externalIds": { "MAG": "2239232218", "ArXiv": "1509.03580", "DOI": "10.1073/pnas.1517384113", "CorpusId": 1594001, "PubMed": "27035946" }, "url": "https://www.semanticscholar.org/paper/5d150cec2775f9bc863760448f14104cc8f42368", "referenceCount": 62, "citationCount": 3299, "influentialCitationCount": 296, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Medicine" ] }, { "title": "Identifying Chaotic FitzHugh-Nagumo Neurons Using Compressive Sensing", "abstract": "We develop a completely data-driven approach to reconstructing coupled neuronal networks that contain a small subset of chaotic neurons. Such chaotic elements can be the result of parameter shift in their individual dynamical systems and may lead to abnormal functions of the network. To accurately identify the chaotic neurons may thus be necessary and important, for example, applying appropriate controls to bring the network to a normal state. However, due to couplings among the nodes, the measured time series, even from non-chaotic neurons, would appear random, rendering inapplicable traditional nonlinear time-series analysis, such as the delay-coordinate embedding method, which yields information about the global dynamics of the entire network. Our method is based on compressive sensing. In particular, we demonstrate that identifying chaotic elements can be formulated as a general problem of reconstructing the nodal dynamical systems, network connections and all coupling functions, as well as their weights. 
The working and efficiency of the method are illustrated by using networks of non-identical FitzHugh–Nagumo neurons with randomly-distributed coupling weights.", "year": 2014, "venue": "Entropy", "authors": [ "Ri-Qi Su", "Y. Lai", "Xiao Wang" ], "externalIds": { "MAG": "2066394921", "DBLP": "journals/entropy/SuLW14", "DOI": "10.3390/e16073889", "CorpusId": 8916781 }, "url": "https://www.semanticscholar.org/paper/59aaa0d8c9a77391c9031b9ba67c64b19bde58fd", "referenceCount": 42, "citationCount": 19, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Reconstructing propagation networks with natural diversity and identifying hidden sources", "abstract": null, "year": 2014, "venue": "Nature Communications", "authors": [ "Zhesi Shen", "Wen-Xu Wang", "Ying Fan", "Z. Di", "Y. Lai" ], "externalIds": { "DBLP": "journals/corr/ShenWFDL14", "MAG": "2033150668", "PubMedCentral": "4104449", "ArXiv": "1407.4451", "DOI": "10.1038/ncomms5323", "CorpusId": 7815511, "PubMed": "25014310" }, "url": "https://www.semanticscholar.org/paper/a34f8d05e44966a5fa2a8bdcf553d3533f1851d8", "referenceCount": 76, "citationCount": 180, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Physics", "Medicine" ] }, { "title": "Uncovering hidden nodes in complex networks in the presence of noise", "abstract": null, "year": 2014, "venue": "Scientific Reports", "authors": [ "Ri-Qi Su", "Y. Lai", "Xiao Wang", "Y. Do" ], "externalIds": { "MAG": "1998320279", "PubMedCentral": "3909906", "DOI": "10.1038/srep03944", "CorpusId": 1251486, "PubMed": "24487720" }, "url": "https://www.semanticscholar.org/paper/d42cd6fc7eb2b93796bbc81c1da9c0361ecc502d", "referenceCount": 47, "citationCount": 43, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Detecting hidden nodes in complex networks from time series.", "abstract": "We develop a general method to detect hidden nodes in complex networks, using only time series from nodes that are accessible to external observation. Our method is based on compressive sensing and we formulate a general framework encompassing continuous- and discrete-time and the evolutionary-game type of dynamical systems as well. For concrete demonstration, we present an example of detecting hidden nodes from an experimental social network. Our paradigm for detecting hidden nodes is expected to find applications in a variety of fields where identifying hidden or black-boxed objects based on a limited amount of data is of interest.", "year": 2012, "venue": "Physical review. E, Statistical, nonlinear, and soft matter physics", "authors": [ "Ri-Qi Su", "Wenxu Wang", "Y. Lai" ], "externalIds": { "MAG": "2049347439", "DOI": "10.1103/PHYSREVE.85.065201", "CorpusId": 15633810, "PubMed": "23005153" }, "url": "https://www.semanticscholar.org/paper/a38969a1b662637913a916aa4b19ee53208bdbf4", "referenceCount": 21, "citationCount": 69, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Forecasting synchronizability of complex networks from data.", "abstract": "Given a complex networked system whose topology and dynamical equations are unknown, is it possible to foresee that a certain type of collective dynamics can potentially emerge in the system, provided that only time-series measurements are available? 
We address this question by focusing on a commonly studied type of collective dynamics, namely, synchronization in coupled dynamical networks. We demonstrate that, using the compressive-sensing paradigm, even when the coupling strength is not uniform so that the network is effectively weighted, the full topology, the coupling weights, and the nodal dynamical equations can all be uncovered accurately. The reconstruction accuracy and data requirement are systematically analyzed, in a process that includes a validation of the reconstructed eigenvalue spectrum of the underlying coupling matrix. A master stability function (MSF), the fundamental quantity determining the network synchronizability, can then be calculated based on the reconstructed dynamical system, the accuracy of which can be assessed as well. With the coupling matrix and MSF fully uncovered, the emergence of synchronous dynamics in the network can be anticipated and controlled. To forecast the collective dynamics on complex networks is an extremely challenging problem with significant applications in many disciplines, and our work represents an initial step in this important area.", "year": 2012, "venue": "Physical review. E, Statistical, nonlinear, and soft matter physics", "authors": [ "Ri-Qi Su", "X. Ni", "Wenxu Wang", "Y. Lai" ], "externalIds": { "MAG": "2076873021", "DOI": "10.1103/PHYSREVE.85.056220", "CorpusId": 15496005, "PubMed": "23004856" }, "url": "https://www.semanticscholar.org/paper/8b299096b4de4dada976f09b3205e8ac2143f1fc", "referenceCount": 58, "citationCount": 26, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Mathematics" ] }, { "title": "Network Reconstruction Based on Evolutionary-Game Data via Compressive Sensing", "abstract": "Evolutionary games model a common type of interactions in a variety of complex, networked, natural systems and social systems. Given such a system, uncovering the interacting structure of the underlying network is key to understanding its collective dynamics. Based on compressive sensing, we develop an efficient approach to reconstructing complex networks under game-based interactions from small amounts of data. The method is validated by using a variety of model networks and by conducting an actual experiment to reconstruct a social network. While most existing methods in this area assume oscillator networks that generate continuous-time data, our work successfully demonstrates that the extremely challenging problem of reverse engineering of complex networks can also be addressed even when the underlying dynamical processes are governed by realistic, evolutionary-game type of interactions in discrete time. DOI: 10.1103/PhysRevX.1.021021 Subject Areas: Complex Systems, Nonlinear Dynamics, Statistical Physics In many fields of science and engineering, one encounters the situation where the system of interest is composed of networked elements, called nodes, but the pattern of the node-to-node interaction or the network topology is totally unknown. It is desirable and of significant interest to uncover the network topology based on time series of certain observable quantities extracted from experiments or observations. 
Examples of potential applications abound: reconstruction of gene-regulatory networks based on expression data in systems biology [1‐4], extraction of various functional networks in the human brain from activation data in neuroscience [5‐8], and uncovering organizational networks based on discrete data or information in social science and homeland defense. In the past few years, the", "year": 2011, "venue": "Physical Review X", "authors": [ "Wenxu Wang", "Y. Lai", "C. Grebogi", "Jieping Ye" ], "externalIds": { "MAG": "1974430480", "DOI": "10.1103/PHYSREVX.1.021021", "CorpusId": 1082300 }, "url": "https://www.semanticscholar.org/paper/11894242565e0388eeb6cf05b59f5a71447fe082", "referenceCount": 50, "citationCount": 131, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Time-series–based prediction of complex oscillator networks via compressive sensing", "abstract": null, "year": 2011, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Predicting catastrophes in nonlinear dynamical systems by compressive sensing.", "abstract": "An extremely challenging problem of significant interest is to predict catastrophes in advance of their occurrences. We present a general approach to predicting catastrophes in nonlinear dynamical systems under the assumption that the system equations are completely unknown and only time series reflecting the evolution of the dynamical variables of the system are available. Our idea is to expand the vector field or map of the underlying system into a suitable function series and then to use the compressive-sensing technique to accurately estimate the various terms in the expansion. Examples using paradigmatic chaotic systems are provided to demonstrate our idea.", "year": 2011, "venue": "Physical Review Letters", "authors": [ "Wenxu Wang", "Rui Yang", "Y. Lai", "V. Kovanis", "C. Grebogi" ], "externalIds": { "ArXiv": "1105.0462", "MAG": "2137258853", "DOI": "10.1103/PhysRevLett.106.154101", "CorpusId": 17367392, "PubMed": "21568562" }, "url": "https://www.semanticscholar.org/paper/39ac9fe876e3eb5b1093f514a02a6ec5a7952694", "referenceCount": 10, "citationCount": 278, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Physics", "Medicine" ] }, { "title": "On a Constructive Proof of Kolmogorov’s Superposition Theorem", "abstract": null, "year": 2009, "venue": "", "authors": [ "J. Braun", "M. Griebel" ], "externalIds": { "MAG": "2062368570", "DOI": "10.1007/S00365-009-9054-2", "CorpusId": 5164789 }, "url": "https://www.semanticscholar.org/paper/58a6b4967261ebf6fb6aa767629d90d54697cc63", "referenceCount": 26, "citationCount": 118, "influentialCitationCount": 8, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "An Introduction To Compressive Sampling", "abstract": "Conventional approaches to sampling signals or images follow Shannon's theorem: the sampling rate must be at least twice the maximum frequency present in the signal (Nyquist rate). In the field of data conversion, standard analog-to-digital converter (ADC) technology implements the usual quantized Shannon representation - the signal is uniformly sampled at or above the Nyquist rate. 
This article surveys the theory of compressive sampling, also known as compressed sensing or CS, a novel sensing/sampling paradigm that goes against the common wisdom in data acquisition. CS theory asserts that one can recover certain signals and images from far fewer samples or measurements than traditional methods use.", "year": 2008, "venue": "IEEE Signal Processing Magazine", "authors": [ "E. Candès", "M. Wakin" ], "externalIds": { "MAG": "2119667497", "DBLP": "journals/spm/CandesW08", "DOI": "10.1109/MSP.2007.914731", "CorpusId": 1704522 }, "url": "https://www.semanticscholar.org/paper/cc79e154ee8cf75e8d132f497b64f7c10c380bcd", "referenceCount": 32, "citationCount": 9496, "influentialCitationCount": 683, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Estimating system parameters from chaotic time series with synchronization optimized by a genetic algorithm.", "abstract": "A method is proposed to estimate system parameters by optimizing synchronization with a genetic algorithm. This method can effectively find the parameter values of a chaotic system with a rugged parameter landscape. Furthermore, even the parameters of a 200-dimensional coupled-map-lattice spatiotemporal chaotic system can be extracted from a scalar time series. Finally, a Chua's circuit experiment shows the capacity of this method to estimate multiple parameters of real systems.", "year": 2007, "venue": "Physical review. E, Statistical, nonlinear, and soft matter physics", "authors": [ "Chao Tao", "Yu Zhang", "Jack J. Jiang" ], "externalIds": { "MAG": "2006042644", "DOI": "10.1103/PHYSREVE.76.016209", "CorpusId": 7046835, "PubMed": "17677545" }, "url": "https://www.semanticscholar.org/paper/2bdaca36a051d92ea472c917e43f5d1490e6920c", "referenceCount": 0, "citationCount": 34, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Medicine" ] }, { "title": "Modeling and nonlinear parameter estimation with Kronecker product representation for coupled oscillators and spatiotemporal systems", "abstract": null, "year": 2007, "venue": "", "authors": [ "Chen Yao", "E. Bollt" ], "externalIds": { "MAG": "2069338608", "DOI": "10.1016/J.PHYSD.2006.12.006", "CorpusId": 13023533 }, "url": "https://www.semanticscholar.org/paper/5f5343d921dcc97ff89d9e6c72c899a064bc7a43", "referenceCount": 25, "citationCount": 46, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Stable signal recovery from incomplete and inaccurate measurements", "abstract": "Suppose we wish to recover a vector x0 ∈ ℝ𝓂 (e.g., a digital signal or image) from incomplete and contaminated observations y = A x0 + e; A is an 𝓃 × 𝓂 matrix with far fewer rows than columns (𝓃 ≪ 𝓂) and e is an error term. Is it possible to recover x0 accurately based on the data y?", "year": 2005, "venue": "", "authors": [ "E. Candès", "J. Romberg", "T. Tao" ], "externalIds": { "MAG": "2949311731", "ArXiv": "math/0503066", "DOI": "10.1002/CPA.20124", "CorpusId": 119159284 }, "url": "https://www.semanticscholar.org/paper/915df1a8dda45221204f3ecbf70b07d8b34d7ba8", "referenceCount": 27, "citationCount": 7393, "influentialCitationCount": 520, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Physics" ] }, { "title": "Reconstruction of shared nonlinear dynamics in a network.", "abstract": "We study a general physical network consisting of a collection of response systems with complex nonlinear dynamics, influenced by a common driver. 
The goal is to reconstruct dynamics, regular or chaotic, that are common to all of the response systems, working from simultaneous time series measured at the response systems only. A fundamental theorem is stated concerning the reconstruction of the common driver. An algorithm is developed, based on the theorem, to carry out the reconstruction, and is demonstrated with several examples.", "year": 2004, "venue": "Physical Review Letters", "authors": [ "T. Sauer" ], "externalIds": { "MAG": "2016787894", "DOI": "10.1103/PHYSREVLETT.93.198701", "CorpusId": 25387151, "PubMed": "15600893" }, "url": "https://www.semanticscholar.org/paper/078299d2d9b22bfb95917673bec3ac2392445ef3", "referenceCount": 6, "citationCount": 35, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Robust uncertainty principles: exact signal reconstruction from highly incomplete frequency information", "abstract": "This paper considers the model problem of reconstructing an object from incomplete frequency samples. Consider a discrete-time signal f ∈ C^N and a randomly chosen set of frequencies Ω. Is it possible to reconstruct f from the partial knowledge of its Fourier coefficients on the set Ω? A typical result of this paper is as follows. Suppose that f is a superposition of |T| spikes f(t) = Σ_{τ∈T} f(τ)δ(t−τ) obeying |T| ≤ C_M · (log N)^{-1} · |Ω| for some constant C_M > 0. We do not know the locations of the spikes nor their amplitudes. Then with probability at least 1 − O(N^{−M}), f can be reconstructed exactly as the solution to the ℓ1 minimization problem. In short, exact recovery may be obtained by solving a convex optimization problem. We give numerical values for C_M which depend on the desired probability of success. Our result may be interpreted as a novel kind of nonlinear sampling theorem. In effect, it says that any signal made out of |T| spikes may be recovered by convex programming from almost every set of frequencies of size O(|T| · log N). Moreover, this is nearly optimal in the sense that any method succeeding with probability 1 − O(N^{−M}) would in general require a number of frequency samples at least proportional to |T| · log N. The methodology extends to a variety of other situations and higher dimensions. For example, we show how one can reconstruct a piecewise constant (one- or two-dimensional) object from incomplete frequency samples - provided that the number of jumps (discontinuities) obeys the condition above - by minimizing other convex functionals such as the total variation of f.", "year": 2004, "venue": "IEEE Transactions on Information Theory", "authors": [ "E. Candès", "J. Romberg", "T. Tao" ], "externalIds": { "MAG": "2145096794", "ArXiv": "math/0409186", "DBLP": "journals/tit/CandesRT06", "DOI": "10.1109/TIT.2005.862083", "CorpusId": 7033413 }, "url": "https://www.semanticscholar.org/paper/c1180048929ed490ab25e0e612f8f7c3d7196450", "referenceCount": 37, "citationCount": 15576, "influentialCitationCount": 1092, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Nonlinear state estimation, indistinguishable states, and the extended Kalman filter", "abstract": null, "year": 2003, "venue": "", "authors": [ "K. 
Judd" ], "externalIds": { "MAG": "2170561585", "DOI": "10.1016/S0167-2789(03)00180-5", "CorpusId": 16742099 }, "url": "https://www.semanticscholar.org/paper/0865d3e3c47c960f31bb91a521e910c55238adf3", "referenceCount": 23, "citationCount": 53, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Reconstructions and predictions of nonlinear dynamical systems: a hierarchical Bayesian approach", "abstract": "An attempt is made to reconstruct model nonlinear dynamical systems from scalar time series data via a hierarchical Bayesian framework. Reconstruction is performed by fitting given training data with a parameterized family of functions without overfitting. The reconstructed model dynamical systems are compared with respect to (approximated) model marginal likelihood, which is a natural Bayesian information criterion. The best model is selected with respect to this criterion and is utilized to make predictions. The results are applied to two problems: (i) chaotic time series prediction and (ii) building air-conditioning load prediction. The former is a very good class of problems for checking the abilities of prediction algorithms for at least two reasons. First, since no linear dynamical systems can admit chaotic behavior, an algorithm must capture the nonlinearities behind the time series. Second, chaotic dynamical systems are sensitive to initial conditions. More precisely, the error grows exponentially with respect to time so that crispness of capturing nonlinearities is also important. Experimental results appear to indicate that the proposed scheme can capture difficult nonlinearities behind the chaotic time series data. The latter class of problems (air conditioning load prediction) is motivated by a great amount of demand for reducing CO/sub 2/ emissions associated with electric power generation. The authors won a prediction competition using the proposed algorithm; therefore, it appears to be reasonably sound.", "year": 2001, "venue": "IEEE Transactions on Signal Processing", "authors": [ "Takashi Matsumoto", "Y. Nakajima", "M. Saito", "J. Sugi", "H. Hamagishi" ], "externalIds": { "MAG": "2149140400", "DBLP": "journals/tsp/MatsumotoNSSH01", "DOI": "10.1109/78.942641", "CorpusId": 9760930 }, "url": "https://www.semanticscholar.org/paper/e0cc559401c768a1439d3d331e8f7ef122f413e1", "referenceCount": 25, "citationCount": 55, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Controlling Chaos and the Inverse Frobenius-Perron Problem: Global stabilization of Arbitrary Invariant Measures", "abstract": "The inverse Frobenius–Perron problem (IFPP) is a global open-loop strategy to control chaos. The goal of our IFPP is to design a dynamical system in ℜn which is: (1) nearby the original dynamical system, and (2) has a desired invariant density. We reduce the question of stabilizing an arbitrary invariant measure, to the question of a hyperplane intersecting a unit hyperbox; several controllability theorems follow. We present a generalization of Baker maps with an arbitrary grammar and whose FP operator is the required stochastic matrix.", "year": 2000, "venue": "International Journal of Bifurcation and Chaos in Applied Sciences and Engineering", "authors": [ "E. 
Bollt" ], "externalIds": { "MAG": "2080457649", "DBLP": "journals/ijbc/Bollt00", "DOI": "10.1142/S0218127400000736", "CorpusId": 15146813 }, "url": "https://www.semanticscholar.org/paper/f3801e373913bb3fabd7bbf5456c0889e0299301", "referenceCount": 42, "citationCount": 49, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Coping with nonstationarity by overembedding", "abstract": "We discuss how nonstationarity in observed time series data due to pronounced fluctuations of system parameters can be resolved by making use of embedding techniques for scalar data. If a D-dimensional deterministic system is driven by P slowly time dependent parameters, a (D+P)-dimensional manifold has to be reconstructed from the scalar time series, which is done by an m>2(D+P)-dimensional time delay embedding. We show that in this space essential aspects of determinism are restored. We demonstrate the validity of the idea heuristically, for numerical examples and for human speech data.", "year": 2000, "venue": "Physical Review Letters", "authors": [ "Hegger", "Kantz", "Matassini", "Schreiber" ], "externalIds": { "MAG": "2060071521", "DOI": "10.1103/PHYSREVLETT.84.4092", "CorpusId": 11694190, "PubMed": "10990618" }, "url": "https://www.semanticscholar.org/paper/fcef193cd23e94024d71cb1cd96f9c9655d0fb32", "referenceCount": 6, "citationCount": 77, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Mathematics" ] }, { "title": "Practical implementation of nonlinear time series methods: The TISEAN package.", "abstract": "We describe the implementation of methods of nonlinear time series analysis which are based on the paradigm of deterministic chaos. A variety of algorithms for data representation, prediction, noise reduction, dimension and Lyapunov estimation, and nonlinearity testing are discussed with particular emphasis on issues of implementation and choice of parameters. Computer programs that implement the resulting strategies are publicly available as the TISEAN software package. The use of each algorithm will be illustrated with a typical application. As to the theoretical background, we will essentially give pointers to the literature. (c) 1999 American Institute of Physics.", "year": 1998, "venue": "Chaos", "authors": [ "R. Hegger", "H. Kantz", "T. Schreiber" ], "externalIds": { "MAG": "1979957032", "ArXiv": "chao-dyn/9810005", "DOI": "10.1063/1.166424", "CorpusId": 11654906, "PubMed": "12779839" }, "url": "https://www.semanticscholar.org/paper/24947c5d56a85995642d56e846a417e63808bbaa", "referenceCount": 92, "citationCount": 1564, "influentialCitationCount": 121, "isOpenAccess": false, "fieldsOfStudy": [ "Physics", "Computer Science", "Medicine" ] }, { "title": "Forecasting chaotic time series with genetic algorithms", "abstract": null, "year": 1997, "venue": "", "authors": [ "George G. Szpiro" ], "externalIds": { "MAG": "1996619812", "DOI": "10.1103/PHYSREVE.55.2557", "CorpusId": 121024626 }, "url": "https://www.semanticscholar.org/paper/f16b19fbc1a259bea41234460f13bafa3640405e", "referenceCount": 23, "citationCount": 148, "influentialCitationCount": 11, "isOpenAccess": false, "fieldsOfStudy": [ "Physics" ] }, { "title": "Detecting dynamical interdependence and generalized synchrony through mutual prediction in a neural ensemble.", "abstract": "A method to characterize dynamical interdependence among nonlinear systems is derived based on mutual nonlinear prediction. 
Systems with nonlinear correlation will show mutual nonlinear prediction when standard analysis with linear cross correlation might fail. Mutual nonlinear prediction also provides information on the directionality of the coupling between systems. Furthermore, the existence of bidirectional mutual nonlinear prediction in unidirectionally coupled systems implies generalized synchrony. Numerical examples studied include three classes of unidirectionally coupled systems: systems with identical parameters, nonidentical parameters, and stochastic driving of a nonlinear system. This technique is then applied to the activity of motoneurons within a spinal cord motoneuron pool. The interrelationships examined include single neuron unit firing, the total number of neurons discharging at one time as measured by the integrated monosynaptic reflex, and intracellular measurements of integrated excitatory postsynaptic potentials (EPSP's). Dynamical interdependence, perhaps generalized synchrony, was identified in this neuronal network between simultaneous single unit firings, between units and the population, and between units and intracellular EPSP's.", "year": 1996, "venue": "Physical review. E, Statistical physics, plasmas, fluids, and related interdisciplinary topics", "authors": [ "S. Schiff", "P. So", "Taeun Chang", "R. Burke", "T. Sauer" ], "externalIds": { "MAG": "2109413227", "DOI": "10.1103/PHYSREVE.54.6708", "CorpusId": 19942403, "PubMed": "9965897" }, "url": "https://www.semanticscholar.org/paper/93de266cd06eac74800061fcac086d9d4fc2dd5a", "referenceCount": 29, "citationCount": 383, "influentialCitationCount": 24, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Medicine" ] }, { "title": "Estimating model parameters from time series by autosynchronization.", "abstract": "The synchronization of (unidirectionally) coupled dynamical systems and its possible applications in communication schemes is currently a field of great interest ([1–7] and references cited therein). In this Letter we discuss a special feature of synchronizing systems called autosynchronization, where a system with slowly varying parameters converges from a state of nonsynchronization to synchronization. This adaption process is governed by additional ordinary differential equations (ODEs) for the parameters that are controlled by the synchronization error. A systematic way for deriving the parameter controlling loop is presented and illustrated by numerical examples. For the sake of brevity we consider unidirectionally coupled systems only, although the main ideas can in principle also be applied to mutually coupled synchronizing systems. In order to indicate a possible application in nonlinear time series analysis [8] and system identification, autosynchronization is discussed and used in the following for estimating the parameters of a given model from a scalar time series [9–14].", "year": 1996, "venue": "Physical Review Letters", "authors": [ "U. Parlitz" ], "externalIds": { "MAG": "1999885564", "DOI": "10.1103/PHYSREVLETT.76.1232", "CorpusId": 40240371, "PubMed": "10061669" }, "url": "https://www.semanticscholar.org/paper/8e22e947b0e8edff929ea0ffda45aa7f27055d68", "referenceCount": 4, "citationCount": 275, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Forecasting nonlinear economic time series: A simple test to accompany the nearest neighbor approach", "abstract": null, "year": 1995, "venue": "", "authors": [ "B. Finkenstädt", "P. 
Kuhbier" ], "externalIds": { "MAG": "2065341710", "DOI": "10.1007/BF01205437", "CorpusId": 115131033 }, "url": "https://www.semanticscholar.org/paper/7abafa436c5f827a30feea3192b1d573ab2cc177", "referenceCount": 20, "citationCount": 27, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Nonlinear Dynamics and Population Disappearances", "abstract": "Local extinctions generally have been attributed to demographic stochasticity (requiring very small population sizes) or to extrinsic effects such as environmental stochasticity. Itis shown here that, without any of these factors, the presence of chaotic transients in very simple and plausible cosystem models can produce sudden and unforeseeable disappearances of populations (fig. 1). The mathematical background and the ecological context for these occurrences are discussed. Figure 1 is a time series for the top predator in a simple three-species food chain; specifically itis a solution of the equations", "year": 1994, "venue": "American Naturalist", "authors": [ "K. McCann", "P. Yodzis" ], "externalIds": { "MAG": "2018360205", "DOI": "10.1086/285714", "CorpusId": 84214202 }, "url": "https://www.semanticscholar.org/paper/85a78b2758efb72333c0ec7d89759e964245ad32", "referenceCount": 14, "citationCount": 118, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Nonlinear forecasting for the classification of natural time series", "abstract": "There is a growing trend in the natural sciences to view time series as products of dynamical systems. This viewpoint has proven to be particularly useful in stimulating debate and insight into the nature of the underlying generating mechanisms. Here I review some of the issues concerning the use of forecasting in the detection of nonlinearities and possible chaos, particularly with regard to stochastic chaos. Moreover, it is shown how recent attempts to measure meaningful Lyapunov exponents for ecological data are fundamentally flawed, and that when observational noise is convolved with process noise, computing Lyapunov exponents for the real system will be difficult. Such problems pave the way for more operational definitions of dynamic complexity (cf. Yao & Tong, this volume) . Aside from its use in the characterization of chaos, nonlinear forecasting can be used more broadly in pragmatic classification problems. Here I review a recent example of nonlinear forecasting as it is applied to classify human heart rhythms. In particular, it is shown how forecast nonlinearity can be a good discriminator of the physiological effects of age, and how prediction-decay may discriminate heartdisease. In so doing, I introduce a method for characterizing nonlinearity using ‘S-maps’ and a method for analysing multiple short time series with composite attractors.", "year": 1994, "venue": "Philosophical Transactions of the Royal Society of London Series A Physical and Engineering Sciences", "authors": [ "G. 
Sugihara" ], "externalIds": { "MAG": "2053059880", "DOI": "10.1098/rsta.1994.0106", "CorpusId": 121604829 }, "url": "https://www.semanticscholar.org/paper/3d370dc4a0baa41e5ad4694da56a24a74f103e09", "referenceCount": 44, "citationCount": 307, "influentialCitationCount": 32, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Reconstruction of dynamical systems from interspike intervals.", "abstract": "Attractor reconstruction from interspike interval (ISI) data is described, in rough analogy with Taken's theorem for attractor reconstruction from time series. Assuming a generic integrate-and-fire model coupling the dynamical system to the spike train, there is a one-to-one correspondence between the system states and interspike interval vectors of sufficiently large dimension. The correspondence has an important implication: interspike intervals can be forecast from past history. We show that deterministically driven ISI series can be distinguished from stochastically driven ISI series on the basis of prediction error.", "year": 1994, "venue": "Physical Review Letters", "authors": [ "T. Sauer" ], "externalIds": { "MAG": "2051839184", "DOI": "10.1103/PHYSREVLETT.72.3811", "CorpusId": 41414335, "PubMed": "10056303" }, "url": "https://www.semanticscholar.org/paper/74ef8e7597032cd22ef9729036e3ce24481a038b", "referenceCount": 0, "citationCount": 239, "influentialCitationCount": 12, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Forecasting a chaotic time series using an improved metric for embedding space", "abstract": null, "year": 1993, "venue": "", "authors": [ "D. Murray" ], "externalIds": { "MAG": "2100047653", "DOI": "10.1016/0167-2789(93)90127-M", "CorpusId": 123384767 }, "url": "https://www.semanticscholar.org/paper/c0d177c2fbeb3a69b46f249a4b9d2e3babb606fd", "referenceCount": 23, "citationCount": 46, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "NONLINEAR FORECASTING OF SPIKE TRAINS FROM SENSORY NEURONS", "abstract": "The sequence of firing times of a neuron can be viewed as a point process. The problem of spike train analysis is to infer the underlying neural dynamics from this point process when, for example, one does not have access to a state variable such as intracellular voltage. Traditional analyses of spike trains have focussed to a large extent on fitting the parameters of a model stochastic point process to the data, such as the intensity of a homogeneous Poisson point process. This paper shows how methods from nonlinear time series analysis can be used to gain knowledge about correlations between the spiking events recorded from periodically driven sensory neurons. Results on nonlinear forecastability of these spike trains are compared to those on data sets derived from the original data set and satisfying an appropriately chosen null hypothesis. While no predictability, linear or nonlinear, is revealed by our analysis of the raw data using local linear predictors, it appears that there is some predictability in the successive phases (rather than intervals) at which the neurons fire.", "year": 1993, "venue": "", "authors": [ "A. 
Longtin" ], "externalIds": { "MAG": "2027580899", "DOI": "10.1142/S0218127493000556", "CorpusId": 121282185 }, "url": "https://www.semanticscholar.org/paper/5abd2626428fd2c759316d3dd5af2ab39e563c00", "referenceCount": 0, "citationCount": 43, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Nonlinear prediction as a way of distinguishing chaos from random fractal sequences", "abstract": null, "year": 1992, "venue": "Nature", "authors": [ "A. Tsonis", "J. Elsner" ], "externalIds": { "MAG": "2050214465", "DOI": "10.1038/358217A0", "CorpusId": 4237145 }, "url": "https://www.semanticscholar.org/paper/49a0ab125b87b4a57b2ca5eb0adbcf4b9d0f2341", "referenceCount": 16, "citationCount": 199, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Fitting ordinary differential equations to chaotic data.", "abstract": "We address the problem of estimating parameters in systems of ordinary differential equations which give rise to chaotic time series. We claim that the problem is naturally tackled by boundary value problem methods. The power of this approach is demonstrated by various examples with ideal as well as noisy data. In particular, Lyapunov exponents can be computed accurately from time series much shorter than those required by previous methods.", "year": 1992, "venue": "Physical Review A. Atomic, Molecular, and Optical Physics", "authors": [ "Ellen Baake", "M. Baake", "Hans Bock", "Keith Briggs" ], "externalIds": { "MAG": "2081068198", "DOI": "10.1103/PHYSREVA.45.5524", "CorpusId": 42049246, "PubMed": "9907650" }, "url": "https://www.semanticscholar.org/paper/eb1ffadadc70768adcc76561b44da6686780b766", "referenceCount": 0, "citationCount": 182, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Medicine" ] }, { "title": "Reconstruction of standard and inverse vector fields equivalent to a Rössler system.", "abstract": null, "year": 1991, "venue": "Physical Review A. Atomic, Molecular, and Optical Physics", "authors": [ "Gouesbet" ], "externalIds": { "MAG": "1992074539", "DOI": "10.1103/PHYSREVA.44.6264", "CorpusId": 39855096, "PubMed": "9905758" }, "url": "https://www.semanticscholar.org/paper/8e2f6f6def654532bcae57a7969b95c5bc6eb19a", "referenceCount": 0, "citationCount": 33, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Physics", "Medicine" ] }, { "title": "NONLINEAR TIME SEQUENCE ANALYSIS", "abstract": "We review several aspects of the analysis of time sequences, and concentrate on recent methods using concepts from the theory of nonlinear dynamical systems. In particular, we discuss problems in estimating attractor dimensions, entropies, and Lyapunov exponents, in reducing noise and in forecasting. For completeness and since we want to stress connections to more traditional (mostly spectrum-based) methods, we also give a short review of spectral methods.", "year": 1991, "venue": "", "authors": [ "P. Grassberger", "T. 
Schreiber", "Carsten Schaffrath" ], "externalIds": { "MAG": "2007979658", "DOI": "10.1142/S0218127491000403", "CorpusId": 119519524 }, "url": "https://www.semanticscholar.org/paper/79cad307b03f6ec033ca1e8b51d21bff1a5137d3", "referenceCount": 0, "citationCount": 497, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Distinguishing error from chaos in ecological time series.", "abstract": "Over the years, there has been much discussion about the relative importance of environmental and biological factors in regulating natural populations. Often it is thought that environmental factors are associated with stochastic fluctuations in population density, and biological ones with deterministic regulation. We revisit these ideas in the light of recent work on chaos and nonlinear systems. We show that completely deterministic regulatory factors can lead to apparently random fluctuations in population density, and we then develop a new method (that can be applied to limited data sets) to make practical distinctions between apparently noisy dynamics produced by low-dimensional chaos and population variation that in fact derives from random (high-dimensional) noise, such as environmental stochasticity or sampling error. To show its practical use, the method is first applied to models where the dynamics are known. We then apply the method to several sets of real data, including newly analysed data on the incidence of measles in the United Kingdom. Here the additional problems of secular trends and spatial effects are explored. In particular, we find that on a city-by-city scale measles exhibits low-dimensional chaos (as has previously been found for measles in New York City), whereas on a larger, country-wide scale the dynamics appear as a noisy two-year cycle. In addition to shedding light on the basic dynamics of some nonlinear biological systems, this work dramatizes how the scale on which data is collected and analysed can affect the conclusions drawn.", "year": 1990, "venue": "Philosophical transactions of the Royal Society of London. Series B, Biological sciences", "authors": [ "G. Sugihara", "B. Grenfell", "R. May" ], "externalIds": { "MAG": "2110798438", "DOI": "10.1098/RSTB.1990.0195", "CorpusId": 23396490, "PubMed": "1981621" }, "url": "https://www.semanticscholar.org/paper/ef32430600a6eb29c48e94677b10978e91d0c4b5", "referenceCount": 26, "citationCount": 200, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Biology", "Medicine" ] }, { "title": "On forecasting the sunspot numbers", "abstract": null, "year": 1990, "venue": "", "authors": [ "J. Kurths", "A. Ruzmaikin" ], "externalIds": { "MAG": "1969271825", "DOI": "10.1007/BF00153060", "CorpusId": 120202227 }, "url": "https://www.semanticscholar.org/paper/2fa5f7108978f5b97df3095c1113ca7794dc0a25", "referenceCount": 13, "citationCount": 49, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Physics" ] }, { "title": "Nonlinear prediction of chaotic time series", "abstract": null, "year": 1989, "venue": "", "authors": [ "M. 
Casdagli" ], "externalIds": { "MAG": "2066366061", "DOI": "10.1016/0167-2789(89)90074-2", "CorpusId": 122236599 }, "url": "https://www.semanticscholar.org/paper/be946457d3f880d9ec836aee3d0d231ffa3bcc9a", "referenceCount": 36, "citationCount": 1408, "influentialCitationCount": 70, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Predicting chaotic time series.", "abstract": "We present a forecasting technique for chaotic data. After embedding a time series in a state space using delay coordinates, we ''learn'' the induced nonlinear mapping using local approximation. This allows us to make short-term predictions of the future behavior of a time series, using information based only on past values. We present an error estimate for this technique, and demonstrate its effectiveness by applying it to several examples, including data from the Mackey-Glass delay differential equation, Rayleigh-Benard convection, and Taylor-Couette flow.", "year": 1987, "venue": "Physical Review Letters", "authors": [ "J. Farmer", "J. J. SIDorowich" ], "externalIds": { "MAG": "2034099719", "DOI": "10.1103/PHYSREVLETT.59.845", "CorpusId": 44464211, "PubMed": "10035887" }, "url": "https://www.semanticscholar.org/paper/38918f38a875bc2ede6e6865552bcf736c67dc95", "referenceCount": 0, "citationCount": 1858, "influentialCitationCount": 74, "isOpenAccess": true, "fieldsOfStudy": [ "Physics", "Medicine" ] }, { "title": "Global dynamical behavior of the optical field in a ring cavity", "abstract": "A new class of instabilities for a plane-wave intracavity field in an optical ring resonator is identified. Dynamical systems techniques are explained and applied to the map. A bifurcation diagram is given that organizes the important information, and global pictures are developed that describe the evolution of the attractor and its basin boundary. Anomalous behavior observed in earlier numerical studies is explained.", "year": 1985, "venue": "", "authors": [ "S. Hammel", "C. Jones", "J. Moloney" ], "externalIds": { "MAG": "1996767607", "DOI": "10.1364/JOSAB.2.000552", "CorpusId": 120204901 }, "url": "https://www.semanticscholar.org/paper/37c6eee088c6273ae11f600ef7fb2cbac8eba981", "referenceCount": 14, "citationCount": 118, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Physics" ] }, { "title": "Mathematical analysis : a straightforward approach", "abstract": "From book description: \nFor the second edition of this very successful text, Professor Binmore has written two chapters on analysis in vector spaces. The discussion extends to the notion of the derivative of a vector function as a matrix and the use of second derivatives in classifying stationary points. Some necessary concepts from linear algebra are included where appropriate. The first edition contained numerous worked examples and an ample collection of exercises for all of which solutions were provided at the end of the book. The second edition retains this feature but in addition offers a set of problems for which no solutions are given. Teachers may find this a helpful innovation.", "year": 1982, "venue": "", "authors": [ "K. 
Binmore" ], "externalIds": { "MAG": "1533211841", "DOI": "10.1017/CBO9781139171656", "CorpusId": 60721773 }, "url": "https://www.semanticscholar.org/paper/cc6e509216528f429e038de4afc60dbfc1d98192", "referenceCount": 0, "citationCount": 82, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multiple-valued stationary state and its instability of the transmitted light by a ring cavity system", "abstract": null, "year": 1979, "venue": "", "authors": [ "K. Ikeda" ], "externalIds": { "MAG": "2092152122", "DOI": "10.1016/0030-4018(79)90090-7", "CorpusId": 122423966 }, "url": "https://www.semanticscholar.org/paper/e7947ef0284d34df931f6bf08ab45d1d5d52c819", "referenceCount": 11, "citationCount": 994, "influentialCitationCount": 23, "isOpenAccess": false, "fieldsOfStudy": [ "Physics" ] }, { "title": "An equation for continuous chaos", "abstract": null, "year": 1976, "venue": "", "authors": [ "O. Rössler" ], "externalIds": { "MAG": "2248885457", "DOI": "10.1016/0375-9601(76)90101-8", "CorpusId": 119846844 }, "url": "https://www.semanticscholar.org/paper/6553606abd959346f8e5bdc9e8e358c46b5b333e", "referenceCount": 7, "citationCount": 3596, "influentialCitationCount": 147, "isOpenAccess": false, "fieldsOfStudy": [ "Physics" ] }, { "title": "Deterministic nonperiodic flow", "abstract": "Finite systems of deterministic ordinary nonlinear differential equations may be designed to represent forced dissipative hydrodynamic flow. Solutions of these equations can be identified with trajectories in phase space For those systems with bounded solutions, it is found that nonperiodic solutions are ordinarily unstable with respect to small modifications, so that slightly differing initial states can evolve into consider­ably different states. Systems with bounded solutions are shown to possess bounded numerical solutions.", "year": 1963, "venue": "", "authors": [ "E. Lorenz" ], "externalIds": { "MAG": "2141394518", "DOI": "10.1175/1520-0469(1963)020<0130:DNF>2.0.CO;2", "CorpusId": 15359559 }, "url": "https://www.semanticscholar.org/paper/b021e8cf155a7c4c8244506c7caaa99bea0eaac9", "referenceCount": 9, "citationCount": 16717, "influentialCitationCount": 1149, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Some Characteristics of Simple Types of Predation and Parasitism", "abstract": "In an earlier study (Holling, 1959) the basic and subsidiary components of predation were demonstrated in a predator-prey situation involving the predation of sawfly cocoons by small mammals. One of the basic components, termed the functional response, was a response of the consumption of prey by individual predators to changes of prey density, and it appeared to be at least theoretically important in population regulation: Because of this importance the functional response has been further examined in an attempt to explain its characteristics.", "year": 1959, "venue": "Canadian Entomologist", "authors": [ "C. S. 
Holling" ], "externalIds": { "MAG": "1969027200", "DOI": "10.4039/Ent91385-7", "CorpusId": 83738084 }, "url": "https://www.semanticscholar.org/paper/867255cf893f914c0a13527d7cb93ba321a6603d", "referenceCount": 13, "citationCount": 4075, "influentialCitationCount": 323, "isOpenAccess": false, "fieldsOfStudy": [ "Biology" ] }, { "title": "The Components of Predation as Revealed by a Study of Small-Mammal Predation of the European Pine Sawfly", "abstract": "The fluctuation of an animal's numbers between restricted limits is determined by a balance between that animal's capacity to increase and the environmenta1 cheks to this increase. Many authors have indulged in the calculating the propressive increase of a population when no checks nrerc operating. Thus Huxley calculated that the progeny of a single Aphis in the course of 10 generations, supposing all survived,would “contain more ponderable substance than five hundred millions of stout men; that is, more than the whole population of China”, (in Thompson, 1929). Checks, however, do occur and it has been the subject of much controversy to determine how these checks operate. Certain general principles—the density-dependence concept of Smith ( 1955) , the competition theory of Nicholson (1933)—have been proposed both verbally and mathematically, but because they have been based in part upon untested and restrictive assumptions they have been severelv criticized (e.g. Andrewartha and Birch 1954). These problems could be considerably clarified if we knew the mode of operation of each process that affects numbers, if we knew its basic and subsidiary components. predation, one such process, forms the subject of the present paper.", "year": 1959, "venue": "Canadian Entomologist", "authors": [ "C. S. Holling" ], "externalIds": { "MAG": "2131915744", "DOI": "10.4039/Ent91293-5", "CorpusId": 53474917 }, "url": "https://www.semanticscholar.org/paper/8a0fd62bc5866863e1666d2cf35d73d32625dabe", "referenceCount": 45, "citationCount": 3145, "influentialCitationCount": 227, "isOpenAccess": false, "fieldsOfStudy": [ "Biology" ] }, { "title": "Compressed Sensing", "abstract": "Machine generated contents note: 1. Introduction to compressed sensing Mark A. Davenport, Marco F. Duarte, Yonina C. Eldar and Gitta Kutyniok; 2. Second generation sparse modeling: structured and collaborative signal analysis Alexey Castrodad, Ignacio Ramirez, Guillermo Sapiro, Pablo Sprechmann and Guoshen Yu; 3. Xampling: compressed sensing of analog signals Moshe Mishali and Yonina C. Eldar; 4. Sampling at the rate of innovation: theory and applications Jose Antonia Uriguen, Yonina C. Eldar, Pier Luigi Dragotta and Zvika Ben-Haim; 5. Introduction to the non-asymptotic analysis of random matrices Roman Vershynin; 6. Adaptive sensing for sparse recovery Jarvis Haupt and Robert Nowak; 7. Fundamental thresholds in compressed sensing: a high-dimensional geometry approach Weiyu Xu and Babak Hassibi; 8. Greedy algorithms for compressed sensing Thomas Blumensath, Michael E. Davies and Gabriel Rilling; 9. Graphical models concepts in compressed sensing Andrea Montanari; 10. Finding needles in compressed haystacks Robert Calderbank, Sina Jafarpour and Jeremy Kent; 11. Data separation by sparse representations Gitta Kutyniok; 12. Face recognition by sparse representation Arvind Ganesh, Andrew Wagner, Zihan Zhou, Allen Y. 
Yang, Yi Ma and John Wright.", "year": 2012, "venue": "", "authors": [ "Gitta Kutyniok" ], "externalIds": { "MAG": "2196956961", "DBLP": "books/cu/12/EK2012", "DOI": "10.1017/CBO9780511794308", "CorpusId": 14790051 }, "url": "https://www.semanticscholar.org/paper/e7321ab0f3be0b29aaf5f073fd7de7da5fed2f92", "referenceCount": 50, "citationCount": 16140, "influentialCitationCount": 988, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Forecasting the future: is it possible for time-varying nonlinear dynamical systems?", "abstract": null, "year": 2012, "venue": "Chaos", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "On the representation of functions of several variables as a superposition of functions of a smaller number of variables", "abstract": null, "year": 2009, "venue": "", "authors": [ "V. Arnold", "A. Givental", "B. Khesin", "J. Marsden", "A. Varchenko", "V. Vassiliev", "O. Viro", "V. Zakalyukin" ], "externalIds": { "MAG": "2481190328", "DOI": "10.1007/978-3-642-01742-1_5", "CorpusId": 124485407 }, "url": "https://www.semanticscholar.org/paper/73ef8c010f18f0a763eda0d8cbaad6c0cbf35edf", "referenceCount": 0, "citationCount": 21, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "fore-casts?", "abstract": null, "year": 2002, "venue": "Proc. Nat. Acad. Sci. (USA)", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Solar cycle forecasting : A nonlinear dynamics approach", "abstract": "The problem of prediction of a given time series is examined on the basis of recent nonlinear dynamics theories. Particular attention is devoted to forecast the amplitude and phase of one of the most common solar indicator activities, the international monthly smoothed sunspot number. It is well known that the solar cycle is very difficult to predict due to the intrinsic complexity of the related time behaviour and to the lack of a successful quantitative theoretical model of the Sun’s magnetic cycle. Starting from a recent previous work, we checked the reliability and accuracy of a forecasting model based on concepts of nonlinear dynamical systems applied to experimental time series, such as embedding phase space, Lyapunov spectrum, chaotic behaviour. The model is based on a local hypothesis of the behaviour on embedding space, utilising an optimal number of neighbour vectors to predict the future evolution. The performances of this method for the current 23rd solar cycle suggest its valuable insertion in the set of the so-called non-precursor statisticalnumerical prediction techniques. The main task is to set up and to compare a promising numerical nonlinear prediction technique, essentially based on an inverse problem, with the most accurate prediction methods, like the so-called “precursor methods” which appear now reasonably accurate in predicting “long-term” Sun activity, with particular reference to “solar” and “geomagnetic” precursor methods based on a solar dynamo theory.", "year": 2001, "venue": "", "authors": [ "V. 
Pisano" ], "externalIds": { "CorpusId": 10349678 }, "url": "https://www.semanticscholar.org/paper/0d9c832e4ac6b323ab473e7330c4015c68b82585", "referenceCount": 0, "citationCount": 43, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "On the Representation of Continuous Functions of Several Variables as Superpositions of Continuous Functions of one Variable and Addition", "abstract": null, "year": 1991, "venue": "", "authors": [ "V. Tikhomirov" ], "externalIds": { "MAG": "996830848", "DOI": "10.1007/978-94-011-3030-1_56", "CorpusId": 116968444 }, "url": "https://www.semanticscholar.org/paper/300328d09233d3ada652d6aace66353c3bdb5762", "referenceCount": 0, "citationCount": 533, "influentialCitationCount": 31, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Equations of Motion from a Data Series", "abstract": "Temporal pattern learning, control and prediction, and chaotic data analysis share a common problem: deducing optimal equations of motion from observations of time-dependent behavior. Each desires to obtain models of the physical world from limited information. We describe a method to reconstruct the deterministic portion of the equations of motion directly from a data series. These equations of motion represent a vast reduction of a chaotic data set’s observed complexity to a compact, algorithmic specification. This approach employs an informational measure of model optimality to guide searching through the space of dynamical systems. As corollary results, we indicate how to estimate the minimum embedding dimension, extrinsic noise level, metric entropy, and Lyapunov spectrum. Numerical and experimental applications demonstrate the method’s feasibility and limitations. Extensions to estimating parametrized families of dynamical systems from bifurcation data and to spatial pattern evolution are presented. Applications to predicting chaotic data and the design of forecasting, learning, and control systems, are discussed.", "year": 1987, "venue": "Complex Systems", "authors": [ "J. Crutchfield", "Bruce S. 
McNamara" ], "externalIds": { "DBLP": "journals/compsys/CrutchfieldM87", "MAG": "133148819", "CorpusId": 14493184 }, "url": "https://www.semanticscholar.org/paper/944bb9874f26671e1cd64225f1ab2fb01aaf1934", "referenceCount": 59, "citationCount": 375, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Supplementary Information contains", "abstract": null, "year": null, "venue": "a detailed description", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Explainable and Human-Grounded AI for Decision Support Systems: The Theory of Epistemic Quasi-Partnerships": { "paper_title": "Explainable and Human-Grounded AI for Decision Support Systems: The Theory of Epistemic Quasi-Partnerships", "arxiv_id": "2409.14839v1", "keyword": "generative model", "authors": [ "John Dorsch", "Maximilian Moll" ], "references": [ { "title": "“Quasi-Metacognitive Machines: Why We Don’t Need Morally Trustworthy AI and Communicating Reliability is Enough”", "abstract": null, "year": 2024, "venue": "Philosophy & Technology", "authors": [ "John Dorsch", "Ophélia Deroy" ], "externalIds": { "DOI": "10.1007/s13347-024-00752-w", "CorpusId": 269573835 }, "url": "https://www.semanticscholar.org/paper/1cb5e67616cecb31f42fb54d624f4a47266369b0", "referenceCount": 66, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "A Bayesian Network Approach to Explainable Reinforcement Learning with Distal Information", "abstract": "Nowadays, Artificial Intelligence systems have expanded their competence field from research to industry and daily life, so understanding how they make decisions is becoming fundamental to reducing the lack of trust between users and machines and increasing the transparency of the model. This paper aims to automate the generation of explanations for model-free Reinforcement Learning algorithms by answering “why” and “why not” questions. To this end, we use Bayesian Networks in combination with the NOTEARS algorithm for automatic structure learning. This approach complements an existing framework very well and demonstrates thus a step towards generating explanations with as little user input as possible. This approach is computationally evaluated in three benchmarks using different Reinforcement Learning methods to highlight that it is independent of the type of model used and the explanations are then rated through a human study. The results obtained are compared to other baseline explanation models to underline the satisfying performance of the framework presented in terms of increasing the understanding, transparency and trust in the action chosen by the agent.", "year": 2023, "venue": "Italian National Conference on Sensors", "authors": [ "Rudy Milani", "M. Moll", "R. 
Leone", "Stefan Pickl" ], "externalIds": { "PubMedCentral": "9961455", "DBLP": "journals/sensors/MilaniMLP23", "DOI": "10.3390/s23042013", "CorpusId": 256852749, "PubMed": "36850617" }, "url": "https://www.semanticscholar.org/paper/fba60ba14fcc9bc543bc4a6026b9ef694eee949d", "referenceCount": 61, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Understanding the Role of Human Intuition on Reliance in Human-AI Decision-Making with Explanations", "abstract": "AI explanations are often mentioned as a way to improve human-AI decision-making, but empirical studies have not found consistent evidence of explanations' effectiveness and, on the contrary, suggest that they can increase overreliance when the AI system is wrong. While many factors may affect reliance on AI support, one important factor is how decision-makers reconcile their own intuition---beliefs or heuristics, based on prior knowledge, experience, or pattern recognition, used to make judgments---with the information provided by the AI system to determine when to override AI predictions. We conduct a think-aloud, mixed-methods study with two explanation types (feature- and example-based) for two prediction tasks to explore how decision-makers' intuition affects their use of AI predictions and explanations, and ultimately their choice of when to rely on AI. Our results identify three types of intuition involved in reasoning about AI predictions and explanations: intuition about the task outcome, features, and AI limitations. Building on these, we summarize three observed pathways for decision-makers to apply their own intuition and override AI predictions. We use these pathways to explain why (1) the feature-based explanations we used did not improve participants' decision outcomes and increased their overreliance on AI, and (2) the example-based explanations we used improved decision-makers' performance over feature-based explanations and helped achieve complementary human-AI performance. Overall, our work identifies directions for further development of AI decision-support systems and explanation methods that help decision-makers effectively apply their intuition to achieve appropriate reliance on AI.", "year": 2023, "venue": "Proc. ACM Hum. Comput. Interact.", "authors": [ "Valerie Chen", "Q. Liao", "Jennifer Wortman Vaughan", "Gagan Bansal" ], "externalIds": { "DBLP": "journals/corr/abs-2301-07255", "ArXiv": "2301.07255", "DOI": "10.1145/3610219", "CorpusId": 255998499 }, "url": "https://www.semanticscholar.org/paper/e632d642dc115a7f4cb7d881481ed61e70200af8", "referenceCount": 95, "citationCount": 57, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Who Should I Trust: AI or Myself? Leveraging Human and AI Correctness Likelihood to Promote Appropriate Trust in AI-Assisted Decision-Making", "abstract": "In AI-assisted decision-making, it is critical for human decision-makers to know when to trust AI and when to trust themselves. However, prior studies calibrated human trust only based on AI confidence indicating AI’s correctness likelihood (CL) but ignored humans’ CL, hindering optimal team decision-making. To mitigate this gap, we proposed to promote humans’ appropriate trust based on the CL of both sides at a task-instance level. We first modeled humans’ CL by approximating their decision-making models and computing their potential performance in similar instances. 
We demonstrated the feasibility and effectiveness of our model via two preliminary studies. Then, we proposed three CL exploitation strategies to calibrate users’ trust explicitly/implicitly in the AI-assisted decision-making process. Results from a between-subjects experiment (N=293) showed that our CL exploitation strategies promoted more appropriate human trust in AI, compared with only using AI confidence. We further provided practical implications for more human-compatible AI-assisted decision-making.", "year": 2023, "venue": "International Conference on Human Factors in Computing Systems", "authors": [ "Shuai Ma", "Ying Lei", "Xinru Wang", "Chengbo Zheng", "Chuhan Shi", "Ming Yin", "Xiaojuan Ma" ], "externalIds": { "DBLP": "journals/corr/abs-2301-05809", "ArXiv": "2301.05809", "DOI": "10.1145/3544548.3581058", "CorpusId": 255941863 }, "url": "https://www.semanticscholar.org/paper/f61e4f6126e412a46dd1c9160eb97729fa435ace", "referenceCount": 110, "citationCount": 52, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Improving Model Understanding and Trust with Counterfactual Explanations of Model Confidence", "abstract": "In this paper, we show that counterfactual explanations of confidence scores help users better understand and better trust an AI model's prediction in human-subject studies. Showing confidence scores in human-agent interaction systems can help build trust between humans and AI systems. However, most existing research only used the confidence score as a form of communication, and we still lack ways to explain why the algorithm is confident. This paper also presents two methods for understanding model confidence using counterfactual explanation: (1) based on counterfactual examples; and (2) based on visualisation of the counterfactual space.", "year": 2022, "venue": "arXiv.org", "authors": [ "Thao Le", "Tim Miller", "Ronal Singh", "L. Sonenberg" ], "externalIds": { "DBLP": "journals/corr/abs-2206-02790", "ArXiv": "2206.02790", "DOI": "10.48550/arXiv.2206.02790", "CorpusId": 249431761 }, "url": "https://www.semanticscholar.org/paper/c03e9ac96153d54e31332a533acec0ed7bffcded", "referenceCount": 39, "citationCount": 9, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Improving Human-AI Partnerships in Child Welfare: Understanding Worker Practices, Challenges, and Desires for Algorithmic Decision Support", "abstract": "AI-based decision support tools (ADS) are increasingly used to augment human decision-making in high-stakes, social contexts. As public sector agencies begin to adopt ADS, it is critical that we understand workers’ experiences with these systems in practice. In this paper, we present findings from a series of interviews and contextual inquiries at a child welfare agency, to understand how they currently make AI-assisted child maltreatment screening decisions. Overall, we observe how workers’ reliance upon the ADS is guided by (1) their knowledge of rich, contextual information beyond what the AI model captures, (2) their beliefs about the ADS’s capabilities and limitations relative to their own, (3) organizational pressures and incentives around the use of the ADS, and (4) awareness of misalignments between algorithmic predictions and their own decision-making objectives. 
Drawing upon these findings, we discuss design implications towards supporting more effective human-AI decision-making.", "year": 2022, "venue": "International Conference on Human Factors in Computing Systems", "authors": [ "Anna Kawakami", "Venkatesh Sivaraman", "H. Cheng", "Logan Stapleton", "Yanghuidi Cheng", "Diana Qing", "Adam Perer", "Zhiwei Steven Wu", "Haiyi Zhu", "Kenneth Holstein" ], "externalIds": { "DBLP": "conf/chi/KawakamiSCSCQPW22", "ArXiv": "2204.02310", "DOI": "10.1145/3491102.3517439", "CorpusId": 247958405 }, "url": "https://www.semanticscholar.org/paper/59c036be9f820ac946fd9795ac05dce7a37a38ef", "referenceCount": 71, "citationCount": 75, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Special issue on Explainable Artificial Intelligence (XAI)", "abstract": null, "year": 2022, "venue": "Artificial Intelligence", "authors": [ "Tim Miller", "Robert Hoffman", "Ofra Amir", "Andreas Holzinger" ], "externalIds": { "DBLP": "journals/ai/MillerHAH22", "DOI": "10.1016/j.artint.2022.103705", "CorpusId": 247563218 }, "url": "https://www.semanticscholar.org/paper/2297836c8572f6f088eac72e12e5bc42494503e1", "referenceCount": 2, "citationCount": 19, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "From Anecdotal Evidence to Quantitative Evaluation Methods: A Systematic Review on Evaluating Explainable AI", "abstract": "The rising popularity of explainable artificial intelligence (XAI) to understand high-performing black boxes raised the question of how to evaluate explanations of machine learning (ML) models. While interpretability and explainability are often presented as a subjectively validated binary property, we consider it a multi-faceted concept. We identify 12 conceptual properties, such as Compactness and Correctness, that should be evaluated for comprehensively assessing the quality of an explanation. Our so-called Co-12 properties serve as categorization scheme for systematically reviewing the evaluation practices of more than 300 papers published in the past 7 years at major AI and ML conferences that introduce an XAI method. We find that one in three papers evaluate exclusively with anecdotal evidence, and one in five papers evaluate with users. This survey also contributes to the call for objective, quantifiable evaluation methods by presenting an extensive overview of quantitative XAI evaluation methods. Our systematic collection of evaluation methods provides researchers and practitioners with concrete tools to thoroughly validate, benchmark, and compare new and existing XAI methods. The Co-12 categorization scheme and our identified evaluation methods open up opportunities to include quantitative metrics as optimization criteria during model training to optimize for accuracy and interpretability simultaneously.", "year": 2022, "venue": "ACM Computing Surveys", "authors": [ "Meike Nauta", "Jan Trienes", "Shreyasi Pathak", "Elisa Nguyen", "Michelle Peters", "Yasmin Schmitt", "Jörg Schlötterer", "M. V. Keulen", "C. 
Seifert" ], "externalIds": { "DBLP": "journals/corr/abs-2201-08164", "ArXiv": "2201.08164", "DOI": "10.1145/3583558", "CorpusId": 246063780 }, "url": "https://www.semanticscholar.org/paper/7caaafd5a3ee033c98e792c7ea5b699d005753d5", "referenceCount": 334, "citationCount": 226, "influentialCitationCount": 29, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Designing Trustworthy User Interfaces", "abstract": "Interface design can directly influence trustworthiness of a software. Thereby, it affects users’ intention to use a tool. Previous research on user trust has not comprehensively addressed user interface design, though. We lack an understanding of what makes interfaces trustworthy (1), as well as actionable measures to improve trustworthiness (2). We contribute to this by addressing both gaps. Based on a systematic literature review, we give a thorough overview over the theory on user trust and provide a taxonomy of factors influencing user interface trustworthiness. Then, we derive concrete measures to address these factors in interface design. We use the results to create a proof of concept interface. In a preliminary evaluation, we compare a variant designed to elicit trust with one designed to reduce it. Our results show that the measures we apply can be effective in fostering trust in users.", "year": 2021, "venue": "Australasian Computer-Human Interaction Conference", "authors": [ "Valentin Zieglmeier", "Antonia Maria Lehene" ], "externalIds": { "ArXiv": "2202.12915", "DBLP": "journals/corr/abs-2202-12915", "DOI": "10.1145/3520495.3520525", "CorpusId": 247158875 }, "url": "https://www.semanticscholar.org/paper/a47075d538dfe7fc04b48a8db43ba140b106f135", "referenceCount": 94, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Exploring Interactions Between Trust, Anthropomorphism, and Relationship Development in Voice Assistants", "abstract": "Modern conversational agents such as Alexa and Google Assistant represent significant progress in speech recognition, natural language processing, and speech synthesis. But as these agents have grown more realistic, concerns have been raised over how their social nature might unconsciously shape our interactions with them. Through a survey of 500 voice assistant users, we explore whether users' relationships with their voice assistants can be quantified using the same metrics as social, interpersonal relationships; as well as if this correlates with how much they trust their devices and the extent to which they anthropomorphise them. Using Knapp's staircase model of human relationships, we find that not only can human-device interactions be modelled in this way, but also that relationship development with voice assistants correlates with increased trust and anthropomorphism.", "year": 2021, "venue": "Proc. ACM Hum. Comput. Interact.", "authors": [ "W. Seymour", "M. V. 
Kleek" ], "externalIds": { "DBLP": "journals/corr/abs-2108-01923", "ArXiv": "2108.01923", "DOI": "10.1145/3479515", "CorpusId": 236912797 }, "url": "https://www.semanticscholar.org/paper/8c37ef9ed01ea4a4af457f310a16df54557c2f72", "referenceCount": 82, "citationCount": 38, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Evaluating Explainable AI: Which Algorithmic Explanations Help Users Predict Model Behavior?", "abstract": "Algorithmic approaches to interpreting machine learning models have proliferated in recent years. We carry out human subject tests that are the first of their kind to isolate the effect of algorithmic explanations on a key aspect of model interpretability, simulatability, while avoiding important confounding experimental factors. A model is simulatable when a person can predict its behavior on new inputs. Through two kinds of simulation tests involving text and tabular data, we evaluate five explanations methods: (1) LIME, (2) Anchor, (3) Decision Boundary, (4) a Prototype model, and (5) a Composite approach that combines explanations from each method. Clear evidence of method effectiveness is found in very few cases: LIME improves simulatability in tabular classification, and our Prototype method is effective in counterfactual simulation tests. We also collect subjective ratings of explanations, but we do not find that ratings are predictive of how helpful explanations are. Our results provide the first reliable and comprehensive estimates of how explanations influence simulatability across a variety of explanation methods and data domains. We show that (1) we need to be careful about the metrics we use to evaluate explanation methods, and (2) there is significant room for improvement in current methods.", "year": 2020, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Peter Hase", "Mohit Bansal" ], "externalIds": { "MAG": "3035371891", "ACL": "2020.acl-main.491", "ArXiv": "2005.01831", "DBLP": "conf/acl/HaseB20", "DOI": "10.18653/v1/2020.acl-main.491", "CorpusId": 218502350 }, "url": "https://www.semanticscholar.org/paper/cffd8f947ba03644f62baea31c64c8920b06288e", "referenceCount": 34, "citationCount": 254, "influentialCitationCount": 23, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Effect of confidence and explanation on accuracy and trust calibration in AI-assisted decision making", "abstract": "Today, AI is being increasingly used to help human experts make decisions in high-stakes scenarios. In these scenarios, full automation is often undesirable, not only due to the significance of the outcome, but also because human experts can draw on their domain knowledge complementary to the model's to ensure task success. We refer to these scenarios as AI-assisted decision making, where the individual strengths of the human and the AI come together to optimize the joint decision outcome. A key to their success is to appropriately calibrate human trust in the AI on a case-by-case basis; knowing when to trust or distrust the AI allows the human expert to appropriately apply their knowledge, improving decision outcomes in cases where the model is likely to perform poorly. This research conducts a case study of AI-assisted decision making in which humans and AI have comparable performance alone, and explores whether features that reveal case-specific model information can calibrate trust and improve the joint performance of the human and AI. 
Specifically, we study the effect of showing confidence score and local explanation for a particular prediction. Through two human experiments, we show that confidence score can help calibrate people's trust in an AI model, but trust calibration alone is not sufficient to improve AI-assisted decision making, which may also depend on whether the human can bring in enough unique knowledge to complement the AI's errors. We also highlight the problems in using local explanation for AI-assisted decision making scenarios and invite the research community to explore new approaches to explainability for calibrating human trust in AI.", "year": 2020, "venue": "FAT*", "authors": [ "Yunfeng Zhang", "Q. Liao", "R. Bellamy" ], "externalIds": { "DBLP": "journals/corr/abs-2001-02114", "ArXiv": "2001.02114", "MAG": "2999637955", "DOI": "10.1145/3351095.3372852", "CorpusId": 210023849 }, "url": "https://www.semanticscholar.org/paper/5cc4100a67fd6f2ce3c760655ba7a12f358c7950", "referenceCount": 33, "citationCount": 530, "influentialCitationCount": 52, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Beyond Accuracy: The Role of Mental Models in Human-AI Team Performance", "abstract": "Decisions made by human-AI teams (e.g., AI-advised humans) are increasingly common in high-stakes domains such as healthcare, criminal justice, and finance. Achieving high team performance depends on more than just the accuracy of the AI system: Since the human and the AI may have different expertise, the highest team performance is often reached when they both know how and when to complement one another. We focus on a factor that is crucial to supporting such complementary: the human’s mental model of the AI capabilities, specifically the AI system’s error boundary (i.e. knowing “When does the AI err?”). Awareness of this lets the human decide when to accept or override the AI’s recommendation. We highlight two key properties of an AI’s error boundary, parsimony and stochasticity, and a property of the task, dimensionality. We show experimentally how these properties affect humans’ mental models of AI capabilities and the resulting team performance. We connect our evaluations to related work and propose goals, beyond accuracy, that merit consideration during model selection and optimization to improve overall human-AI team performance.", "year": 2019, "venue": "AAAI Conference on Human Computation & Crowdsourcing", "authors": [ "Gagan Bansal", "Besmira Nushi", "Ece Kamar", "Walter S. Lasecki", "Daniel S. Weld", "E. Horvitz" ], "externalIds": { "DBLP": "conf/hcomp/BansalNKLWH19", "MAG": "2984353433", "DOI": "10.1609/hcomp.v7i1.5285", "CorpusId": 201685074 }, "url": "https://www.semanticscholar.org/paper/5688b8077117b3aafd54c2e71d959284f4d5c8b9", "referenceCount": 50, "citationCount": 319, "influentialCitationCount": 20, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Human-Grounded Evaluation of SHAP for Alert Processing", "abstract": "In the past years, many new explanation methods have been proposed to achieve interpretability of machine learning predictions. However, the utility of these methods in practical applications has not been researched extensively. In this paper we present the results of a human-grounded evaluation of SHAP, an explanation method that has been well-received in the XAI and related communities. 
In particular, we study whether this local model-agnostic explanation method can be useful for real human domain experts to assess the correctness of positive predictions, i.e. alerts generated by a classifier. We performed experimentation with three different groups of participants (159 in total), who had basic knowledge of explainable machine learning. We performed a qualitative analysis of recorded reflections of experiment participants performing alert processing with and without SHAP information. The results suggest that the SHAP explanations do impact the decision-making process, although the model's confidence score remains to be a leading source of evidence. We statistically test whether there is a significant difference in task utility metrics between tasks for which an explanation was available and tasks in which it was not provided. As opposed to common intuitions, we did not find a significant difference in alert processing performance when a SHAP explanation is available compared to when it is not.", "year": 2019, "venue": "arXiv.org", "authors": [ "Hilde J. P. Weerts", "Werner van Ipenburg", "Mykola Pechenizkiy" ], "externalIds": { "ArXiv": "1907.03324", "DBLP": "journals/corr/abs-1907-03324", "MAG": "2955486278", "CorpusId": 195833476 }, "url": "https://www.semanticscholar.org/paper/e9b1bf5fbff17d9ec337f9f55f813356a78522bc", "referenceCount": 18, "citationCount": 62, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Metrics for Explainable AI: Challenges and Prospects", "abstract": "The question addressed in this paper is: If we present to a user an AI system that explains how it works, how do we know whether the explanation works and the user has achieved a pragmatic understanding of the AI? In other words, how do we know that an explainable AI system (XAI) is any good? Our focus is on the key concepts of measurement. We discuss specific methods for evaluating: (1) the goodness of explanations, (2) whether users are satisfied by explanations, (3) how well users understand the AI systems, (4) how curiosity motivates the search for explanations, (5) whether the user's trust and reliance on the AI are appropriate, and finally, (6) how the human-XAI work system performs. The recommendations we present derive from our integration of extensive research literatures and our own psychometric evaluations.", "year": 2018, "venue": "arXiv.org", "authors": [ "R. Hoffman", "Shane T. Mueller", "Gary Klein", "Jordan Litman" ], "externalIds": { "ArXiv": "1812.04608", "DBLP": "journals/corr/abs-1812-04608", "MAG": "2903808828", "CorpusId": 54577009 }, "url": "https://www.semanticscholar.org/paper/be711f681580d3a02c8bc4c4dab0c7a043f4e1d2", "referenceCount": 156, "citationCount": 630, "influentialCitationCount": 87, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Explaining Explanation For “Explainable Ai”", "abstract": "What makes for an explanation of “black box” AI systems such as Deep Nets? We reviewed the pertinent literatures on explanation and derived key ideas. This set the stage for our empirical inquiries, which include conceptual cognitive modeling, the analysis of a corpus of cases of \"naturalistic explanation\" of computational systems, computational cognitive modeling, and the development of measures for performance evaluation. 
The purpose of our work is to contribute to the program of research on “Explainable AI.” In this report we focus on our initial synthetic modeling activities and the development of measures for the evaluation of explainability in human-machine work systems.", "year": 2018, "venue": "Proceedings of the Human Factors and Ergonomics Society Annual Meeting", "authors": [ "R. Hoffman", "Gary Klein", "Shane T. Mueller" ], "externalIds": { "MAG": "2893622216", "DOI": "10.1177/1541931218621047", "CorpusId": 57674597 }, "url": "https://www.semanticscholar.org/paper/b2b12bd6894c5495a5dce90665431d56511ff7c9", "referenceCount": 23, "citationCount": 70, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology" ] }, { "title": "Anchors: High-Precision Model-Agnostic Explanations", "abstract": "\n \n We introduce a novel model-agnostic system that explains the behavior of complex models with high-precision rules called anchors, representing local, \"sufficient\" conditions for predictions. We propose an algorithm to efficiently compute these explanations for any black-box model with high-probability guarantees. We demonstrate the flexibility of anchors by explaining a myriad of different models for different domains and tasks. In a user study, we show that anchors enable users to predict how a model would behave on unseen instances with less effort and higher precision, as compared to existing linear explanations or no explanations.\n \n", "year": 2018, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Marco Tulio Ribeiro", "Sameer Singh", "Carlos Guestrin" ], "externalIds": { "MAG": "2788403449", "DBLP": "conf/aaai/Ribeiro0G18", "DOI": "10.1609/aaai.v32i1.11491", "CorpusId": 3366554 }, "url": "https://www.semanticscholar.org/paper/1d8f4f76ac6534627ef8a1c24b9937d8ab2a5c5f", "referenceCount": 30, "citationCount": 1782, "influentialCitationCount": 199, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Explainable AI: Beware of Inmates Running the Asylum Or: How I Learnt to Stop Worrying and Love the Social and Behavioural Sciences", "abstract": "In his seminal book `The Inmates are Running the Asylum: Why High-Tech Products Drive Us Crazy And How To Restore The Sanity' [2004, Sams Indianapolis, IN, USA], Alan Cooper argues that a major reason why software is often poorly designed (from a user perspective) is that programmers are in charge of design decisions, rather than interaction designers. As a result, programmers design software for themselves, rather than for their target audience, a phenomenon he refers to as the `inmates running the asylum'. This paper argues that explainable AI risks a similar fate. While the re-emergence of explainable AI is positive, this paper argues most of us as AI researchers are building explanatory agents for ourselves, rather than for the intended users. But explainable AI is more likely to succeed if researchers and practitioners understand, adopt, implement, and improve models from the vast and valuable bodies of research in philosophy, psychology, and cognitive science, and if evaluation of these models is focused more on people than on technology. From a light scan of literature, we demonstrate that there is considerable scope to infuse more results from the social and behavioural sciences into explainable AI, and present some key results from these fields that are relevant to explainable AI.", "year": 2017, "venue": "arXiv.org", "authors": [ "Tim Miller", "P. Howe", "L. 
Sonenberg" ], "externalIds": { "MAG": "2774522520", "DBLP": "journals/corr/abs-1712-00547", "ArXiv": "1712.00547", "CorpusId": 28681432 }, "url": "https://www.semanticscholar.org/paper/0403275945c0f6d96fb22f69447b70c8967403f1", "referenceCount": 65, "citationCount": 341, "influentialCitationCount": 23, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Explanation in Artificial Intelligence: Insights from the Social Sciences", "abstract": null, "year": 2017, "venue": "Artificial Intelligence", "authors": [ "Tim Miller" ], "externalIds": { "ArXiv": "1706.07269", "MAG": "2953283116", "DBLP": "journals/ai/Miller19", "DOI": "10.1016/J.ARTINT.2018.07.007", "CorpusId": 36024272 }, "url": "https://www.semanticscholar.org/paper/e89dfa306723e8ef031765e9c44e5f6f94fd8fda", "referenceCount": 200, "citationCount": 3692, "influentialCitationCount": 408, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Thinking fast and slow.", "abstract": null, "year": 2014, "venue": "Australian Veterinary Journal", "authors": [ "N. McGlynn" ], "externalIds": { "MAG": "2753615671", "CorpusId": 36031679, "PubMed": "25577814" }, "url": "https://www.semanticscholar.org/paper/2f2961362355e45fa014ca0bb8ce4495aedf8824", "referenceCount": 0, "citationCount": 8490, "influentialCitationCount": 847, "isOpenAccess": false, "fieldsOfStudy": [ "Biology", "Medicine" ] }, { "title": "Algorithm Aversion: People Erroneously Avoid Algorithms after Seeing Them Err", "abstract": "Research shows that evidence-based algorithms more accurately predict the future than do human forecasters. Yet when forecasters are deciding whether to use a human forecaster or a statistical algorithm, they often choose the human forecaster. This phenomenon, which we call algorithm aversion, is costly, and it is important to understand its causes. We show that people are especially averse to algorithmic forecasters after seeing them perform, even when they see them outperform a human forecaster. This is because people more quickly lose confidence in algorithmic than human forecasters after seeing them make the same mistake. In 5 studies, participants either saw an algorithm make forecasts, a human make forecasts, both, or neither. They then decided whether to tie their incentives to the future predictions of the algorithm or the human. Participants who saw the algorithm perform were less confident in it, and less likely to choose it over an inferior human forecaster. This was true even among those who saw the algorithm outperform the human.", "year": 2014, "venue": "Journal of experimental psychology. General", "authors": [ "Berkeley J. Dietvorst", "J. Simmons", "Cade Massey" ], "externalIds": { "MAG": "2076034566", "DOI": "10.2139/ssrn.2466040", "CorpusId": 1646733, "PubMed": "25401381" }, "url": "https://www.semanticscholar.org/paper/8195bbdc561fa93e9811daf7b34808b80213f89e", "referenceCount": 29, "citationCount": 1375, "influentialCitationCount": 105, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Psychology" ] }, { "title": "Trust in Automation: Designing for Appropriate Reliance", "abstract": "Automation is often problematic because people fail to rely upon it appropriately. Because people respond to technology socially, trust influences reliance on automation. In particular, trust guides reliance when complexity and unanticipated situations make a complete understanding of the automation impractical. 
This review considers trust from the organizational, sociological, interpersonal, psychological, and neurological perspectives. It considers how the context, automation characteristics, and cognitive processes affect the appropriateness of trust. The context in which the automation is used influences automation performance and provides a goal-oriented perspective to assess automation characteristics along a dimension of attributional abstraction. These characteristics can influence trust through analytic, analogical, and affective processes. The challenges of extrapolating the concept of trust in people to trust in automation are discussed. A conceptual model integrates research regarding trust in automation and describes the dynamics of trust, the role of context, and the influence of display characteristics. Actual or potential applications of this research include improved designs of systems that require people to manage imperfect automation.", "year": 2004, "venue": "Hum. Factors", "authors": [ "John D. Lee", "Katrina A. See" ], "externalIds": { "DBLP": "journals/hf/LeeS04", "MAG": "2110171129", "DOI": "10.1518/hfes.46.1.50_30392", "CorpusId": 5210390, "PubMed": "15151155" }, "url": "https://www.semanticscholar.org/paper/7dd86508438657ac7a704a5d952a2a4422808975", "referenceCount": 206, "citationCount": 4218, "influentialCitationCount": 556, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Knowledge in a Social World", "abstract": "Book Information Knowledge in a Social World. By Alvin I. Goldman. Clarendon Press. Oxford. 1999. Pp. xiii + 407. Paperback, £16.99.", "year": 2001, "venue": "", "authors": [ "A. Goldman" ], "externalIds": { "MAG": "2040634350", "DOI": "10.1080/713659255", "CorpusId": 144290752 }, "url": "https://www.semanticscholar.org/paper/38cceb5bdf05018260db9c9727d1cf8530163ff4", "referenceCount": 0, "citationCount": 597, "influentialCitationCount": 62, "isOpenAccess": false, "fieldsOfStudy": [ "Philosophy" ] }, { "title": "A Note on the Pure Theory of Consumer's Behaviour", "abstract": "FROM its very beginning the theory of consumer's choice has marched steadily towards greater generality, sloughing off at successive stages unnecessarily restrictive conditions. From the time of Gossen to our own day we have seen the removal of (a) the assumption of linearity of marginal utility; (b) the assumption of independence of utilities; (c) the assumption of the measurability of utility in a cardinal sense; and (d) even the assumption of an integrable field of preference elements. The discrediting of utility as a psychological concept robbed it of its only possible virtue as an explanation of human behaviour in other than a circular sense, revealing its emptiness as even a construction. As a result the most modern theory confines itself to an analysis of indifference elements, budgetary equilibrium being defined by equivalence of price ratios to respective indifference slopes. Consistently applied, however, the modern criticism turns back on itself and cuts deeply. For just as we do not claim to know by introspection the behaviour of utility, many will argue we cannot know the behaviour of ratios of marginal utilities or of indifference directions. Why should one believe in the increasing rate of marginal substitution, except in so far as it leads to the type of demand functions in the market which seem plausible? 
Even on the advanced front we are confronted with this dilemma: either the argument with respect to indifference varieties is circular or to many people inadmissible (at least without further demonstration). Hence, despite the fact that the notion of utility has been repudiated or ignored by modern theory, it is clear that much of even the most modern analysis shows vestigial traces of the utility concept. Thus, to any person not", "year": 1938, "venue": "", "authors": [ "P. Samuelson" ], "externalIds": { "MAG": "2323086467", "DOI": "10.2307/2548836", "CorpusId": 155239302 }, "url": "https://www.semanticscholar.org/paper/1de2464829944076ef547f7a4f8b9aaa8dc871a0", "referenceCount": 0, "citationCount": 2025, "influentialCitationCount": 99, "isOpenAccess": false, "fieldsOfStudy": [ "Economics" ] }, { "title": "Model explainability with Amazon Web Services Artificial Intelligence and Machine Learning Solutions", "abstract": null, "year": 2024, "venue": "docs.aws.amazon.com/pdfs/whitepapers/latest/model-explainability-aws-ai-ml/model-explainability-aws-ai-ml", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Interpretable Machine Learning: A Guide for Making Black Box Models Explainable (2nd ed.)", "abstract": null, "year": 2022, "venue": "christophm.github.io/interpretable-ml-book/", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Explainable Active Learning (XAL): Toward AI Explanations as Interfaces for Machine Teachers", "abstract": null, "year": 2021, "venue": "Proceedings of the ACM on Human-Computer Interaction", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Considerations for Evaluation and Generalization in Interpretable Machine Learning", "abstract": null, "year": 2018, "venue": "", "authors": [ "F. 
Doshi-Velez", "Been Kim" ], "externalIds": { "MAG": "2902255491", "DOI": "10.1007/978-3-319-98131-4_1", "CorpusId": 52840192 }, "url": "https://www.semanticscholar.org/paper/e7ed4f39858957c9862a52ba6b09b4a7fcb5d3e1", "referenceCount": 74, "citationCount": 138, "influentialCitationCount": 12, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "AI & global governance: no one should trust AI", "abstract": null, "year": 2018, "venue": "cpr.unu.edu/publications/articles", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Why should I trust you?", "abstract": null, "year": 2016, "venue": "Proceedings of the 22nd ACM SIGKDD international conference on knowledge discovery and data mining", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Epistemic dependence", "abstract": null, "year": 1985, "venue": "Journal of Philosophy", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Investigations into logical deduction", "abstract": null, "year": 1964, "venue": "American philosophical quarterly", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "- in the European Union", "abstract": "robust systems to prevent abuse and threats to internal security arising from failings in document security", "year": null, "venue": "", "authors": [], "externalIds": { "CorpusId": 16101488 }, "url": "https://www.semanticscholar.org/paper/3a4ecd4bb216d17b9a22f915879d65eb088c81d0", "referenceCount": 2, "citationCount": 282, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": null } ] }, "Robotic Environmental State Recognition with Pre-Trained Vision-Language Models and Black-Box Optimization": { "paper_title": "Robotic Environmental State Recognition with Pre-Trained Vision-Language Models and Black-Box Optimization", "arxiv_id": "2409.17519v1", "keyword": "vision language model", "authors": [ "Kento Kawaharazuka", "Yoshiki Obinata", "Naoaki Kanazawa", "Kei Okada", "Masayuki Inaba" ], "references": [ { "title": "Coaxiality Evaluation of Coaxial Imaging System with Concentric Silicon–Glass Hybrid Lens for Thermal and Color Imaging", "abstract": "Thermal imaging is useful for tasks such as detecting the presence of humans and recognizing surrounding objects in the operation of several types of robots, including service robots and personal mobility robots, which assist humans. Because the number of pixels on a thermal imager is generally smaller than that on a color imager, thermal images are more useful when combined with color images, assuming that the correspondence between points in the images captured by the two sensors is known. In the literature, several types of coaxial imaging systems have been reported that can capture thermal and color images, simultaneously, from the same point of view with the same optical axis. Among them, a coaxial imaging system using a concentric silicon–glass hybrid lens was devised. Long-wavelength infrared and visible light was focused using the hybrid lens. 
The focused light was subsequently split using a silicon plate. Separate thermal and color images were then captured using thermal and color imagers, respectively. However, a coaxiality evaluation of the hybrid lens has not been shown. This report proposes an implementation and coaxiality evaluation for a compact coaxial imaging system incorporating the hybrid lens. The coaxiality of the system was experimentally demonstrated by estimating the intrinsic and extrinsic parameters of the thermal and color imagers and performing 2D mapping between the thermal images and color images.", "year": 2020, "venue": "Italian National Conference on Sensors", "authors": [ "T. Takahata" ], "externalIds": { "MAG": "3091891304", "PubMedCentral": "7601588", "DBLP": "journals/sensors/Takahata20", "DOI": "10.3390/s20205753", "CorpusId": 222354190, "PubMed": "33050498" }, "url": "https://www.semanticscholar.org/paper/46e3cbd04e290d3d83570f49153fdfd9134b0461", "referenceCount": 31, "citationCount": 13, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine", "Materials Science" ] }, { "title": "A modified YOLOv3 detection method for vision-based water surface garbage capture robot", "abstract": "To tackle the water surface pollution problem, a vision-based water surface garbage capture robot has been developed in our lab. In this article, we present a modified you only look once v3-based garbage detection method, allowing real-time and high-precision object detection in dynamic aquatic environments. More specifically, to improve the real-time detection performance, the detection scales of you only look once v3 are simplified from 3 to 2. Besides, to guarantee the accuracy of detection, the anchor boxes of our training data set are reclustered for replacing some of the original you only look once v3 prior anchor boxes that are not appropriate to our data set. By virtue of the proposed detection method, the capture robot has the capability of cleaning floating garbage in the field. Experimental results demonstrate that both detection speed and accuracy of the modified you only look once v3 are better than those of other object detection algorithms. The obtained results provide valuable insight into the high-speed detection and grasping of dynamic objects in complex aquatic environments autonomously and intelligently.", "year": 2020, "venue": "International Journal of Advanced Robotic Systems", "authors": [ "Xiali Li", "Manjun Tian", "Shihan Kong", "Licheng Wu", "Junzhi Yu" ], "externalIds": { "MAG": "3038288066", "DOI": "10.1177/1729881420932715", "CorpusId": 221766008 }, "url": "https://www.semanticscholar.org/paper/540f1d716f338e557bffd379cc22bb0020addb82", "referenceCount": 53, "citationCount": 55, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Model-based recognition in robot vision", "abstract": "This paper presents a comparative study and survey of model-based object-recognition algorithms for robot vision. The goal of these algorithms is to recognize the identity, position, and orientation of randomly oriented industrial parts. In one form this is commonly referred to as the \"bin-picking\" problem, in which the parts to be recognized are presented in a jumbled bin. The paper is organized according to 2-D, 2½-D, and 3-D object representations, which are used as the basis for the recognition algorithms. 
Three central issues common to each category, namely, feature extraction, modeling, and matching, are examined in detail. An evaluation and comparison of existing industrial part-recognition systems and algorithms is given, providing insights for progress toward future robot vision systems.", "year": 1986, "venue": "CSUR", "authors": [ "R. Chin", "C. Dyer" ], "externalIds": { "DBLP": "journals/csur/ChinD86", "MAG": "2026311529", "DOI": "10.1145/6462.6464", "CorpusId": 10037306 }, "url": "https://www.semanticscholar.org/paper/5e0ce7719dcb315145284fea50fd7c96df3599ab", "referenceCount": 157, "citationCount": 678, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Door detection in 3D coloured point clouds of indoor environments", "abstract": null, "year": 2018, "venue": "", "authors": [ "B. Quintana", "S. Prieto", "A. Adán", "F. Bosché" ], "externalIds": { "MAG": "2766093809", "DOI": "10.1016/J.AUTCON.2017.10.016", "CorpusId": 55153902 }, "url": "https://www.semanticscholar.org/paper/3149cc59e894ae588de44ea6594ba82be99a0472", "referenceCount": 28, "citationCount": 67, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DEAP: evolutionary algorithms made easy", "abstract": "DEAP is a novel evolutionary computation framework for rapid prototyping and testing of ideas. Its design departs from most other existing frameworks in that it seeks to make algorithms explicit and data structures transparent, as opposed to the more common black-box frameworks. Freely available with extensive documentation at http://deap.gel.ulaval.ca, DEAP is an open source project under an LGPL license.", "year": 2012, "venue": "Journal of machine learning research", "authors": [ "Félix-Antoine Fortin", "F. Rainville", "Marc-André Gardner", "M. Parizeau", "Christian Gagné" ], "externalIds": { "MAG": "2109042184", "DBLP": "journals/jmlr/FortinRGPG12", "DOI": "10.5555/2503308.2503311", "CorpusId": 15629107 }, "url": "https://www.semanticscholar.org/paper/80e9cdc3679e56f3dd33a498bb8e30164b7bb578", "referenceCount": 14, "citationCount": 1734, "influentialCitationCount": 143, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] } ] }, "Overcoming Reward Model Noise in Instruction-Guided Reinforcement Learning": { "paper_title": "Overcoming Reward Model Noise in Instruction-Guided Reinforcement Learning", "arxiv_id": "2409.15922v1", "keyword": "vision language model", "authors": [ "Sukai Huang", "Nir Lipovetzky", "Trevor Cohn" ], "references": [ { "title": "Language models are not naysayers: an analysis of language models on negation benchmarks", "abstract": "Negation has been shown to be a major bottleneck for masked language models, such as BERT. However, whether this finding still holds for larger-sized auto-regressive language models (“LLMs”) has not been studied comprehensively. With the ever-increasing volume of research and applications of LLMs, we take a step back to evaluate the ability of current-generation LLMs to handle negation, a fundamental linguistic phenomenon that is central to language understanding. We evaluate different LLMs - including the open-source GPT-neo, GPT-3, and InstructGPT - against a wide range of negation benchmarks. 
Through systematic experimentation with varying model sizes and prompts, we show that LLMs have several limitations including insensitivity to the presence of negation, an inability to capture the lexical semantics of negation, and a failure to reason under negation.", "year": 2023, "venue": "STARSEM", "authors": [ "Thinh Hung Truong", "Timothy Baldwin", "K. Verspoor", "Trevor Cohn" ], "externalIds": { "DBLP": "journals/corr/abs-2306-08189", "ACL": "2023.starsem-1.10", "ArXiv": "2306.08189", "DOI": "10.48550/arXiv.2306.08189", "CorpusId": 259164714 }, "url": "https://www.semanticscholar.org/paper/438a0221379aacd53f4d8af3b44dfdb2cc3ddab0", "referenceCount": 49, "citationCount": 32, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Effect of Modeling Human Rationality Level on Learning Rewards from Multiple Feedback Types", "abstract": "When inferring reward functions from human behavior (be it demonstrations, comparisons, physical corrections, or e-stops), it has proven useful to model the human as making noisy-rational choices, with a \"rationality coefficient\" capturing how much noise or entropy we expect to see in the human behavior. Prior work typically sets the rationality level to a constant value, regardless of the type, or quality, of human feedback. However, in many settings, giving one type of feedback (e.g. a demonstration) may be much more difficult than a different type of feedback (e.g. answering a comparison query). Thus, we expect to see more or less noise depending on the type of human feedback. In this work, we advocate that grounding the rationality coefficient in real data for each feedback type, rather than assuming a default value, has a significant positive effect on reward learning. We test this in both simulated experiments and in a user study with real human feedback. We find that overestimating human rationality can have dire effects on reward learning accuracy and regret. We also find that fitting the rationality coefficient to human data enables better reward learning, even when the human deviates significantly from the noisy-rational choice model due to systematic biases. Further, we find that the rationality level affects the informativeness of each feedback type: surprisingly, demonstrations are not always the most informative---when the human acts very suboptimally, comparisons actually become more informative, even when the rationality level is the same for both. Ultimately, our results emphasize the importance and advantage of paying attention to the assumed human-rationality-level, especially when agents actively learn from multiple types of human feedback.", "year": 2022, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Gaurav R. Ghosal", "M. Zurek", "Daniel S. Brown", "A. Dragan" ], "externalIds": { "DBLP": "conf/aaai/GhosalZBD23", "ArXiv": "2208.10687", "DOI": "10.48550/arXiv.2208.10687", "CorpusId": 251740992 }, "url": "https://www.semanticscholar.org/paper/e920f426eb32e64474b2a1176d97725f875dd82a", "referenceCount": 51, "citationCount": 18, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "X-CLIP: End-to-End Multi-grained Contrastive Learning for Video-Text Retrieval", "abstract": "Video-text retrieval has been a crucial and fundamental task in multi-modal research. 
The development of video-text retrieval has been considerably promoted by large-scale multi-modal contrastive pre-training, which primarily focuses on coarse-grained or fine-grained contrast. However, cross-grained contrast, which is the contrast between coarse-grained representations and fine-grained representations, has rarely been explored in prior research. Compared with fine-grained or coarse-grained contrasts, cross-grained contrast calculate the correlation between coarse-grained features and each fine-grained feature, and is able to filter out the unnecessary fine-grained features guided by the coarse-grained feature during similarity calculation, thus improving the accuracy of retrieval. To this end, this paper presents a novel multi-grained contrastive model, namely X-CLIP, for video-text retrieval. However, another challenge lies in the similarity aggregation problem, which aims to aggregate fine-grained and cross-grained similarity matrices to instance-level similarity. To address this challenge, we propose the Attention Over Similarity Matrix (AOSM) module to make the model focus on the contrast between essential frames and words, thus lowering the impact of unnecessary frames and words on retrieval results. With multi-grained contrast and the proposed AOSM module, X-CLIP achieves outstanding performance on five widely-used video-text retrieval datasets, including MSR-VTT (49.3 R@1), MSVD (50.4 R@1), LSMDC (26.1 R@1), DiDeMo (47.8 R@1) and ActivityNet (46.2 R@1).", "year": 2022, "venue": "ACM Multimedia", "authors": [ "Yiwei Ma", "Guohai Xu", "Xiaoshuai Sun", "Ming Yan", "Ji Zhang", "Rongrong Ji" ], "externalIds": { "ArXiv": "2207.07285", "DBLP": "journals/corr/abs-2207-07285", "DOI": "10.1145/3503161.3547910", "CorpusId": 250607505 }, "url": "https://www.semanticscholar.org/paper/1ec886e2235763b08fa606a5d5ea3f4540f715ec", "referenceCount": 67, "citationCount": 174, "influentialCitationCount": 30, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Reinforcement Learning with Stochastic Reward Machines", "abstract": "Reward machines are an established tool for dealing with reinforcement learning problems in which rewards are sparse and depend on complex sequences of actions.\n However, existing algorithms for learning reward machines assume an overly idealized setting where rewards have to be free of noise.\n To overcome this practical limitation, we introduce a novel type of reward machines, called stochastic reward machines, and an algorithm for learning them.\n Our algorithm, based on constraint solving, learns minimal stochastic reward machines from the explorations of a reinforcement learning agent.\n This algorithm can easily be paired with existing reinforcement learning algorithms for reward machines and guarantees to converge to an optimal policy in the limit.\n We demonstrate the effectiveness of our algorithm in two case studies and show that it outperforms both existing methods and a naive approach for handling noisy reward functions.", "year": 2022, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Jan Corazza", "I. Gavran", "D. 
Neider" ], "externalIds": { "DBLP": "conf/aaai/CorazzaGN22", "DOI": "10.1609/aaai.v36i6.20594", "CorpusId": 250297195 }, "url": "https://www.semanticscholar.org/paper/44f925cad517464c61c0bd21f397cce556a5dbc2", "referenceCount": 24, "citationCount": 24, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An Analysis of Negation in Natural Language Understanding Corpora", "abstract": "This paper analyzes negation in eight popular corpora spanning six natural language understanding tasks. We show that these corpora have few negations compared to general-purpose English, and that the few negations in them are often unimportant. Indeed, one can often ignore negations and still make the right predictions. Additionally, experimental results show that state-of-the-art transformers trained with these corpora obtain substantially worse results with instances that contain negation, especially if the negations are important. We conclude that new corpora accounting for negation are needed to solve natural language understanding tasks when negation is present.", "year": 2022, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Md Mosharaf Hossain", "Dhivya Chinnappa", "Eduardo Blanco" ], "externalIds": { "DBLP": "conf/acl/HossainC022", "ACL": "2022.acl-short.81", "ArXiv": "2203.08929", "DOI": "10.48550/arXiv.2203.08929", "CorpusId": 247519037 }, "url": "https://www.semanticscholar.org/paper/b9ffb9ccc000ccc5b74d1e9eaeafbcd5fe86e78d", "referenceCount": 37, "citationCount": 37, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Training language models to follow instructions with human feedback", "abstract": "Making language models bigger does not inherently make them better at following a user's intent. For example, large language models can generate outputs that are untruthful, toxic, or simply not helpful to the user. In other words, these models are not aligned with their users. In this paper, we show an avenue for aligning language models with user intent on a wide range of tasks by fine-tuning with human feedback. Starting with a set of labeler-written prompts and prompts submitted through the OpenAI API, we collect a dataset of labeler demonstrations of the desired model behavior, which we use to fine-tune GPT-3 using supervised learning. We then collect a dataset of rankings of model outputs, which we use to further fine-tune this supervised model using reinforcement learning from human feedback. We call the resulting models InstructGPT. In human evaluations on our prompt distribution, outputs from the 1.3B parameter InstructGPT model are preferred to outputs from the 175B GPT-3, despite having 100x fewer parameters. Moreover, InstructGPT models show improvements in truthfulness and reductions in toxic output generation while having minimal performance regressions on public NLP datasets. Even though InstructGPT still makes simple mistakes, our results show that fine-tuning with human feedback is a promising direction for aligning language models with human intent.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Long Ouyang", "Jeff Wu", "Xu Jiang", "Diogo Almeida", "Carroll L. Wainwright", "Pamela Mishkin", "Chong Zhang", "Sandhini Agarwal", "Katarina Slama", "Alex Ray", "John Schulman", "Jacob Hilton", "Fraser Kelton", "Luke E. Miller", "Maddie Simens", "Amanda Askell", "P. Welinder", "P. Christiano", "J. 
Leike", "Ryan J. Lowe" ], "externalIds": { "DBLP": "conf/nips/Ouyang0JAWMZASR22", "ArXiv": "2203.02155", "CorpusId": 246426909 }, "url": "https://www.semanticscholar.org/paper/d766bffc357127e0dc86dd69561d5aeb520d6f4c", "referenceCount": 83, "citationCount": 8493, "influentialCitationCount": 1115, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "CLIPort: What and Where Pathways for Robotic Manipulation", "abstract": "How can we imbue robots with the ability to manipulate objects precisely but also to reason about them in terms of abstract concepts? Recent works in manipulation have shown that end-to-end networks can learn dexterous skills that require precise spatial reasoning, but these methods often fail to generalize to new goals or quickly learn transferable concepts across tasks. In parallel, there has been great progress in learning generalizable semantic representations for vision and language by training on large-scale internet data, however these representations lack the spatial understanding necessary for fine-grained manipulation. To this end, we propose a framework that combines the best of both worlds: a two-stream architecture with semantic and spatial pathways for vision-based manipulation. Specifically, we present CLIPort, a language-conditioned imitation-learning agent that combines the broad semantic understanding (what) of CLIP [1] with the spatial precision (where) of Transporter [2]. Our end-to-end framework is capable of solving a variety of language-specified tabletop tasks from packing unseen objects to folding cloths, all without any explicit representations of object poses, instance segmentations, memory, symbolic states, or syntactic structures. Experiments in simulated and real-world settings show that our approach is data efficient in few-shot settings and generalizes effectively to seen and unseen semantic concepts. We even learn one multi-task policy for 10 simulated and 9 real-world tasks that is better or comparable to single-task policies.", "year": 2021, "venue": "Conference on Robot Learning", "authors": [ "Mohit Shridhar", "Lucas Manuelli", "D. Fox" ], "externalIds": { "DBLP": "conf/corl/ShridharMF21", "ArXiv": "2109.12098", "CorpusId": 237396838 }, "url": "https://www.semanticscholar.org/paper/69ee9b3a915951cc84b74599a3a2699a66d4004f", "referenceCount": 81, "citationCount": 505, "influentialCitationCount": 80, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Benchmarking the Spectrum of Agent Capabilities", "abstract": "Evaluating the general abilities of intelligent agents requires complex simulation environments. Existing benchmarks typically evaluate only one narrow task per environment, requiring researchers to perform expensive training runs on many different environments. We introduce Crafter, an open world survival game with visual inputs that evaluates a wide range of general abilities within a single environment. Agents either learn from the provided reward signal or through intrinsic objectives and are evaluated by semantically meaningful achievements that can be unlocked during each episode, such as discovering resources and crafting tools. Consistently unlocking all achievements requires strong generalization, deep exploration, and long-term reasoning. We experimentally verify that Crafter is of appropriate difficulty to drive future research and provide baselines scores of reward agents and unsupervised agents. 
Furthermore, we observe sophisticated behaviors emerging from maximizing the reward signal, such as building tunnel systems, bridges, houses, and plantations. We hope that Crafter will accelerate research progress by quickly evaluating a wide spectrum of abilities.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Danijar Hafner" ], "externalIds": { "DBLP": "conf/iclr/Hafner22", "ArXiv": "2109.06780", "CorpusId": 237504552 }, "url": "https://www.semanticscholar.org/paper/8e128a1b2efb0ddf688902ade4405d22d5b61eec", "referenceCount": 24, "citationCount": 93, "influentialCitationCount": 26, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RoFormer: Enhanced Transformer with Rotary Position Embedding", "abstract": null, "year": 2021, "venue": "Neurocomputing", "authors": [ "Jianlin Su", "Yu Lu", "Shengfeng Pan", "Bo Wen", "Yunfeng Liu" ], "externalIds": { "DBLP": "journals/ijon/SuALPBL24", "ArXiv": "2104.09864", "DOI": "10.1016/j.neucom.2023.127063", "CorpusId": 233307138 }, "url": "https://www.semanticscholar.org/paper/66c10bf1f11bc1b2d92204d8f8391d087f6de1c4", "referenceCount": 55, "citationCount": 1151, "influentialCitationCount": 114, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Transferable Visual Models From Natural Language Supervision", "abstract": "State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at https://github.com/OpenAI/CLIP.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Alec Radford", "Jong Wook Kim", "Chris Hallacy", "A. Ramesh", "Gabriel Goh", "Sandhini Agarwal", "Girish Sastry", "Amanda Askell", "Pamela Mishkin", "Jack Clark", "Gretchen Krueger", "I. 
Sutskever" ], "externalIds": { "ArXiv": "2103.00020", "DBLP": "conf/icml/RadfordKHRGASAM21", "CorpusId": 231591445 }, "url": "https://www.semanticscholar.org/paper/6f870f7f02a8c59c3e23f407f3ef00dd1dcf8fc4", "referenceCount": 220, "citationCount": 18886, "influentialCitationCount": 5013, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Out of Order: How important is the sequential order of words in a sentence in Natural Language Understanding tasks?", "abstract": "Do state-of-the-art natural language understanding models care about word order - one of the most important characteristics of a sequence? Not always! We found 75% to 90% of the correct predictions of BERT-based classifiers, trained on many GLUE tasks, remain constant after input words are randomly shuffled. Despite BERT embeddings are famously contextual, the contribution of each individual word to downstream tasks is almost unchanged even after the word's context is shuffled. BERT-based models are able to exploit superficial cues (e.g. the sentiment of keywords in sentiment analysis; or the word-wise similarity between sequence-pair inputs in natural language inference) to make correct decisions when tokens are arranged in random orders. Encouraging classifiers to capture word order information improves the performance on most GLUE tasks, SQuAD 2.0 and out-of-samples. Our work suggests that many GLUE tasks are not challenging machines to understand the meaning of a sentence.", "year": 2020, "venue": "Findings", "authors": [ "Thang M. Pham", "Trung Bui", "Long Mai", "Anh Totti Nguyen" ], "externalIds": { "ArXiv": "2012.15180", "DBLP": "conf/acl/PhamBMN21", "ACL": "2021.findings-acl.98", "DOI": "10.18653/v1/2021.findings-acl.98", "CorpusId": 229923132 }, "url": "https://www.semanticscholar.org/paper/776a49616c84d52e8fff9911c561e3bac90910eb", "referenceCount": 70, "citationCount": 113, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PixL2R: Guiding Reinforcement Learning Using Natural Language by Mapping Pixels to Rewards", "abstract": "Reinforcement learning (RL), particularly in sparse reward settings, often requires prohibitively large numbers of interactions with the environment, thereby limiting its applicability to complex problems. To address this, several prior approaches have used natural language to guide the agent's exploration. However, these approaches typically operate on structured representations of the environment, and/or assume some structure in the natural language commands. In this work, we propose a model that directly maps pixels to rewards, given a free-form natural language description of the task, which can then be used for policy learning. Our experiments on the Meta-World robot manipulation domain show that language-based rewards significantly improves the sample efficiency of policy learning, both in sparse and dense reward settings.", "year": 2020, "venue": "Conference on Robot Learning", "authors": [ "Prasoon Goyal", "S. Niekum", "R. 
Mooney" ], "externalIds": { "ArXiv": "2007.15543", "MAG": "3046086546", "DBLP": "conf/corl/GoyalNM20", "CorpusId": 220870869 }, "url": "https://www.semanticscholar.org/paper/826b2efc9e3f563e9aa1845e5f88ea725bcefd9e", "referenceCount": 32, "citationCount": 45, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "On the Theory of Policy Gradient Methods: Optimality, Approximation, and Distribution Shift", "abstract": "Policy gradient methods are among the most effective methods in challenging reinforcement learning problems with large state and/or action spaces. However, little is known about even their most basic theoretical convergence properties, including: if and how fast they converge to a globally optimal solution or how they cope with approximation error due to using a restricted class of parametric policies. This work provides provable characterizations of the computational, approximation, and sample size properties of policy gradient methods in the context of discounted Markov Decision Processes (MDPs). We focus on both: \"tabular\" policy parameterizations, where the optimal policy is contained in the class and where we show global convergence to the optimal policy; and parametric policy classes (considering both log-linear and neural policy classes), which may not contain the optimal policy and where we provide agnostic learning results. One central contribution of this work is in providing approximation guarantees that are average case -- which avoid explicit worst-case dependencies on the size of state space -- by making a formal connection to supervised learning under distribution shift. This characterization shows an important interplay between estimation error, approximation error, and exploration (as characterized through a precisely defined condition number).", "year": 2019, "venue": "Journal of machine learning research", "authors": [ "Alekh Agarwal", "S. Kakade", "J. Lee", "G. Mahajan" ], "externalIds": { "DBLP": "journals/jmlr/AgarwalKLM21", "MAG": "3039845099", "CorpusId": 220364961 }, "url": "https://www.semanticscholar.org/paper/f7f8f05eb2798272fc3a61443d45f2aa47e65135", "referenceCount": 69, "citationCount": 424, "influentialCitationCount": 124, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Using Natural Language for Reward Shaping in Reinforcement Learning", "abstract": "Recent reinforcement learning (RL) approaches have shown strong performance in complex domains, such as Atari games, but are highly sample inefficient. A common approach to reduce interaction time with the environment is to use reward shaping, which involves carefully designing reward functions that provide the agent intermediate rewards for progress towards the goal. Designing such rewards remains a challenge, though. In this work, we use natural language instructions to perform reward shaping. We propose a framework that maps free-form natural language instructions to intermediate rewards, that can seamlessly be integrated into any standard reinforcement learning algorithm. We experiment with Montezuma's Revenge from the Atari video games domain, a popular benchmark in RL. Our experiments on a diverse set of 15 tasks demonstrate that for the same number of interactions with the environment, using language-based rewards can successfully complete the task 60% more often, averaged across all tasks, compared to learning without language. 
", "year": 2019, "venue": "International Joint Conference on Artificial Intelligence", "authors": [ "Prasoon Goyal", "S. Niekum", "R. Mooney" ], "externalIds": { "ArXiv": "1903.02020", "DBLP": "conf/ijcai/GoyalNM19", "MAG": "2964654516", "DOI": "10.24963/ijcai.2019/331", "CorpusId": 70350059 }, "url": "https://www.semanticscholar.org/paper/0fa1c75a452a046e11e775eb6120051c696d9366", "referenceCount": 26, "citationCount": 154, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Reinforced Cross-Modal Matching and Self-Supervised Imitation Learning for Vision-Language Navigation", "abstract": "Vision-language navigation (VLN) is the task of navigating an embodied agent to carry out natural language instructions inside real 3D environments. In this paper, we study how to address three critical challenges for this task: the cross-modal grounding, the ill-posed feedback, and the generalization problems. First, we propose a novel Reinforced Cross-Modal Matching (RCM) approach that enforces cross-modal grounding both locally and globally via reinforcement learning (RL). Particularly, a matching critic is used to provide an intrinsic reward to encourage global matching between instructions and trajectories, and a reasoning navigator is employed to perform cross-modal grounding in the local visual scene. Evaluation on a VLN benchmark dataset shows that our RCM model significantly outperforms previous methods by 10% on SPL and achieves the new state-of-the-art performance. To improve the generalizability of the learned policy, we further introduce a Self-Supervised Imitation Learning (SIL) method to explore unseen environments by imitating its own past, good decisions. We demonstrate that SIL can approximate a better and more efficient policy, which tremendously minimizes the success rate performance gap between seen and unseen environments (from 30.7% to 11.7%).", "year": 2018, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Xin Eric Wang", "Qiuyuan Huang", "Asli Celikyilmaz", "Jianfeng Gao", "Dinghan Shen", "Yuan-fang Wang", "William Yang Wang", "Lei Zhang" ], "externalIds": { "MAG": "2964935470", "DBLP": "conf/cvpr/WangHcGSWWZ19", "ArXiv": "1811.10092", "DOI": "10.1109/CVPR.2019.00679", "CorpusId": 53735892 }, "url": "https://www.semanticscholar.org/paper/c66b8e508718f4b7f14829e5c2cde0add31d2693", "referenceCount": 66, "citationCount": 474, "influentialCitationCount": 53, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "BabyAI: A Platform to Study the Sample Efficiency of Grounded Language Learning", "abstract": "Allowing humans to interactively train artificial agents to understand language instructions is desirable for both practical and scientific reasons, but given the poor data efficiency of the current learning methods, this goal may require substantial research efforts. Here, we introduce the BabyAI research platform to support investigations towards including humans in the loop for grounded language learning. The BabyAI platform comprises an extensible suite of 19 levels of increasing difficulty. The levels gradually lead the agent towards acquiring a combinatorially rich synthetic language which is a proper subset of English. The platform also provides a heuristic expert agent for the purpose of simulating a human teacher. 
We report baseline results and estimate the amount of human involvement that would be required to train a neural network-based agent on some of the BabyAI levels. We put forward strong evidence that current deep learning methods are not yet sufficiently sample efficient when it comes to learning a language with compositional properties.", "year": 2018, "venue": "International Conference on Learning Representations", "authors": [ "Maxime Chevalier-Boisvert", "Dzmitry Bahdanau", "Salem Lahlou", "Lucas Willems", "Chitwan Saharia", "Thien Huu Nguyen", "Yoshua Bengio" ], "externalIds": { "MAG": "2994943647", "ArXiv": "1810.08272", "DBLP": "conf/iclr/Chevalier-Boisvert19", "CorpusId": 59536625 }, "url": "https://www.semanticscholar.org/paper/1b19f433a3e8497e9d9bd67efb108521d16b5b85", "referenceCount": 45, "citationCount": 205, "influentialCitationCount": 36, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Using Reward Machines for High-Level Task Specification and Decomposition in Reinforcement Learning", "abstract": "In this paper we propose Reward Machines – a type of finite state machine that supports the specification of reward functions while exposing reward function structure to the learner and supporting decomposition. We then present Q-Learning for Reward Machines (QRM), an algorithm which appropriately decomposes the reward machine and uses off-policy q-learning to simultaneously learn subpolicies for the different components. QRM is guaranteed to converge to an optimal policy in the tabular case, in contrast to Hierarchical Reinforcement Learning methods which might converge to suboptimal policies. We demonstrate this behavior experimentally in two discrete domains. We also show how function approximation methods like neural networks can be incorporated into QRM, and that doing so can find better policies more quickly than hierarchical methods in a domain with a continuous state space.", "year": 2018, "venue": "International Conference on Machine Learning", "authors": [ "Rodrigo Toro Icarte", "Toryn Q. Klassen", "R. Valenzano", "Sheila A. McIlraith" ], "externalIds": { "MAG": "2804948070", "DBLP": "conf/icml/IcarteKVM18", "CorpusId": 51868784 }, "url": "https://www.semanticscholar.org/paper/00ec8123dd2ba03afab7c1fa02f774062f769181", "referenceCount": 26, "citationCount": 244, "influentialCitationCount": 48, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Beating Atari with Natural Language Guided Reinforcement Learning", "abstract": "We introduce the first deep reinforcement learning agent that learns to beat Atari games with the aid of natural language instructions. The agent uses a multimodal embedding between environment observations and natural language to self-monitor progress through a list of English instructions, granting itself reward for completing instructions in addition to increasing the game score. Our agent significantly outperforms Deep Q-Networks (DQNs), Asynchronous Advantage Actor-Critic (A3C) agents, and the best agents posted to OpenAI Gym on what is often considered the hardest Atari 2600 environment: Montezuma's Revenge.", "year": 2017, "venue": "arXiv.org", "authors": [ "Russell Kaplan", "Chris Sauer", "A. 
Sosa" ], "externalIds": { "MAG": "2609374097", "ArXiv": "1704.05539", "DBLP": "journals/corr/KaplanSS17", "CorpusId": 6022828 }, "url": "https://www.semanticscholar.org/paper/4248b1c782d1e3e3b53a5126ea269518af92c68a", "referenceCount": 18, "citationCount": 67, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Least Ambiguous Set-Valued Classifiers With Bounded Error Levels", "abstract": "ABSTRACT In most classification tasks, there are observations that are ambiguous and therefore difficult to correctly label. Set-valued classifiers output sets of plausible labels rather than a single label, thereby giving a more appropriate and informative treatment to the labeling of ambiguous instances. We introduce a framework for multiclass set-valued classification, where the classifiers guarantee user-defined levels of coverage or confidence (the probability that the true label is contained in the set) while minimizing the ambiguity (the expected size of the output). We first derive oracle classifiers assuming the true distribution to be known. We show that the oracle classifiers are obtained from level sets of the functions that define the conditional probability of each class. Then we develop estimators with good asymptotic and finite sample properties. The proposed estimators build on existing single-label classifiers. The optimal classifier can sometimes output the empty set, but we provide two solutions to fix this issue that are suitable for various practical needs. Supplementary materials for this article are available online.", "year": 2016, "venue": "Journal of the American Statistical Association", "authors": [ "Mauricio Sadinle", "Jing Lei", "L. Wasserman" ], "externalIds": { "MAG": "2514278201", "ArXiv": "1609.00451", "DBLP": "journals/corr/SadinleLW16", "DOI": "10.1080/01621459.2017.1395341", "CorpusId": 622583 }, "url": "https://www.semanticscholar.org/paper/c949dfebccbdfa43f59c219c6bd2389dba1b2d38", "referenceCount": 32, "citationCount": 179, "influentialCitationCount": 29, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "DeViSE: A Deep Visual-Semantic Embedding Model", "abstract": "Modern visual recognition systems are often limited in their ability to scale to large numbers of object categories. This limitation is in part due to the increasing difficulty of acquiring sufficient training data in the form of labeled images as the number of object categories grows. One remedy is to leverage data from other sources - such as text data - both to train visual models and to constrain their predictions. In this paper we present a new deep visual-semantic embedding model trained to identify visual objects using both labeled image data as well as semantic information gleaned from unannotated text. We demonstrate that this model matches state-of-the-art performance on the 1000-class ImageNet object recognition challenge while making more semantically reasonable errors, and also show that the semantic information can be exploited to make predictions about tens of thousands of image labels not observed during training. Semantic knowledge improves such zero-shot predictions achieving hit rates of up to 18% across thousands of novel labels never seen by the visual model.", "year": 2013, "venue": "Neural Information Processing Systems", "authors": [ "Andrea Frome", "G. Corrado", "Jonathon Shlens", "Samy Bengio", "J. 
Dean", "Marc'Aurelio Ranzato", "Tomas Mikolov" ], "externalIds": { "MAG": "2123024445", "DBLP": "conf/nips/FromeCSBDRM13", "CorpusId": 261138 }, "url": "https://www.semanticscholar.org/paper/4aa4069693bee00d1b0759ca3df35e59284e9845", "referenceCount": 22, "citationCount": 2675, "influentialCitationCount": 301, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Arcade Learning Environment: An Evaluation Platform for General Agents", "abstract": "In this article we introduce the Arcade Learning Environment (ALE): both a challenge problem and a platform and methodology for evaluating the development of general, domain-independent AI technology. ALE provides an interface to hundreds of Atari 2600 game environments, each one different, interesting, and designed to be a challenge for human players. ALE presents significant research challenges for reinforcement learning, model learning, model-based planning, imitation learning, transfer learning, and intrinsic motivation. Most importantly, it provides a rigorous testbed for evaluating and comparing approaches to these problems. We illustrate the promise of ALE by developing and benchmarking domain-independent agents designed using well-established AI techniques for both reinforcement learning and planning. In doing so, we also propose an evaluation methodology made possible by ALE, reporting empirical results on over 55 different games. All of the software, including the benchmark agents, is publicly available.", "year": 2012, "venue": "Journal of Artificial Intelligence Research", "authors": [ "Marc G. Bellemare", "Yavar Naddaf", "J. Veness", "Michael Bowling" ], "externalIds": { "ArXiv": "1207.4708", "DBLP": "journals/jair/BellemareNVB13", "MAG": "2150468603", "DOI": "10.1613/jair.3912", "CorpusId": 1552061 }, "url": "https://www.semanticscholar.org/paper/f82e4ff4f003581330338aaae71f60316e58dd26", "referenceCount": 36, "citationCount": 2802, "influentialCitationCount": 423, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multilayer feedforward networks are universal approximators", "abstract": null, "year": 1989, "venue": "Neural Networks", "authors": [ "K. Hornik", "M. Stinchcombe", "H. White" ], "externalIds": { "DBLP": "journals/nn/HornikSW89", "MAG": "2137983211", "DOI": "10.1016/0893-6080(89)90020-8", "CorpusId": 2757547 }, "url": "https://www.semanticscholar.org/paper/f22f6972e66bdd2e769fa64b0df0a13063c0c101", "referenceCount": 25, "citationCount": 21061, "influentialCitationCount": 498, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Describe, Explain, Plan and Select: Interactive Planning with LLMs Enables Open-World Multi-Task Agents", "abstract": null, "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Zihao Wang", "Shaofei Cai", "Guanzhou Chen", "Anji Liu", "Xiaojian Ma", "Yitao Liang" ], "externalIds": { "DBLP": "conf/nips/WangCCLML23", "CorpusId": 268042457 }, "url": "https://www.semanticscholar.org/paper/99bf7219b8d3e6ac1ad8dc9bfd8589cbcf843f23", "referenceCount": 0, "citationCount": 45, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Zero-Shot Reward Specification via Grounded Natural Language", "abstract": "Reward signals in reinforcement learning are expensive to design and often require access to the true state which is not available in the real world. 
Common alternatives are usually demonstrations or goal images which can be labor-intensive to collect. On the other hand, text descriptions provide a general, natural, and low-effort way of communicating the desired task. However, prior works in learning text-conditioned policies still rely on rewards that are defined using either true state or labeled expert demonstrations. We use recent developments in building large-scale visuolanguage models like CLIP to devise a framework that generates the task reward signal just from goal text description and raw pixel observations which is then used to learn the task policy. We evaluate the proposed framework on control and robotic manipulation tasks. Finally, we distill the individual task policies into a single goal text conditioned policy that can generalize in a zero-shot manner to new tasks with unseen objects and unseen goal text descriptions.", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "Parsa Mahmoudieh", "Deepak Pathak", "Trevor Darrell" ], "externalIds": { "DBLP": "conf/icml/MahmoudiehPD22", "CorpusId": 250340669 }, "url": "https://www.semanticscholar.org/paper/a09560239e398fe8aea05856823b46219a7dc539", "referenceCount": 34, "citationCount": 42, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "NovelD: A Simple yet Effective Exploration Criterion", "abstract": "Efficient exploration under sparse rewards remains a key challenge in deep reinforcement learning. Previous exploration methods (e.g., RND) have achieved strong results in multiple hard tasks. However, if there are multiple novel areas to explore, these methods often focus quickly on one without sufficiently trying others (like a depth-wise first search manner). In some scenarios (e.g., four corridor environment in Sec. 4.2), we observe they explore in one corridor for long and fail to cover all the states. On the other hand, in theoretical RL, with optimistic initialization and the inverse square root of visitation count as a bonus, it won’t suffer from this and explores different novel regions alternatively (like a breadth-first search manner). In this paper, inspired by this, we propose a simple but effective criterion called NovelD by weighting every novel area approximately equally. Our algorithm is very simple but yet shows comparable performance or even outperforms multiple SOTA exploration methods in many hard exploration tasks. Specifically, NovelD solves all the static procedurally-generated tasks in Mini-Grid with just 120 M environment steps, without any curriculum learning. In comparison, the previous SOTA only solves 50 % of them. NovelD also achieves SOTA on multiple tasks in NetHack, a rogue-like game that contains more challenging procedurally-generated environments. In multiple Atari games (e.g., Montezuma’s Revenge, Venture, Gravitar), NovelD outperforms RND. We analyze NovelD thoroughly in Mini-Grid and found that empirically it helps the agent explore the environment more uniformly with a focus on exploring beyond the boundary.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Tianjun Zhang", "Huazhe Xu", "Xiaolong Wang", "Yi Wu", "K. Keutzer", "Joseph E. 
Gonzalez", "Yuandong Tian" ], "externalIds": { "DBLP": "conf/nips/ZhangXWWKGT21", "CorpusId": 245877021 }, "url": "https://www.semanticscholar.org/paper/8d15f17ea8f807efe8801d236b7218b6659ac1d9", "referenceCount": 72, "citationCount": 55, "influentialCitationCount": 16, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Faulty Reward Functions in the Wild", "abstract": null, "year": 2016, "venue": "openai.com/index/faulty-reward-functions/", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Guiding Pre-training in Reinforcement Learning with Large Language Models", "abstract": null, "year": null, "venue": "International Conference on Machine Learning", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Discovering Hierarchical Achievements in Reinforcement Learning via Contrastive Learning", "abstract": null, "year": null, "venue": "Neural Information Processing Systems", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "We assume that | Π G | and | Π B | is a fixed number pre-defined by the task environment while E [ G π | π ∈ Π B ] is non-zero as false positive rewards are unavoidable", "abstract": null, "year": null, "venue": "real-world VLMs", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "ReLEP: A Novel Framework for Real-world Long-horizon Embodied Planning": { "paper_title": "ReLEP: A Novel Framework for Real-world Long-horizon Embodied Planning", "arxiv_id": "2409.15658v1", "keyword": "vision language model", "authors": [ "Siyuan Liu", "Jiawei Du", "Sicheng Xiang", "Zibo Wang", "Dingsheng Luo" ], "references": [ { "title": "OpenEQA: Embodied Question Answering in the Era of Foundation Models", "abstract": "We present a modern formulation of Embodied Question Answering (EQA) as the task of understanding an environment well enough to answer questions about it in natural language. An agent can achieve such an understanding by either drawing upon episodic memory, exemplified by agents on smart glasses, or by actively exploring the environment, as in the case of mobile robots. We accompany our formulation with OpenEQA - the first open-vocabulary benchmark dataset for EQA supporting both episodic memory and active exploration use cases. OpenEQA contains over 1600 high-quality human generated questions drawn from over 180 real-world environments. In addition to the dataset, we also provide an automatic LLM-powered evaluation protocol that has excellent correlation with human judgement. Using this dataset and evaluation protocol, we evaluate several state-of-the-art foundation models including GPT-4V, and find that they significantly lag behind human-level performance. Consequently, OpenEQA stands out as a straightforward, measurable, and practically rele-vant benchmark that poses a considerable challenge to current generation offoundation models. 
We hope this inspires and stimulates future research at the intersection of Embodied AI, conversational agents, and world models.", "year": 2024, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Arjun Majumdar", "Anurag Ajay", "Xiaohan Zhang", "Pranav Putta", "Sriram Yenamandra", "Mikael Henaff", "Sneha Silwal", "Paul Mcvay", "Oleksandr Maksymets", "Sergio Arnaud", "Karmesh Yadav", "Qiyang Li", "Ben Newman", "Mohit Sharma", "Vincent-Pierre Berges", "Shiqi Zhang", "Pulkit Agrawal", "Yonatan Bisk", "Dhruv Batra", "Mrinal Kalakrishnan", "Franziska Meier", "Chris Paxton", "Alexander Sax", "A. Rajeswaran" ], "externalIds": { "DOI": "10.1109/CVPR52733.2024.01560", "CorpusId": 268066655 }, "url": "https://www.semanticscholar.org/paper/920731b589af90a5b79236f4939ac117bbb939f2", "referenceCount": 64, "citationCount": 32, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Closed-Loop Open-Vocabulary Mobile Manipulation with GPT-4V", "abstract": "Autonomous robot navigation and manipulation in open environments require reasoning and replanning with closed-loop feedback. We present COME-robot, the first closed-loop framework utilizing the GPT-4V vision-language foundation model for open-ended reasoning and adaptive planning in real-world scenarios. We meticulously construct a library of action primitives for robot exploration, navigation, and manipulation, serving as callable execution modules for GPT-4V in task planning. On top of these modules, GPT-4V serves as the brain that can accomplish multimodal reasoning, generate action policy with code, verify the task progress, and provide feedback for replanning. Such design enables COME-robot to (i) actively perceive the environments, (ii) perform situated reasoning, and (iii) recover from failures. Through comprehensive experiments involving 8 challenging real-world tabletop and manipulation tasks, COME-robot demonstrates a significant improvement in task success rate (~25%) compared to state-of-the-art baseline methods. We further conduct comprehensive analyses to elucidate how COME-robot's design facilitates failure recovery, free-form instruction following, and long-horizon task planning.", "year": 2024, "venue": "arXiv.org", "authors": [ "Peiyuan Zhi", "Zhiyuan Zhang", "Muzhi Han", "Zeyu Zhang", "Zhitian Li", "Ziyuan Jiao", "Baoxiong Jia", "Siyuan Huang" ], "externalIds": { "DBLP": "journals/corr/abs-2404-10220", "ArXiv": "2404.10220", "DOI": "10.48550/arXiv.2404.10220", "CorpusId": 269157231 }, "url": "https://www.semanticscholar.org/paper/bb9d1e2de8d4a982b5c8d0e407abadfe11432246", "referenceCount": 48, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RT-H: Action Hierarchies Using Language", "abstract": "Language provides a way to break down complex concepts into digestible pieces. Recent works in robot imitation learning use language-conditioned policies that predict actions given visual observations and the high-level task specified in language. These methods leverage the structure of natural language to share data between semantically similar tasks (e.g.,\"pick coke can\"and\"pick an apple\") in multi-task datasets. However, as tasks become more semantically diverse (e.g.,\"pick coke can\"and\"pour cup\"), sharing data between tasks becomes harder, so learning to map high-level tasks to actions requires much more demonstration data. 
To bridge tasks and actions, our insight is to teach the robot the language of actions, describing low-level motions with more fine-grained phrases like\"move arm forward\". Predicting these language motions as an intermediate step between tasks and actions forces the policy to learn the shared structure of low-level motions across seemingly disparate tasks. Furthermore, a policy that is conditioned on language motions can easily be corrected during execution through human-specified language motions. This enables a new paradigm for flexible policies that can learn from human intervention in language. Our method RT-H builds an action hierarchy using language motions: it first learns to predict language motions, and conditioned on this and the high-level task, it predicts actions, using visual context at all stages. We show that RT-H leverages this language-action hierarchy to learn policies that are more robust and flexible by effectively tapping into multi-task datasets. We show that these policies not only allow for responding to language interventions, but can also learn from such interventions and outperform methods that learn from teleoperated interventions. Our website and videos are found at https://rt-hierarchy.github.io.", "year": 2024, "venue": "Robotics", "authors": [ "Suneel Belkhale", "Tianli Ding", "Ted Xiao", "P. Sermanet", "Quon Vuong", "Jonathan Tompson", "Yevgen Chebotar", "Debidatta Dwibedi", "Dorsa Sadigh" ], "externalIds": { "ArXiv": "2403.01823", "DBLP": "journals/corr/abs-2403-01823", "DOI": "10.48550/arXiv.2403.01823", "CorpusId": 268249108 }, "url": "https://www.semanticscholar.org/paper/2bbc0b48052f1f3f52cbd46ece378baa61fa711b", "referenceCount": 58, "citationCount": 19, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Look Before You Leap: Unveiling the Power of GPT-4V in Robotic Vision-Language Planning", "abstract": "In this study, we are interested in imbuing robots with the capability of physically-grounded task planning. Recent advancements have shown that large language models (LLMs) possess extensive knowledge useful in robotic tasks, especially in reasoning and planning. However, LLMs are constrained by their lack of world grounding and dependence on external affordance models to perceive environmental information, which cannot jointly reason with LLMs. We argue that a task planner should be an inherently grounded, unified multimodal system. To this end, we introduce Robotic Vision-Language Planning (ViLa), a novel approach for long-horizon robotic planning that leverages vision-language models (VLMs) to generate a sequence of actionable steps. ViLa directly integrates perceptual data into its reasoning and planning process, enabling a profound understanding of commonsense knowledge in the visual world, including spatial layouts and object attributes. It also supports flexible multimodal goal specification and naturally incorporates visual feedback. 
Our extensive evaluation, conducted in both real-robot and simulated environments, demonstrates ViLa's superiority over existing LLM-based planners, highlighting its effectiveness in a wide array of open-world manipulation tasks.", "year": 2023, "venue": "arXiv.org", "authors": [ "Yingdong Hu", "Fanqi Lin", "Tong Zhang", "Li Yi", "Yang Gao" ], "externalIds": { "ArXiv": "2311.17842", "DBLP": "journals/corr/abs-2311-17842", "DOI": "10.48550/arXiv.2311.17842", "CorpusId": 265715696 }, "url": "https://www.semanticscholar.org/paper/498decc50ccea9293f63a98c30d7c3439be074b7", "referenceCount": 97, "citationCount": 49, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RT-2: Vision-Language-Action Models Transfer Web Knowledge to Robotic Control", "abstract": "We study how vision-language models trained on Internet-scale data can be incorporated directly into end-to-end robotic control to boost generalization and enable emergent semantic reasoning. Our goal is to enable a single end-to-end trained model to both learn to map robot observations to actions and enjoy the benefits of large-scale pretraining on language and vision-language data from the web. To this end, we propose to co-fine-tune state-of-the-art vision-language models on both robotic trajectory data and Internet-scale vision-language tasks, such as visual question answering. In contrast to other approaches, we propose a simple, general recipe to achieve this goal: in order to fit both natural language responses and robotic actions into the same format, we express the actions as text tokens and incorporate them directly into the training set of the model in the same way as natural language tokens. We refer to such category of models as vision-language-action models (VLA) and instantiate an example of such a model, which we call RT-2. Our extensive evaluation (6k evaluation trials) shows that our approach leads to performant robotic policies and enables RT-2 to obtain a range of emergent capabilities from Internet-scale training. This includes significantly improved generalization to novel objects, the ability to interpret commands not present in the robot training data (such as placing an object onto a particular number or icon), and the ability to perform rudimentary reasoning in response to user commands (such as picking up the smallest or largest object, or the one closest to another object). We further show that incorporating chain of thought reasoning allows RT-2 to perform multi-stage semantic reasoning, for example figuring out which object to pick up for use as an improvised hammer (a rock), or which type of drink is best suited for someone who is tired (an energy drink).", "year": 2023, "venue": "Conference on Robot Learning", "authors": [ "Anthony Brohan", "Noah Brown", "Justice Carbajal", "Yevgen Chebotar", "K. Choromanski", "Tianli Ding", "Danny Driess", "Kumar Avinava Dubey", "Chelsea Finn", "Peter R. Florence", "Chuyuan Fu", "Montse Gonzalez Arenas", "K. Gopalakrishnan", "Kehang Han", "Karol Hausman", "Alexander Herzog", "Jasmine Hsu", "Brian Ichter", "A. Irpan", "Nikhil J. Joshi", "Ryan C. Julian", "Dmitry Kalashnikov", "Yuheng Kuang", "Isabel Leal", "S. Levine", "H. Michalewski", "Igor Mordatch", "Karl Pertsch", "Kanishka Rao", "Krista Reymann", "M. Ryoo", "Grecia Salazar", "Pannag R. Sanketi", "P. Sermanet", "Jaspiar Singh", "Anika Singh", "Radu Soricut", "Huong Tran", "Vincent Vanhoucke", "Q. 
Vuong", "Ayzaan Wahid", "Stefan Welker", "Paul Wohlhart", "Ted Xiao", "Tianhe Yu", "Brianna Zitkovich" ], "externalIds": { "ArXiv": "2307.15818", "DBLP": "conf/corl/ZitkovichYXXXXW23", "DOI": "10.48550/arXiv.2307.15818", "CorpusId": 260293142 }, "url": "https://www.semanticscholar.org/paper/38939304bb760473141c2aca0305e44fbe04e6e8", "referenceCount": 94, "citationCount": 595, "influentialCitationCount": 47, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "VoxPoser: Composable 3D Value Maps for Robotic Manipulation with Language Models", "abstract": "Large language models (LLMs) are shown to possess a wealth of actionable knowledge that can be extracted for robot manipulation in the form of reasoning and planning. Despite the progress, most still rely on pre-defined motion primitives to carry out the physical interactions with the environment, which remains a major bottleneck. In this work, we aim to synthesize robot trajectories, i.e., a dense sequence of 6-DoF end-effector waypoints, for a large variety of manipulation tasks given an open-set of instructions and an open-set of objects. We achieve this by first observing that LLMs excel at inferring affordances and constraints given a free-form language instruction. More importantly, by leveraging their code-writing capabilities, they can interact with a vision-language model (VLM) to compose 3D value maps to ground the knowledge into the observation space of the agent. The composed value maps are then used in a model-based planning framework to zero-shot synthesize closed-loop robot trajectories with robustness to dynamic perturbations. We further demonstrate how the proposed framework can benefit from online experiences by efficiently learning a dynamics model for scenes that involve contact-rich interactions. We present a large-scale study of the proposed method in both simulated and real-robot environments, showcasing the ability to perform a large variety of everyday manipulation tasks specified in free-form natural language. Videos and code at https://voxposer.github.io", "year": 2023, "venue": "Conference on Robot Learning", "authors": [ "Wenlong Huang", "Chen Wang", "Ruohan Zhang", "Yunzhu Li", "Jiajun Wu", "Li Fei-Fei" ], "externalIds": { "ArXiv": "2307.05973", "DBLP": "conf/corl/HuangWZL0023", "DOI": "10.48550/arXiv.2307.05973", "CorpusId": 259837330 }, "url": "https://www.semanticscholar.org/paper/1cd8373490efc2d74c2796f4b2aa27c7d4415ec9", "referenceCount": 146, "citationCount": 289, "influentialCitationCount": 24, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "EmbodiedGPT: Vision-Language Pre-Training via Embodied Chain of Thought", "abstract": "Embodied AI is a crucial frontier in robotics, capable of planning and executing action sequences for robots to accomplish long-horizon tasks in physical environments. In this work, we introduce EmbodiedGPT, an end-to-end multi-modal foundation model for embodied AI, empowering embodied agents with multi-modal understanding and execution capabilities. To achieve this, we have made the following efforts: (i) We craft a large-scale embodied planning dataset, termed EgoCOT. The dataset consists of carefully selected videos from the Ego4D dataset, along with corresponding high-quality language instructions. Specifically, we generate a sequence of sub-goals with the\"Chain of Thoughts\"mode for effective embodied planning. 
(ii) We introduce an efficient training approach to EmbodiedGPT for high-quality plan generation, by adapting a 7B large language model (LLM) to the EgoCOT dataset via prefix tuning. (iii) We introduce a paradigm for extracting task-related features from LLM-generated planning queries to form a closed loop between high-level planning and low-level control. Extensive experiments show the effectiveness of EmbodiedGPT on embodied tasks, including embodied planning, embodied control, visual captioning, and visual question answering. Notably, EmbodiedGPT significantly enhances the success rate of the embodied control task by extracting more effective features. It has achieved a remarkable 1.6 times increase in success rate on the Franka Kitchen benchmark and a 1.3 times increase on the Meta-World benchmark, compared to the BLIP-2 baseline fine-tuned with the Ego4D dataset.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Yao Mu", "Qinglong Zhang", "Mengkang Hu", "Wen Wang", "Mingyu Ding", "Jun Jin", "Bin Wang", "Jifeng Dai", "Y. Qiao", "Ping Luo" ], "externalIds": { "ArXiv": "2305.15021", "DBLP": "journals/corr/abs-2305-15021", "DOI": "10.48550/arXiv.2305.15021", "CorpusId": 258865718 }, "url": "https://www.semanticscholar.org/paper/00cb69a9f280317d1c59ac5827551ee9b10642b8", "referenceCount": 73, "citationCount": 133, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning", "abstract": "Large-scale pre-training and instruction tuning have been successful at creating general-purpose language models with broad competence. However, building general-purpose vision-language models is challenging due to the rich input distributions and task diversity resulting from the additional visual input. Although vision-language pretraining has been widely studied, vision-language instruction tuning remains under-explored. In this paper, we conduct a systematic and comprehensive study on vision-language instruction tuning based on the pretrained BLIP-2 models. We gather 26 publicly available datasets, covering a wide variety of tasks and capabilities, and transform them into instruction tuning format. Additionally, we introduce an instruction-aware Query Transformer, which extracts informative features tailored to the given instruction. Trained on 13 held-in datasets, InstructBLIP attains state-of-the-art zero-shot performance across all 13 held-out datasets, substantially outperforming BLIP-2 and larger Flamingo models. Our models also lead to state-of-the-art performance when finetuned on individual downstream tasks (e.g., 90.7% accuracy on ScienceQA questions with image contexts). Furthermore, we qualitatively demonstrate the advantages of InstructBLIP over concurrent multimodal models. All InstructBLIP models are open-sourced at https://github.com/salesforce/LAVIS/tree/main/projects/instructblip.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Wenliang Dai", "Junnan Li", "Dongxu Li", "A. M. H. Tiong", "Junqi Zhao", "Weisheng Wang", "Boyang Albert Li", "Pascale Fung", "Steven C. H. 
Hoi" ], "externalIds": { "ArXiv": "2305.06500", "DBLP": "journals/corr/abs-2305-06500", "DOI": "10.48550/arXiv.2305.06500", "CorpusId": 258615266 }, "url": "https://www.semanticscholar.org/paper/8bd6a2a89503be083176f2cc26fabedb79238cbd", "referenceCount": 52, "citationCount": 1182, "influentialCitationCount": 281, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Grounding DINO: Marrying DINO with Grounded Pre-Training for Open-Set Object Detection", "abstract": "In this paper, we present an open-set object detector, called Grounding DINO, by marrying Transformer-based detector DINO with grounded pre-training, which can detect arbitrary objects with human inputs such as category names or referring expressions. The key solution of open-set object detection is introducing language to a closed-set detector for open-set concept generalization. To effectively fuse language and vision modalities, we conceptually divide a closed-set detector into three phases and propose a tight fusion solution, which includes a feature enhancer, a language-guided query selection, and a cross-modality decoder for cross-modality fusion. While previous works mainly evaluate open-set object detection on novel categories, we propose to also perform evaluations on referring expression comprehension for objects specified with attributes. Grounding DINO performs remarkably well on all three settings, including benchmarks on COCO, LVIS, ODinW, and RefCOCO/+/g. Grounding DINO achieves a $52.5$ AP on the COCO detection zero-shot transfer benchmark, i.e., without any training data from COCO. It sets a new record on the ODinW zero-shot benchmark with a mean $26.1$ AP. Code will be available at \\url{https://github.com/IDEA-Research/GroundingDINO}.", "year": 2023, "venue": "arXiv.org", "authors": [ "Shilong Liu", "Zhaoyang Zeng", "Tianhe Ren", "Feng Li", "Hao Zhang", "Jie Yang", "Chun-yue Li", "Jianwei Yang", "Hang Su", "Jun-Juan Zhu", "Lei Zhang" ], "externalIds": { "DBLP": "journals/corr/abs-2303-05499", "ArXiv": "2303.05499", "DOI": "10.48550/arXiv.2303.05499", "CorpusId": 257427307 }, "url": "https://www.semanticscholar.org/paper/c3e5a20b844c042d2174263d2fd5b30d8cc8f0b0", "referenceCount": 68, "citationCount": 988, "influentialCitationCount": 147, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PaLM-E: An Embodied Multimodal Language Model", "abstract": "Large language models excel at a wide range of complex tasks. However, enabling general inference in the real world, e.g., for robotics problems, raises the challenge of grounding. We propose embodied language models to directly incorporate real-world continuous sensor modalities into language models and thereby establish the link between words and percepts. Input to our embodied language model are multi-modal sentences that interleave visual, continuous state estimation, and textual input encodings. We train these encodings end-to-end, in conjunction with a pre-trained large language model, for multiple embodied tasks including sequential robotic manipulation planning, visual question answering, and captioning. Our evaluations show that PaLM-E, a single large embodied multimodal model, can address a variety of embodied reasoning tasks, from a variety of observation modalities, on multiple embodiments, and further, exhibits positive transfer: the model benefits from diverse joint training across internet-scale language, vision, and visual-language domains. 
Our largest model, PaLM-E-562B with 562B parameters, in addition to being trained on robotics tasks, is a visual-language generalist with state-of-the-art performance on OK-VQA, and retains generalist language capabilities with increasing scale.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Danny Driess", "F. Xia", "Mehdi S. M. Sajjadi", "Corey Lynch", "Aakanksha Chowdhery", "Brian Ichter", "Ayzaan Wahid", "Jonathan Tompson", "Q. Vuong", "Tianhe Yu", "Wenlong Huang", "Yevgen Chebotar", "P. Sermanet", "Daniel Duckworth", "S. Levine", "Vincent Vanhoucke", "Karol Hausman", "Marc Toussaint", "Klaus Greff", "Andy Zeng", "Igor Mordatch", "Peter R. Florence" ], "externalIds": { "DBLP": "journals/corr/abs-2303-03378", "ArXiv": "2303.03378", "DOI": "10.48550/arXiv.2303.03378", "CorpusId": 257364842 }, "url": "https://www.semanticscholar.org/paper/38fe8f324d2162e63a967a9ac6648974fc4c66f3", "referenceCount": 71, "citationCount": 1102, "influentialCitationCount": 66, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Scaling Vision Transformers to 22 Billion Parameters", "abstract": "The scaling of Transformers has driven breakthrough capabilities for language models. At present, the largest large language models (LLMs) contain upwards of 100B parameters. Vision Transformers (ViT) have introduced the same architecture to image and video modelling, but these have not yet been successfully scaled to nearly the same degree; the largest dense ViT contains 4B parameters (Chen et al., 2022). We present a recipe for highly efficient and stable training of a 22B-parameter ViT (ViT-22B) and perform a wide variety of experiments on the resulting model. When evaluated on downstream tasks (often with a lightweight linear model on frozen features), ViT-22B demonstrates increasing performance with scale. We further observe other interesting benefits of scale, including an improved tradeoff between fairness and performance, state-of-the-art alignment to human visual perception in terms of shape/texture bias, and improved robustness. ViT-22B demonstrates the potential for\"LLM-like\"scaling in vision, and provides key steps towards getting there.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Mostafa Dehghani", "Josip Djolonga", "Basil Mustafa", "Piotr Padlewski", "J. Heek", "J. Gilmer", "A. Steiner", "Mathilde Caron", "Robert Geirhos", "Ibrahim M. Alabdulmohsin", "Rodolphe Jenatton", "Lucas Beyer", "M. Tschannen", "Anurag Arnab", "Xiao Wang", "C. Riquelme", "Matthias Minderer", "J. Puigcerver", "Utku Evci", "Manoj Kumar", "Sjoerd van Steenkiste", "Gamaleldin F. Elsayed", "Aravindh Mahendran", "F. Yu", "Avital Oliver", "Fantine Huot", "Jasmijn Bastings", "Mark Collier", "A. Gritsenko", "Vighnesh Birodkar", "C. Vasconcelos", "Yi Tay", "Thomas Mensink", "Alexander Kolesnikov", "Filip Paveti'c", "Dustin Tran", "Thomas Kipf", "Mario Luvci'c", "Xiaohua Zhai", "Daniel Keysers", "Jeremiah Harmsen", "N. 
Houlsby" ], "externalIds": { "DBLP": "journals/corr/abs-2302-05442", "ArXiv": "2302.05442", "DOI": "10.48550/arXiv.2302.05442", "CorpusId": 256808367 }, "url": "https://www.semanticscholar.org/paper/61e721334296ebfbbf6443b5ed9eb8c83b708c95", "referenceCount": 134, "citationCount": 390, "influentialCitationCount": 25, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "RT-1: Robotics Transformer for Real-World Control at Scale", "abstract": "By transferring knowledge from large, diverse, task-agnostic datasets, modern machine learning models can solve specific downstream tasks either zero-shot or with small task-specific datasets to a high level of performance. While this capability has been demonstrated in other fields such as computer vision, natural language processing or speech recognition, it remains to be shown in robotics, where the generalization capabilities of the models are particularly critical due to the difficulty of collecting real-world robotic data. We argue that one of the keys to the success of such general robotic models lies with open-ended task-agnostic training, combined with high-capacity architectures that can absorb all of the diverse, robotic data. In this paper, we present a model class, dubbed Robotics Transformer, that exhibits promising scalable model properties. We verify our conclusions in a study of different model classes and their ability to generalize as a function of the data size, model size, and data diversity based on a large-scale data collection on real robots performing real-world tasks. The project's website and videos can be found at robotics-transformer1.github.io", "year": 2022, "venue": "Robotics: Science and Systems", "authors": [ "Anthony Brohan", "Noah Brown", "Justice Carbajal", "Yevgen Chebotar", "Joseph Dabis", "Chelsea Finn", "K. Gopalakrishnan", "Karol Hausman", "Alexander Herzog", "Jasmine Hsu", "Julian Ibarz", "Brian Ichter", "A. Irpan", "Tomas Jackson", "Sally Jesmonth", "Nikhil J. Joshi", "Ryan C. Julian", "Dmitry Kalashnikov", "Yuheng Kuang", "Isabel Leal", "Kuang-Huei Lee", "S. Levine", "Yao Lu", "U. Malla", "D. Manjunath", "Igor Mordatch", "Ofir Nachum", "Carolina Parada", "Jodilyn Peralta", "Emily Perez", "Karl Pertsch", "Jornell Quiambao", "Kanishka Rao", "M. Ryoo", "Grecia Salazar", "Pannag R. Sanketi", "Kevin Sayed", "Jaspiar Singh", "S. Sontakke", "Austin Stone", "Clayton Tan", "Huong Tran", "Vincent Vanhoucke", "Steve Vega", "Q. Vuong", "F. Xia", "Ted Xiao", "Peng Xu", "Sichun Xu", "Tianhe Yu", "Brianna Zitkovich" ], "externalIds": { "DBLP": "conf/rss/BrohanBCCDFGHHH23", "ArXiv": "2212.06817", "DOI": "10.48550/arXiv.2212.06817", "CorpusId": 254591260 }, "url": "https://www.semanticscholar.org/paper/fd1cf28a2b8caf2fe29af5e7fa9191cecfedf84d", "referenceCount": 72, "citationCount": 608, "influentialCitationCount": 64, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Code as Policies: Language Model Programs for Embodied Control", "abstract": "Large language models (LLMs) trained on code-completion have been shown to be capable of synthesizing simple Python programs from docstrings [1]. We find that these code-writing LLMs can be re-purposed to write robot policy code, given natural language commands. Specifically, policy code can express functions or feedback loops that process perception outputs (e.g., from object detectors [2], [3]) and parameterize control primitive APIs. 
When provided as input several example language commands (formatted as comments) followed by corresponding policy code (via few-shot prompting), LLMs can take in new commands and autonomously re-compose API calls to generate new policy code respectively. By chaining classic logic structures and referencing third-party libraries (e.g., NumPy, Shapely) to perform arithmetic, LLMs used in this way can write robot policies that (i) exhibit spatial-geometric reasoning, (ii) generalize to new instructions, and (iii) prescribe precise values (e.g., velocities) to ambiguous descriptions (‘faster’) depending on context (i.e., behavioral commonsense). This paper presents Code as Policies: a robot-centric formulation of language model generated programs (LMPs) that can represent reactive policies (e.g., impedance controllers), as well as waypoint-based policies (vision-based pick and place, trajectory-based control), demonstrated across multiple real robot platforms. Central to our approach is prompting hierarchical code-gen (recursively defining undefined functions), which can write more complex code and also improves state-of-the-art to solve 39.8% of problems on the HumanEval [1] benchmark. Code and videos are available at https://code-as-policies.github.io", "year": 2022, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Jacky Liang", "Wenlong Huang", "F. Xia", "Peng Xu", "Karol Hausman", "Brian Ichter", "Peter R. Florence", "Andy Zeng" ], "externalIds": { "DBLP": "conf/icra/LiangHXXHIFZ23", "ArXiv": "2209.07753", "DOI": "10.1109/ICRA48891.2023.10160591", "CorpusId": 252355542 }, "url": "https://www.semanticscholar.org/paper/41531594d7e0f3b2e138ae43e0a0f6e24a9b014c", "referenceCount": 62, "citationCount": 583, "influentialCitationCount": 53, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation", "abstract": "Vision-Language Pre-training (VLP) has advanced the performance for many vision-language tasks. However, most existing pre-trained models only excel in either understanding-based tasks or generation-based tasks. Furthermore, performance improvement has been largely achieved by scaling up the dataset with noisy image-text pairs collected from the web, which is a suboptimal source of supervision. In this paper, we propose BLIP, a new VLP framework which transfers flexibly to both vision-language understanding and generation tasks. BLIP effectively utilizes the noisy web data by bootstrapping the captions, where a captioner generates synthetic captions and a filter removes the noisy ones. We achieve state-of-the-art results on a wide range of vision-language tasks, such as image-text retrieval (+2.7% in average recall@1), image captioning (+2.8% in CIDEr), and VQA (+1.6% in VQA score). BLIP also demonstrates strong generalization ability when directly transferred to video-language tasks in a zero-shot manner. Code, models, and datasets are released at https://github.com/salesforce/BLIP.", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "Junnan Li", "Dongxu Li", "Caiming Xiong", "S. 
Hoi" ], "externalIds": { "ArXiv": "2201.12086", "DBLP": "conf/icml/0001LXH22", "CorpusId": 246411402 }, "url": "https://www.semanticscholar.org/paper/a3b42a83669998f65df60d7c065a70d07ca95e99", "referenceCount": 60, "citationCount": 2731, "influentialCitationCount": 451, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Language Models as Zero-Shot Planners: Extracting Actionable Knowledge for Embodied Agents", "abstract": "Can world knowledge learned by large language models (LLMs) be used to act in interactive environments? In this paper, we investigate the possibility of grounding high-level tasks, expressed in natural language (e.g.\"make breakfast\"), to a chosen set of actionable steps (e.g.\"open fridge\"). While prior work focused on learning from explicit step-by-step examples of how to act, we surprisingly find that if pre-trained LMs are large enough and prompted appropriately, they can effectively decompose high-level tasks into mid-level plans without any further training. However, the plans produced naively by LLMs often cannot map precisely to admissible actions. We propose a procedure that conditions on existing demonstrations and semantically translates the plans to admissible actions. Our evaluation in the recent VirtualHome environment shows that the resulting method substantially improves executability over the LLM baseline. The conducted human evaluation reveals a trade-off between executability and correctness but shows a promising sign towards extracting actionable knowledge from language models. Website at https://huangwl18.github.io/language-planner", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "Wenlong Huang", "P. Abbeel", "Deepak Pathak", "Igor Mordatch" ], "externalIds": { "DBLP": "journals/corr/abs-2201-07207", "ArXiv": "2201.07207", "CorpusId": 246035276 }, "url": "https://www.semanticscholar.org/paper/92a8f7f09f3705cb5a6009a42220a6f01ea084e8", "referenceCount": 55, "citationCount": 792, "influentialCitationCount": 73, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale", "abstract": "While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Alexey Dosovitskiy", "Lucas Beyer", "Alexander Kolesnikov", "Dirk Weissenborn", "Xiaohua Zhai", "Thomas Unterthiner", "Mostafa Dehghani", "Matthias Minderer", "G. Heigold", "S. Gelly", "Jakob Uszkoreit", "N. 
Houlsby" ], "externalIds": { "MAG": "3094502228", "ArXiv": "2010.11929", "DBLP": "conf/iclr/DosovitskiyB0WZ21", "CorpusId": 225039882 }, "url": "https://www.semanticscholar.org/paper/268d347e8a55b5eb82fb5e7d2f800e33c75ab18a", "referenceCount": 65, "citationCount": 28233, "influentialCitationCount": 4121, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ALFRED: A Benchmark for Interpreting Grounded Instructions for Everyday Tasks", "abstract": "We present ALFRED (Action Learning From Realistic Environments and Directives), a benchmark for learning a mapping from natural language instructions and egocentric vision to sequences of actions for household tasks. ALFRED includes long, compositional tasks with non-reversible state changes to shrink the gap between research benchmarks and real-world applications. ALFRED consists of expert demonstrations in interactive visual environments for 25k natural language directives. These directives contain both high-level goals like “Rinse off a mug and place it in the coffee maker.” and low-level language instructions like “Walk to the coffee maker on the right.” ALFRED tasks are more complex in terms of sequence length, action space, and language than existing vision- and-language task datasets. We show that a baseline model based on recent embodied vision-and-language tasks performs poorly on ALFRED, suggesting that there is significant room for developing innovative grounded visual language understanding models with this benchmark.", "year": 2019, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Mohit Shridhar", "Jesse Thomason", "Daniel Gordon", "Yonatan Bisk", "Winson Han", "Roozbeh Mottaghi", "Luke Zettlemoyer", "D. Fox" ], "externalIds": { "MAG": "2993086250", "ArXiv": "1912.01734", "DBLP": "journals/corr/abs-1912-01734", "DOI": "10.1109/cvpr42600.2020.01075", "CorpusId": 208617407 }, "url": "https://www.semanticscholar.org/paper/f4cf4246f3882aa6337e9c05d5675a3b8463a32e", "referenceCount": 59, "citationCount": 617, "influentialCitationCount": 108, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Embodied Question Answering", "abstract": "We present a new AI task - Embodied Question Answering (EmbodiedQA) - where an agent is spawned at a random location in a 3D environment and asked a question ('What color is the car?'). In order to answer, the agent must first intelligently navigate to explore the environment, gather necessary visual information through first-person (egocentric) vision, and then answer the question ('orange'). EmbodiedQA requires a range of AI skills - language understanding, visual recognition, active perception, goal-driven navigation, commonsense reasoning, long-term memory, and grounding language into actions. 
In this work, we develop a dataset of questions and answers in House3D environments [1], evaluation metrics, and a hierarchical model trained with imitation and reinforcement learning.", "year": 2017, "venue": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "authors": [ "Abhishek Das", "Samyak Datta", "Georgia Gkioxari", "Stefan Lee", "Devi Parikh", "Dhruv Batra" ], "externalIds": { "DBLP": "conf/cvpr/DasDGLPB18", "MAG": "2774005037", "ArXiv": "1711.11543", "DOI": "10.1109/CVPR.2018.00008", "CorpusId": 35985986 }, "url": "https://www.semanticscholar.org/paper/cc5ac1d3083b6663482ba6830dfa3bf65343286c", "referenceCount": 48, "citationCount": 591, "influentialCitationCount": 75, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Qwen-VL: A Frontier Large Vision-Language Model with Versatile Abilities", "abstract": "We introduce the Qwen-VL series, a set of large-scale vision-language models designed to perceive and understand both text and images. Comprising Qwen-VL and Qwen-VL-Chat, these models exhibit remarkable performance in tasks like image captioning, question answering, visual localization, and flexible interaction. The evaluation covers a wide range of tasks including zero-shot captioning, visual or document visual question answering, and grounding. We demonstrate the Qwen-VL outperforms existing Large Vision Language Models (LVLMs). We present their architecture, training, capabilities, and performance, highlighting their contributions to advancing multimodal artificial intelligence. Code, demo and models are available at https://github.com/QwenLM/Qwen-VL .", "year": 2023, "venue": "arXiv.org", "authors": [ "Jinze Bai", "Shuai Bai", "Shusheng Yang", "Shijie Wang", "Sinan Tan", "Peng Wang", "Junyang Lin", "Chang Zhou", "Jingren Zhou" ], "externalIds": { "DBLP": "journals/corr/abs-2308-12966", "DOI": "10.48550/arXiv.2308.12966", "CorpusId": 263875678 }, "url": "https://www.semanticscholar.org/paper/5ddb51ae85deca14dc7fc8adc07305c22a1ebe0a", "referenceCount": 80, "citationCount": 570, "influentialCitationCount": 133, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "2023a. Instruct2act: Mapping multi-modality instructions to robotic actions with large language model", "abstract": null, "year": null, "venue": "arXiv", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Embodied task planning with large language models", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2022. Ego4d: Around the world in 3,000 hours of egocentric video", "abstract": null, "year": null, "venue": "Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023.
Drivegpt4: Interpretable end-to-end autonomous driving via large language model", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "OpenAI. 2023. Gpt-4v(ision) system card", "abstract": null, "year": null, "venue": "cdn. openai.com/papers/GPTVSystemCard.pdf", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. Visual instruction tuning", "abstract": null, "year": null, "venue": "Advances in neural information processing systems", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2022. Indoor Objects Detection", "abstract": null, "year": null, "venue": ":/", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. MLDT: Multi-Level Decomposition for Complex Long-Horizon Robotic Task Planning with Open-Source Large Language Model", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "VLMine: Long-Tail Data Mining with Vision Language Models": { "paper_title": "VLMine: Long-Tail Data Mining with Vision Language Models", "arxiv_id": "2409.15486v1", "keyword": "vision language model", "authors": [ "Mao Ye", "Gregory P. Meyer", "Zaiwei Zhang", "Dennis Park", "Siva Karthik Mustikovela", "Yuning Chai", "Eric M Wolff" ], "references": [ { "title": "Probabilistic Contrastive Learning for Long-Tailed Visual Recognition", "abstract": "Long-tailed distributions frequently emerge in real-world data, where a large number of minority categories contain a limited number of samples. Such imbalance issue considerably impairs the performance of standard supervised learning algorithms, which are mainly designed for balanced training sets. Recent investigations have revealed that supervised contrastive learning exhibits promising potential in alleviating the data imbalance. However, the performance of supervised contrastive learning is plagued by an inherent challenge: it necessitates sufficiently large batches of training data to construct contrastive pairs that cover all categories, yet this requirement is difficult to meet in the context of class-imbalanced data. To overcome this obstacle, we propose a novel probabilistic contrastive (ProCo) learning algorithm that estimates the data distribution of the samples from each class in the feature space, and samples contrastive pairs accordingly. In fact, estimating the distributions of all classes using features in a small batch, particularly for imbalanced data, is not feasible. Our key idea is to introduce a reasonable and simple assumption that the normalized features in contrastive learning follow a mixture of von Mises-Fisher (vMF) distributions on unit space, which brings two-fold benefits. First, the distribution parameters can be estimated using only the first sample moment, which can be efficiently computed in an online manner across different batches. 
Second, based on the estimated distribution, the vMF distribution allows us to sample an infinite number of contrastive pairs and derive a closed form of the expected contrastive loss for efficient optimization. Other than long-tailed problems, ProCo can be directly applied to semi-supervised learning by generating pseudo-labels for unlabeled data, which can subsequently be utilized to estimate the distribution of the samples inversely. Theoretically, we analyze the error bound of ProCo. Empirically, extensive experimental results on supervised/semi-supervised visual recognition and object detection tasks demonstrate that ProCo consistently outperforms existing methods across various datasets.", "year": 2024, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Chaoqun Du", "Yulin Wang", "Shiji Song", "Gao Huang" ], "externalIds": { "DBLP": "journals/pami/DuWSH24", "ArXiv": "2403.06726", "DOI": "10.1109/TPAMI.2024.3369102", "CorpusId": 267852922, "PubMed": "38393854" }, "url": "https://www.semanticscholar.org/paper/70f1530cb242254fd0e0871c3454f23021e52625", "referenceCount": 96, "citationCount": 13, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Image Clustering Conditioned on Text Criteria", "abstract": "Classical clustering methods do not provide users with direct control of the clustering results, and the clustering results may not be consistent with the relevant criterion that a user has in mind. In this work, we present a new methodology for performing image clustering based on user-specified text criteria by leveraging modern vision-language models and large language models. We call our method Image Clustering Conditioned on Text Criteria (IC|TC), and it represents a different paradigm of image clustering. IC|TC requires a minimal and practical degree of human intervention and grants the user significant control over the clustering results in return. Our experiments show that IC|TC can effectively cluster images with various criteria, such as human action, physical location, or the person's mood, while significantly outperforming baselines.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Sehyun Kwon", "Jaeseung Park", "Minkyu Kim", "Jaewoong Cho", "Ernest K. Ryu", "Kangwook Lee" ], "externalIds": { "ArXiv": "2310.18297", "DBLP": "conf/iclr/KwonPKCR024", "DOI": "10.48550/arXiv.2310.18297", "CorpusId": 264555257 }, "url": "https://www.semanticscholar.org/paper/e6f93ae09064de1cdba070ad71de69655cb44a01", "referenceCount": 83, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning", "abstract": "Large-scale pre-training and instruction tuning have been successful at creating general-purpose language models with broad competence. However, building general-purpose vision-language models is challenging due to the rich input distributions and task diversity resulting from the additional visual input. Although vision-language pretraining has been widely studied, vision-language instruction tuning remains under-explored. In this paper, we conduct a systematic and comprehensive study on vision-language instruction tuning based on the pretrained BLIP-2 models. 
We gather 26 publicly available datasets, covering a wide variety of tasks and capabilities, and transform them into instruction tuning format. Additionally, we introduce an instruction-aware Query Transformer, which extracts informative features tailored to the given instruction. Trained on 13 held-in datasets, InstructBLIP attains state-of-the-art zero-shot performance across all 13 held-out datasets, substantially outperforming BLIP-2 and larger Flamingo models. Our models also lead to state-of-the-art performance when finetuned on individual downstream tasks (e.g., 90.7% accuracy on ScienceQA questions with image contexts). Furthermore, we qualitatively demonstrate the advantages of InstructBLIP over concurrent multimodal models. All InstructBLIP models are open-sourced at https://github.com/salesforce/LAVIS/tree/main/projects/instructblip.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Wenliang Dai", "Junnan Li", "Dongxu Li", "A. M. H. Tiong", "Junqi Zhao", "Weisheng Wang", "Boyang Albert Li", "Pascale Fung", "Steven C. H. Hoi" ], "externalIds": { "ArXiv": "2305.06500", "DBLP": "journals/corr/abs-2305-06500", "DOI": "10.48550/arXiv.2305.06500", "CorpusId": 258615266 }, "url": "https://www.semanticscholar.org/paper/8bd6a2a89503be083176f2cc26fabedb79238cbd", "referenceCount": 52, "citationCount": 1182, "influentialCitationCount": 281, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "AdaptiveShape: Solving Shape Variability for 3D Object Detection with Geometry Aware Anchor Distributions", "abstract": "3D object detection with point clouds and images plays an important role in perception tasks such as autonomous driving. Current methods show great performance on detection and pose estimation of standard-shaped vehicles but lack behind on more complex shapes as e.g. semi-trailer truck combinations. Determining the shape and motion of those special vehicles accurately is crucial in yard operation and maneuvering and industrial automation applications. This work introduces several new methods to improve and measure the performance for such classes. State-of-the-art methods are based on predefined anchor grids or heatmaps for ground truth targets. However, the underlying representations do not take the shape of different sized objects into account. Our main contribution, AdaptiveShape, uses shape aware anchor distributions and heatmaps to improve the detection capabilities. For large vehicles we achieve +10.9% AP in comparison to current shape agnostic methods. Furthermore we introduce a new fast LiDAR-camera fusion. It is based on 2D bounding box camera detections which are available in many processing pipelines. This fusion method does not rely on perfectly calibrated or temporally synchronized systems and is therefore applicable to a broad range of robotic applications. We extend a standard point pillar network to account for temporal data and improve learning of complex object movements. In addition we extended a ground truth augmentation to use grouped object pairs to further improve truck AP by +2.2% compared to conventional augmentation.", "year": 2023, "venue": "arXiv.org", "authors": [ "B. 
Sick", "Michael Walter", "Jochen Abhau" ], "externalIds": { "ArXiv": "2302.14522", "DBLP": "journals/corr/abs-2302-14522", "DOI": "10.48550/arXiv.2302.14522", "CorpusId": 257232784 }, "url": "https://www.semanticscholar.org/paper/02db9960c3c405be389cbaa998144d2f93cb0604", "referenceCount": 39, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LLaMA: Open and Efficient Foundation Language Models", "abstract": "We introduce LLaMA, a collection of foundation language models ranging from 7B to 65B parameters. We train our models on trillions of tokens, and show that it is possible to train state-of-the-art models using publicly available datasets exclusively, without resorting to proprietary and inaccessible datasets. In particular, LLaMA-13B outperforms GPT-3 (175B) on most benchmarks, and LLaMA-65B is competitive with the best models, Chinchilla-70B and PaLM-540B. We release all our models to the research community.", "year": 2023, "venue": "arXiv.org", "authors": [ "Hugo Touvron", "Thibaut Lavril", "Gautier Izacard", "Xavier Martinet", "Marie-Anne Lachaux", "Timothée Lacroix", "Baptiste Rozière", "Naman Goyal", "Eric Hambro", "Faisal Azhar", "Aurelien Rodriguez", "Armand Joulin", "Edouard Grave", "Guillaume Lample" ], "externalIds": { "DBLP": "journals/corr/abs-2302-13971", "ArXiv": "2302.13971", "CorpusId": 257219404 }, "url": "https://www.semanticscholar.org/paper/57e849d0de13ed5f91d086936296721d4ff75a75", "referenceCount": 80, "citationCount": 8031, "influentialCitationCount": 1073, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Long-tail Detection with Effective Class-Margins", "abstract": null, "year": 2023, "venue": "European Conference on Computer Vision", "authors": [ "Jang Hyun Cho", "Philipp Krähenbühl" ], "externalIds": { "DBLP": "journals/corr/abs-2301-09724", "ArXiv": "2301.09724", "DOI": "10.1007/978-3-031-20074-8_40", "CorpusId": 253525596 }, "url": "https://www.semanticscholar.org/paper/cc7c1f7ab6767c0075cc988eab314c139e326c3f", "referenceCount": 55, "citationCount": 13, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Improving the Intra-class Long-tail in 3D Detection via Rare Example Mining", "abstract": "Continued improvements in deep learning architectures have steadily advanced the overall performance of 3D object detectors to levels on par with humans for certain tasks and datasets, where the overall performance is mostly driven by common examples. However, even the best performing models suffer from the most naive mistakes when it comes to rare examples that do not appear frequently in the training data, such as vehicles with irregular geometries. Most studies in the long-tail literature focus on class-imbalanced classification problems with known imbalanced label counts per class, but they are not directly applicable to the intra-class long-tail examples in problems with large intra-class variations such as 3D object detection, where instances with the same class label can have drastically varied properties such as shapes and sizes. Other works propose to mitigate this problem using active learning based on the criteria of uncertainty, difficulty, or diversity. In this study, we identify a new conceptual dimension - rareness - to mine new data for improving the long-tail performance of models. 
We show that rareness, as opposed to difficulty, is the key to data-centric improvements for 3D detectors, since rareness is the result of a lack in data support while difficulty is related to the fundamental ambiguity in the problem. We propose a general and effective method to identify the rareness of objects based on density estimation in the feature space using flow models, and propose a principled cost-aware formulation for mining rare object tracks, which improves overall model performance, but more importantly - significantly improves the performance for rare objects (by 30.97\\%", "year": 2022, "venue": "European Conference on Computer Vision", "authors": [ "C. Jiang", "Mahyar Najibi", "C. Qi", "Yin Zhou", "Drago Anguelov" ], "externalIds": { "ArXiv": "2210.08375", "DBLP": "journals/corr/abs-2210-08375", "DOI": "10.48550/arXiv.2210.08375", "CorpusId": 252918739 }, "url": "https://www.semanticscholar.org/paper/e80b2b224b27718649ae4a2cd91213cbde87b35e", "referenceCount": 71, "citationCount": 16, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LPT: Long-tailed Prompt Tuning for Image Classification", "abstract": "For long-tailed classification, most works often pretrain a big model on a large-scale dataset, and then fine-tune the whole model for adapting to long-tailed data. Though promising, fine-tuning the whole pretrained model tends to suffer from high cost in computation and deployment of different models for different tasks, as well as weakened generalization ability for overfitting to certain features of long-tailed data. To alleviate these issues, we propose an effective Long-tailed Prompt Tuning method for long-tailed classification. LPT introduces several trainable prompts into a frozen pretrained model to adapt it to long-tailed data. For better effectiveness, we divide prompts into two groups: 1) a shared prompt for the whole long-tailed dataset to learn general features and to adapt a pretrained model into target domain; and 2) group-specific prompts to gather group-specific features for the samples which have similar features and also to empower the pretrained model with discrimination ability. Then we design a two-phase training paradigm to learn these prompts. In phase 1, we train the shared prompt via supervised prompt tuning to adapt a pretrained model to the desired long-tailed domain. In phase 2, we use the learnt shared prompt as query to select a small best matched set for a group of similar samples from the group-specific prompt set to dig the common features of these similar samples, then optimize these prompts with dual sampling strategy and asymmetric GCL loss. By only fine-tuning a few prompts while fixing the pretrained model, LPT can reduce training and deployment cost by storing a few prompts, and enjoys a strong generalization ability of the pretrained model. Experiments show that on various long-tailed benchmarks, with only ~1.1% extra parameters, LPT achieves comparable performance than previous whole model fine-tuning methods, and is more robust to domain-shift.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Bowen Dong", "Pan Zhou", "Shuicheng Yan", "W. 
Zuo" ], "externalIds": { "DBLP": "journals/corr/abs-2210-01033", "ArXiv": "2210.01033", "DOI": "10.48550/arXiv.2210.01033", "CorpusId": 252683720 }, "url": "https://www.semanticscholar.org/paper/e9bebbe12a1124dee2214e3f6bd7973540d8af63", "referenceCount": 67, "citationCount": 32, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PaLM: Scaling Language Modeling with Pathways", "abstract": "Large language models have been shown to achieve remarkable performance across a variety of natural language tasks using few-shot learning, which drastically reduces the number of task-specific training examples needed to adapt the model to a particular application. To further our understanding of the impact of scale on few-shot learning, we trained a 540-billion parameter, densely activated, Transformer language model, which we call Pathways Language Model PaLM. We trained PaLM on 6144 TPU v4 chips using Pathways, a new ML system which enables highly efficient training across multiple TPU Pods. We demonstrate continued benefits of scaling by achieving state-of-the-art few-shot learning results on hundreds of language understanding and generation benchmarks. On a number of these tasks, PaLM 540B achieves breakthrough performance, outperforming the finetuned state-of-the-art on a suite of multi-step reasoning tasks, and outperforming average human performance on the recently released BIG-bench benchmark. A significant number of BIG-bench tasks showed discontinuous improvements from model scale, meaning that performance steeply increased as we scaled to our largest model. PaLM also has strong capabilities in multilingual tasks and source code generation, which we demonstrate on a wide array of benchmarks. We additionally provide a comprehensive analysis on bias and toxicity, and study the extent of training data memorization with respect to model scale. Finally, we discuss the ethical considerations related to large language models and discuss potential mitigation strategies.", "year": 2022, "venue": "Journal of machine learning research", "authors": [ "Aakanksha Chowdhery", "Sharan Narang", "Jacob Devlin", "Maarten Bosma", "Gaurav Mishra", "Adam Roberts", "P. Barham", "Hyung Won Chung", "Charles Sutton", "Sebastian Gehrmann", "Parker Schuh", "Kensen Shi", "Sasha Tsvyashchenko", "Joshua Maynez", "Abhishek Rao", "Parker Barnes", "Yi Tay", "Noam M. Shazeer", "Vinodkumar Prabhakaran", "Emily Reif", "Nan Du", "Ben Hutchinson", "Reiner Pope", "James Bradbury", "Jacob Austin", "M. Isard", "Guy Gur-Ari", "Pengcheng Yin", "Toju Duke", "Anselm Levskaya", "Sanjay Ghemawat", "Sunipa Dev", "H. Michalewski", "Xavier García", "Vedant Misra", "Kevin Robinson", "Liam Fedus", "Denny Zhou", "Daphne Ippolito", "D. Luan", "Hyeontaek Lim", "Barret Zoph", "A. Spiridonov", "Ryan Sepassi", "David Dohan", "Shivani Agrawal", "Mark Omernick", "Andrew M. Dai", "Thanumalayan Sankaranarayana Pillai", "Marie Pellat", "Aitor Lewkowycz", "Erica Moreira", "R. Child", "Oleksandr Polozov", "Katherine Lee", "Zongwei Zhou", "Xuezhi Wang", "Brennan Saeta", "Mark Díaz", "Orhan Firat", "Michele Catasta", "Jason Wei", "K. Meier-Hellstern", "D. Eck", "J. 
Dean", "Slav Petrov", "Noah Fiedel" ], "externalIds": { "ArXiv": "2204.02311", "DBLP": "journals/corr/abs-2204-02311", "CorpusId": 247951931 }, "url": "https://www.semanticscholar.org/paper/094ff971d6a8b8ff870946c9b3ce5aa173617bfb", "referenceCount": 173, "citationCount": 4789, "influentialCitationCount": 335, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Long- Tailed Recognition via Weight Balancing", "abstract": "In the real open world, data tends to follow long-tailed class distributions, motivating the well-studied long-tailed recognition (LTR) problem. Naive training produces models that are biased toward common classes in terms of higher accuracy. The key to addressing LTR is to balance various aspects including data distribution, training losses, and gradients in learning. We explore an orthogonal direction, weight balancing, motivated by the empirical observation that the naively trained classifier has “artificially” larger weights in norm for common classes (because there exists abundant data to train them, unlike the rare classes). We investigate three techniques to balance weights, L2-normalization, weight decay, and MaxNorm. We first point out that L2-normalization “perfectly” balances per-class weights to be unit norm, but such a hard constraint might prevent classes from learning better classifiers. In contrast, weight decay penalizes larger weights more heavily and so learns small balanced weights; the MaxNorm constraint encourages growing small weights within a norm ball but caps all the weights by the radius. Our extensive study shows that both help learn balanced weights and greatly improve the LTR accuracy. Surprisingly, weight decay, although underexplored in LTR, significantly improves over prior work. Therefore, we adopt a two-stage training paradigm and propose a simple approach to LTR: (1) learning features using the cross-entropy loss by tuning weight decay, and (2) learning classifiers using class-balanced loss by tuning weight decay and MaxNorm. Our approach achieves the state-of-the-art accuracy on five standard benchmarks, serving as a future baseline for long-tailed recognition.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Shaden Alshammari", "Yu-Xiong Wang", "Deva Ramanan", "Shu Kong" ], "externalIds": { "DBLP": "journals/corr/abs-2203-14197", "ArXiv": "2203.14197", "DOI": "10.1109/CVPR52688.2022.00677", "CorpusId": 247763005 }, "url": "https://www.semanticscholar.org/paper/cc8e9f795f83c5107816bd500acb13c4e200198c", "referenceCount": 88, "citationCount": 114, "influentialCitationCount": 18, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Retrieval Augmented Classification for Long-Tail Visual Recognition", "abstract": "We introduce Retrieval Augmented Classification (RAC), a generic approach to augmenting standard image classification pipelines with an explicit retrieval module. RAC consists of a standard base image encoder fused with a parallel retrieval branch that queries a non-parametric external memory of pre-encoded images and associated text snippets. We apply RAC to the problem of long-tail classification and demonstrate a significant improvement over previous state-of-the-art on Places365-LT and iNaturalist-2018 (14.5% and 6.7% respectively), despite using only the training datasets themselves as the external information source. We demonstrate that RAC's retrieval module, without prompting, learns a high level of accuracy on tail classes. 
This, in turn, frees the base encoder to focus on common classes, and improve its performance thereon. RAC represents an alternative approach to utilizing large, pretrained models without requiring fine-tuning, as well as a first step towards more effectively making use of external memory within common computer vision architectures.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Alex Long", "Wei Yin", "Thalaiyasingam Ajanthan", "V. Nguyen", "Pulak Purkait", "Ravi Garg", "Alan Blair", "Chunhua Shen", "A. Hengel" ], "externalIds": { "DBLP": "conf/cvpr/Long0ANPGBSH22", "ArXiv": "2202.11233", "DOI": "10.1109/CVPR52688.2022.00683", "CorpusId": 247058346 }, "url": "https://www.semanticscholar.org/paper/15115f67452f3305b69e6886cee98ac466d42cd5", "referenceCount": 65, "citationCount": 80, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Simple Long-Tailed Recognition Baseline via Vision-Language Model", "abstract": "The visual world naturally exhibits a long-tailed distribution of open classes, which poses great challenges to modern visual systems. Existing approaches either perform class re-balancing strategies or directly improve network modules to address the problem. However, they still train models with a finite set of predefined labels, limiting their supervision information and restricting their transferability to novel instances. Recent advances in large-scale contrastive visual-language pretraining shed light on a new pathway for visual recognition. With open-vocabulary supervisions, pretrained contrastive vision-language models learn powerful multimodal representations that are promising to handle data deficiency and unseen concepts. By calculating the semantic similarity between visual and text inputs, visual recognition is converted to a vision-language matching problem. Inspired by this, we propose BALLAD to leverage contrastive vision-language models for long-tailed recognition. We first continue pretraining the vision-language backbone through contrastive learning on a specific long-tailed target dataset. Afterward, we freeze the backbone and further employ an additional adapter layer to enhance the representations of tail classes on balanced training samples built with re-sampling strategies. Extensive experiments have been conducted on three popular long-tailed recognition benchmarks. As a result, our simple and effective approach sets the new state-of-the-art performances and outperforms competitive baselines with a large margin. Code is released at https://github.com/gaopengcuhk/BALLAD.", "year": 2021, "venue": "arXiv.org", "authors": [ "Teli Ma", "Shijie Geng", "Mengmeng Wang", "Jing Shao", "Jiasen Lu", "Hongsheng Li", "Peng Gao", "Y. Qiao" ], "externalIds": { "DBLP": "journals/corr/abs-2111-14745", "ArXiv": "2111.14745", "CorpusId": 244714569 }, "url": "https://www.semanticscholar.org/paper/8fef6a33d39c6bb4c9ab55a8ef8cd7efc54aeff3", "referenceCount": 79, "citationCount": 34, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "VL-LTR: Learning Class-wise Visual-Linguistic Representation for Long-Tailed Visual Recognition", "abstract": null, "year": 2021, "venue": "European Conference on Computer Vision", "authors": [ "Changyao Tian", "Wenhai Wang", "Xizhou Zhu", "Xiaogang Wang", "Jifeng Dai", "Y. 
Qiao" ], "externalIds": { "ArXiv": "2111.13579", "DBLP": "journals/corr/abs-2111-13579", "DOI": "10.1007/978-3-031-19806-9_5", "CorpusId": 244709355 }, "url": "https://www.semanticscholar.org/paper/0665104edcc9adc0d302e2e7f9f63f32f2390c92", "referenceCount": 59, "citationCount": 48, "influentialCitationCount": 16, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Self Supervision to Distillation for Long-Tailed Visual Recognition", "abstract": "Deep learning has achieved remarkable progress for visual recognition on large-scale balanced datasets but still performs poorly on real-world long-tailed data. Previous methods often adopt class re-balanced training strategies to effectively alleviate the imbalance issue, but might be a risk of over-fitting tail classes. The recent decoupling method overcomes over-fitting issues by using a multi-stage training scheme, yet, it is still incapable of capturing tail class information in the feature learning stage. In this paper, we show that soft label can serve as a powerful solution to incorporate label correlation into a multi-stage training scheme for long-tailed recognition. The intrinsic relation between classes embodied by soft labels turns out to be helpful for long-tailed recognition by transferring knowledge from head to tail classes.Specifically, we propose a conceptually simple yet particularly effective multi-stage training scheme, termed as Self Supervised to Distillation (SSD). This scheme is composed of two parts. First, we introduce a self-distillation framework for long-tailed recognition, which can mine the label relation automatically. Second, we present a new distillation label generation module guided by self-supervision. The distilled labels integrate information from both label and data domains that can model long-tailed distribution effectively. We conduct extensive experiments and our method achieves the state-of-the-art results on three long-tailed recognition benchmarks: ImageNet-LT, CIFAR100-LT and iNaturalist 2018. Our SSD outperforms the strong LWS baseline by from 2.7% to 4.5% on various datasets.", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Tianhao Li", "Limin Wang", "Gangshan Wu" ], "externalIds": { "DBLP": "journals/corr/abs-2109-04075", "ArXiv": "2109.04075", "DOI": "10.1109/ICCV48922.2021.00067", "CorpusId": 237453494 }, "url": "https://www.semanticscholar.org/paper/2eb3766268a24de4f59d42c8cdf757d8f3f0b91c", "referenceCount": 50, "citationCount": 87, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Parametric Contrastive Learning", "abstract": "In this paper, we propose Parametric Contrastive Learning (PaCo) to tackle long-tailed recognition. Based on theoretical analysis, we observe supervised contrastive loss tends to bias on high-frequency classes and thus increases the difficulty of imbalanced learning. We introduce a set of parametric class-wise learnable centers to rebalance from an optimization perspective. Further, we analyze our PaCo loss under a balanced setting. Our analysis demonstrates that PaCo can adaptively enhance the intensity of pushing samples of the same class close as more samples are pulled together with their corresponding centers and benefit hard example learning. Experiments on long-tailed CIFAR, ImageNet, Places, and iNaturalist 2018 manifest the new state-of-the-art for long-tailed recognition. 
On full ImageNet, models trained with PaCo loss surpass supervised contrastive learning across various ResNet backbones, e.g., our ResNet-200 achieves 81.8% top-1 accuracy. Our code is available at https://github.com/dvlab-research/Parametric-Contrastive-Learning.", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Jiequan Cui", "Zhisheng Zhong", "Shu Liu", "Bei Yu", "Jiaya Jia" ], "externalIds": { "DBLP": "conf/iccv/CuiZ00J21", "ArXiv": "2107.12028", "DOI": "10.1109/ICCV48922.2021.00075", "CorpusId": 236428622 }, "url": "https://www.semanticscholar.org/paper/6900eaf98c9d35aaf2f1f8a411ca62343fe449a2", "referenceCount": 55, "citationCount": 227, "influentialCitationCount": 60, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GistNet: a Geometric Structure Transfer Network for Long-Tailed Recognition", "abstract": "The problem of long-tailed recognition, where the number of examples per class is highly unbalanced, is considered. It is hypothesized that the well known tendency of standard classifier training to overfit to popular classes can be exploited for effective transfer learning. Rather than eliminating this overfitting, e.g. by adopting popular class-balanced sampling methods, the learning algorithm should instead leverage this overfitting to transfer geometric information from popular to low-shot classes. A new classifier architecture, GistNet, is proposed to support this goal, using constellations of classifier parameters to encode the class geometry. A new learning algorithm is then proposed for GeometrIc Structure Transfer (GIST), with resort to a combination of loss functions that combine class-balanced and random sampling to guarantee that, while overfitting to the popular classes is restricted to geometric parameters, it is leveraged to transfer class geometry from popular to few-shot classes. This enables better generalization for few-shot classes without the need for the manual specification of class weights, or even the explicit grouping of classes into different types. Experiments on two popular long-tailed recognition datasets show that GistNet outperforms existing solutions to this problem.", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Bo Liu", "Haoxiang Li", "Hao Kang", "G. Hua", "N. Vasconcelos" ], "externalIds": { "ArXiv": "2105.00131", "DBLP": "journals/corr/abs-2105-00131", "DOI": "10.1109/ICCV48922.2021.00810", "CorpusId": 233481137 }, "url": "https://www.semanticscholar.org/paper/523392516245c929fff192fd9bc83999819c74bb", "referenceCount": 30, "citationCount": 31, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DropLoss for Long-Tail Instance Segmentation", "abstract": "Long-tailed class distributions are prevalent among the practical applications of object detection and instance segmentation. Prior work in long-tail instance segmentation addresses the imbalance of losses between rare and frequent categories by reducing the penalty for a model incorrectly predicting a rare class label. We demonstrate that the rare categories are heavily suppressed by correct background predictions, which reduces the probability for all foreground categories with equal weight. Due to the relative infrequency of rare categories, this leads to an imbalance that biases towards predicting more frequent categories. 
Based on this insight, we develop DropLoss -- a novel adaptive loss to compensate for this imbalance without a trade-off between rare and frequent categories. With this loss, we show state-of-the-art mAP across rare, common, and frequent categories on the LVIS dataset. Codes are available at https://github.com/timy90022/DropLoss.", "year": 2021, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Ting-I Hsieh", "Esther Robb", "Hwann-Tzong Chen", "Jia-Bin Huang" ], "externalIds": { "ArXiv": "2104.06402", "DBLP": "journals/corr/abs-2104-06402", "DOI": "10.1609/aaai.v35i2.16246", "CorpusId": 233219377 }, "url": "https://www.semanticscholar.org/paper/fee65304d2401d2c4bee5b846b7a9c055843a840", "referenceCount": 37, "citationCount": 40, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Active Learning for Deep Object Detection via Probabilistic Modeling", "abstract": "Active learning aims to reduce labeling costs by selecting only the most informative samples on a dataset. Few existing works have addressed active learning for object detection. Most of these methods are based on multiple models or are straightforward extensions of classification methods, hence estimate an image’s informativeness using only the classification head. In this paper, we propose a novel deep active learning approach for object detection. Our approach relies on mixture density networks that estimate a probabilistic distribution for each localization and classification head’s output. We explicitly estimate the aleatoric and epistemic uncertainty in a single forward pass of a single model. Our method uses a scoring function that aggregates these two types of uncertainties for both heads to obtain every image’s informativeness score. We demonstrate the efficacy of our approach in PASCAL VOC and MS-COCO datasets. Our approach outperforms single-model based methods and performs on par with multi-model based methods at a fraction of the computing cost. Code is available at https://github.com/NVlabs/AL-MDN.", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Jiwoong Choi", "Ismail Elezi", "Hyuk-Jae Lee", "C. Farabet", "J. Álvarez" ], "externalIds": { "ArXiv": "2103.16130", "DBLP": "conf/iccv/ChoiELFA21", "DOI": "10.1109/ICCV48922.2021.01010", "CorpusId": 232417789 }, "url": "https://www.semanticscholar.org/paper/1edd99ae7c097dfe3b73750bde96a7722162ab25", "referenceCount": 41, "citationCount": 100, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Distribution Alignment: A Unified Framework for Long-tail Visual Recognition", "abstract": "Despite the recent success of deep neural networks, it remains challenging to effectively model the long-tail class distribution in visual recognition tasks. To address this problem, we first investigate the performance bottleneck of the two-stage learning framework via ablative study. Motivated by our discovery, we propose a unified distribution alignment strategy for long-tail visual recognition. Specifically, we develop an adaptive calibration function that enables us to adjust the classification scores for each data point. We then introduce a generalized re-weight method in the two-stage learning to balance the class prior, which provides a flexible and unified solution to diverse scenarios in visual recognition tasks. 
We validate our method by extensive experiments on four tasks, including image classification, semantic segmentation, object detection, and instance segmentation. Our approach achieves the state-of-the-art results across all four recognition tasks with a simple and unified framework.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Songyang Zhang", "Zeming Li", "Shipeng Yan", "Xuming He", "Jian Sun" ], "externalIds": { "DBLP": "conf/cvpr/ZhangLY0S21", "ArXiv": "2103.16370", "DOI": "10.1109/CVPR46437.2021.00239", "CorpusId": 232417730 }, "url": "https://www.semanticscholar.org/paper/51a33b04933f932c3a1425339c4412be89a2bdb5", "referenceCount": 54, "citationCount": 248, "influentialCitationCount": 26, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Transferable Visual Models From Natural Language Supervision", "abstract": "State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at https://github.com/OpenAI/CLIP.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Alec Radford", "Jong Wook Kim", "Chris Hallacy", "A. Ramesh", "Gabriel Goh", "Sandhini Agarwal", "Girish Sastry", "Amanda Askell", "Pamela Mishkin", "Jack Clark", "Gretchen Krueger", "I. Sutskever" ], "externalIds": { "ArXiv": "2103.00020", "DBLP": "conf/icml/RadfordKHRGASAM21", "CorpusId": 231591445 }, "url": "https://www.semanticscholar.org/paper/6f870f7f02a8c59c3e23f407f3ef00dd1dcf8fc4", "referenceCount": 220, "citationCount": 18886, "influentialCitationCount": 5013, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Equalization Loss v2: A New Gradient Balance Approach for Long-tailed Object Detection", "abstract": "Recently proposed decoupled training methods emerge as a dominant paradigm for long-tailed object detection. But they require an extra fine-tuning stage, and the disjointed optimization of representation and classifier might lead to suboptimal results. However, end-to-end training methods, like equalization loss (EQL), still perform worse than decoupled training methods.
In this paper, we reveal the main issue in long-tailed object detection is the imbalanced gradients between positives and negatives, and find that EQL does not solve it well. To address the problem of imbalanced gradients, we introduce a new version of equalization loss, called equalization loss v2 (EQL v2), a novel gradient guided reweighing mechanism that re-balances the training process for each category independently and equally. Extensive experiments are performed on the challenging LVIS benchmark. EQL v2 outperforms origin EQL by about 4 points overall AP with 14 ∼ 18 points improvements on the rare categories. More importantly, it also surpasses decoupled training methods. Without further tuning for the Open Images dataset, EQL v2 improves EQL by 7.3 points AP, showing strong generalization ability. Codes have been released at https://github.com/tztztztztz/eqlv2", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Jingru Tan", "Xin Lu", "Gang Zhang", "Changqing Yin", "Quanquan Li" ], "externalIds": { "MAG": "3111055164", "DBLP": "journals/corr/abs-2012-08548", "ArXiv": "2012.08548", "DOI": "10.1109/CVPR46437.2021.00173", "CorpusId": 229219948 }, "url": "https://www.semanticscholar.org/paper/f21aa5f92699e621b2a4110cc00fd61f41469347", "referenceCount": 48, "citationCount": 139, "influentialCitationCount": 17, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Long-tailed Recognition by Routing Diverse Distribution-Aware Experts", "abstract": "Natural data are often long-tail distributed over semantic classes. Existing recognition methods tend to focus on tail performance gain, often at the expense of head performance loss from increased classifier variance. The low tail performance manifests itself in large inter-class confusion and high classifier variance. We aim to reduce both the bias and the variance of a long-tailed classifier by RoutIng Diverse Experts (RIDE). It has three components: 1) a shared architecture for multiple classifiers (experts); 2) a distribution-aware diversity loss that encourages more diverse decisions for classes with fewer training instances; and 3) an expert routing module that dynamically assigns more ambiguous instances to additional experts. With on-par computational complexity, RIDE significantly outperforms the state-of-the-art methods by 5% to 7% on all the benchmarks including CIFAR100-LT, ImageNet-LT and iNaturalist. RIDE is also a universal framework that can be applied to different backbone networks and integrated into various long-tailed algorithms and training mechanisms for consistent performance gains.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Xudong Wang", "Long Lian", "Zhongqi Miao", "Ziwei Liu", "Stella X. Yu" ], "externalIds": { "DBLP": "conf/iclr/WangLM0Y21", "MAG": "3092063351", "ArXiv": "2010.01809", "CorpusId": 222134104 }, "url": "https://www.semanticscholar.org/paper/d618752d2e666d7b25f1bd6c7c3bd7c056e19d96", "referenceCount": 52, "citationCount": 337, "influentialCitationCount": 71, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Improving Calibration for Long-Tailed Recognition", "abstract": "Deep neural networks may perform poorly when training datasets are heavily class-imbalanced. Recently, two-stage methods decouple representation learning and classifier learning to improve performance. But there is still the vital issue of miscalibration.
To address it, we design two methods to improve calibration and performance in such scenarios. Motivated by the fact that predicted probability distributions of classes are highly related to the numbers of class instances, we propose label-aware smoothing to deal with different degrees of over-confidence for classes and improve classifier learning. For dataset bias between these two stages due to different samplers, we further propose shifted batch normalization in the decoupling framework. Our proposed methods set new records on multiple popular long-tailed recognition benchmark datasets, including CIFAR-10-LT, CIFAR-100-LT, ImageNet-LT, Places-LT, and iNaturalist 2018.", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Zhisheng Zhong", "Jiequan Cui", "Shu Liu", "Jiaya Jia" ], "externalIds": { "ArXiv": "2104.00466", "DBLP": "journals/corr/abs-2104-00466", "MAG": "3109758441", "DOI": "10.1109/CVPR46437.2021.01622", "CorpusId": 229534691 }, "url": "https://www.semanticscholar.org/paper/09ce0a5073dde62e6cff2d5dfed1944e024e951e", "referenceCount": 40, "citationCount": 247, "influentialCitationCount": 47, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Long-Tailed Classification by Keeping the Good and Removing the Bad Momentum Causal Effect", "abstract": "As the class size grows, maintaining a balanced dataset across many classes is challenging because the data are long-tailed in nature; it is even impossible when the sample-of-interest co-exists with each other in one collectable unit, e.g., multiple visual instances in one image. Therefore, long-tailed classification is the key to deep learning at scale. However, existing methods are mainly based on re-weighting/re-sampling heuristics that lack a fundamental theory. In this paper, we establish a causal inference framework, which not only unravels the whys of previous methods, but also derives a new principled solution. Specifically, our theory shows that the SGD momentum is essentially a confounder in long-tailed classification. On one hand, it has a harmful causal effect that misleads the tail prediction biased towards the head. On the other hand, its induced mediation also benefits the representation learning and head prediction. Our framework elegantly disentangles the paradoxical effects of the momentum, by pursuing the direct causal effect caused by an input sample. In particular, we use causal intervention in training, and counterfactual reasoning in inference, to remove the \"bad\" while keep the \"good\". We achieve new state-of-the-arts on three long-tailed visual recognition benchmarks: Long-tailed CIFAR-10/-100, ImageNet-LT for image classification and LVIS for instance segmentation.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Kaihua Tang", "Jianqiang Huang", "Hanwang Zhang" ], "externalIds": { "MAG": "3087788237", "DBLP": "journals/corr/abs-2009-12991", "ArXiv": "2009.12991", "CorpusId": 221970271 }, "url": "https://www.semanticscholar.org/paper/c5e4ed93580d50c46426642cd85b93d0e64629a3", "referenceCount": 61, "citationCount": 384, "influentialCitationCount": 49, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Seesaw Loss for Long-Tailed Instance Segmentation", "abstract": "Instance segmentation has witnessed a remarkable progress on class-balanced benchmarks. 
However, they fail to perform as accurately in real-world scenarios, where the category distribution of objects naturally comes with a long tail. Instances of head classes dominate a long-tailed dataset and they serve as negative samples of tail categories. The overwhelming gradients of negative samples on tail classes lead to a biased learning process for classifiers. Consequently, objects of tail categories are more likely to be misclassified as backgrounds or head categories. To tackle this problem, we propose Seesaw Loss to dynamically re-balance gradients of positive and negative samples for each category, with two complementary factors, i.e., mitigation factor and compensation factor. The mitigation factor reduces punishments to tail categories w.r.t. the ratio of cumulative training instances between different categories. Meanwhile, the compensation factor increases the penalty of misclassified instances to avoid false positives of tail categories. We conduct extensive experiments on Seesaw Loss with mainstream frameworks and different data sampling strategies. With a simple end-to-end training pipeline, Seesaw Loss obtains significant gains over Cross-Entropy Loss, and achieves state-of-the-art performance on LVIS dataset without bells and whistles. Code is available at https://github.com/open-mmlab/mmdetection.", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Jiaqi Wang", "Wenwei Zhang", "Yuhang Zang", "Yuhang Cao", "Jiangmiao Pang", "Tao Gong", "Kai Chen", "Ziwei Liu", "Chen Change Loy", "Dahua Lin" ], "externalIds": { "ArXiv": "2008.10032", "DBLP": "conf/cvpr/WangZZCPGCLLL21", "MAG": "3081115330", "DOI": "10.1109/CVPR46437.2021.00957", "CorpusId": 221266194 }, "url": "https://www.semanticscholar.org/paper/4644b8204ff7fcc3b0835ffad2f4216911ac9426", "referenceCount": 60, "citationCount": 203, "influentialCitationCount": 40, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Forest R-CNN: Large-Vocabulary Long-Tailed Object Detection and Instance Segmentation", "abstract": "Despite the previous success of object analysis, detecting and segmenting a large number of object categories with a long-tailed data distribution remains a challenging problem and is less investigated. For a large-vocabulary classifier, the chance of obtaining noisy logits is much higher, which can easily lead to a wrong recognition. In this paper, we exploit prior knowledge of the relations among object categories to cluster fine-grained classes into coarser parent classes, and construct a classification tree that is responsible for parsing an object instance into a fine-grained category via its parent class. In the classification tree, as the number of parent class nodes are significantly less, their logits are less noisy and can be utilized to suppress the wrong/noisy logits existed in the fine-grained class nodes. As the way to construct the parent class is not unique, we further build multiple trees to form a classification forest where each tree contributes its vote to the fine-grained classification. To alleviate the imbalanced learning caused by the long-tail phenomena, we propose a simple yet effective resampling method, NMS Resampling, to re-balance the data distribution. Our method, termed as Forest R-CNN, can serve as a plug-and-play module being applied to most object recognition models for recognizing more than 1000 categories. Extensive experiments are performed on the large vocabulary dataset LVIS. 
Compared with the Mask R-CNN baseline, the Forest R-CNN significantly boosts the performance with 11.5% and 3.9% AP improvements on the rare categories and overall categories, respectively. Moreover, we achieve state-of-the-art results on the LVIS dataset. Code is available at https://github.com/JialianW/Forest_RCNN.", "year": 2020, "venue": "ACM Multimedia", "authors": [ "Jialian Wu", "Liangchen Song", "Tiancai Wang", "Qian Zhang", "Junsong Yuan" ], "externalIds": { "ArXiv": "2008.05676", "DBLP": "conf/mm/WuSWZY20", "MAG": "3093220398", "DOI": "10.1145/3394171.3413970", "CorpusId": 221112401 }, "url": "https://www.semanticscholar.org/paper/6c5da89caf33c6741e404eaf937694522e9ef1d9", "referenceCount": 47, "citationCount": 65, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Feature Space Augmentation for Long-Tailed Data", "abstract": null, "year": 2020, "venue": "European Conference on Computer Vision", "authors": [ "Peng Chu", "Xiao Bian", "Shaopeng Liu", "Haibin Ling" ], "externalIds": { "ArXiv": "2008.03673", "MAG": "3096688134", "DBLP": "journals/corr/abs-2008-03673", "DOI": "10.1007/978-3-030-58526-6_41", "CorpusId": 220644437 }, "url": "https://www.semanticscholar.org/paper/867ec3a4837213d0096fec75aa6d1dbbfd2c4b1d", "referenceCount": 55, "citationCount": 212, "influentialCitationCount": 13, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Balanced Meta-Softmax for Long-Tailed Visual Recognition", "abstract": "Deep classifiers have achieved great success in visual recognition. However, real-world data is long-tailed by nature, leading to the mismatch between training and testing distributions. In this paper, we show that Softmax function, though used in most classification tasks, gives a biased gradient estimation under the long-tailed setup. This paper presents Balanced Softmax, an elegant unbiased extension of Softmax, to accommodate the label distribution shift between training and testing. Theoretically, we derive the generalization bound for multiclass Softmax regression and show our loss minimizes the bound. In addition, we introduce Balanced Meta-Softmax, applying a complementary Meta Sampler to estimate the optimal class sample rate and further improve long-tailed learning. In our experiments, we demonstrate that Balanced Meta-Softmax outperforms state-of-the-art long-tailed classification solutions on both visual recognition and instance segmentation tasks.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Jiawei Ren", "Cunjun Yu", "Shunan Sheng", "Xiao Ma", "Haiyu Zhao", "Shuai Yi", "Hongsheng Li" ], "externalIds": { "DBLP": "journals/corr/abs-2007-10740", "MAG": "3044057088", "ArXiv": "2007.10740", "CorpusId": 220665471 }, "url": "https://www.semanticscholar.org/paper/d3806e9fefce863dfec7f6f83537bfb24edb278c", "referenceCount": 40, "citationCount": 453, "influentialCitationCount": 98, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Long-tail learning via logit adjustment", "abstract": "Real-world classification problems typically exhibit an imbalanced or long-tailed label distribution, wherein many labels are associated with only a few samples. This poses a challenge for generalisation on such labels, and also makes naive learning biased towards dominant labels. In this paper, we present two simple modifications of standard softmax cross-entropy training to cope with these challenges. 
Our techniques revisit the classic idea of logit adjustment based on the label frequencies, either applied post-hoc to a trained model, or enforced in the loss during training. Such adjustment encourages a large relative margin between logits of rare versus dominant labels. These techniques unify and generalise several recent proposals in the literature, while possessing firmer statistical grounding and empirical performance.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "A. Menon", "Sadeep Jayasumana", "A. Rawat", "Himanshu Jain", "Andreas Veit", "Sanjiv Kumar" ], "externalIds": { "DBLP": "journals/corr/abs-2007-07314", "MAG": "3042496707", "ArXiv": "2007.07314", "CorpusId": 220525799 }, "url": "https://www.semanticscholar.org/paper/4f65f604d3bf5fa91634484a3232b426267b71ef", "referenceCount": 82, "citationCount": 569, "influentialCitationCount": 130, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Center-based 3D Object Detection and Tracking", "abstract": "Three-dimensional objects are commonly represented as 3D boxes in a point-cloud. This representation mimics the well-studied image-based 2D bounding-box detection but comes with additional challenges. Objects in a 3D world do not follow any particular orientation, and box-based detectors have difficulties enumerating all orientations or fitting an axis-aligned bounding box to rotated objects. In this paper, we instead propose to represent, detect, and track 3D objects as points. Our framework, CenterPoint, first detects centers of objects using a keypoint detector and regresses to other attributes, including 3D size, 3D orientation, and velocity. In a second stage, it refines these estimates using additional point features on the object. In CenterPoint, 3D object tracking simplifies to greedy closest-point matching. The resulting detection and tracking algorithm is simple, efficient, and effective. CenterPoint achieved state-of-the-art performance on the nuScenes benchmark for both 3D detection and tracking, with 65.5 NDS and 63.8 AMOTA for a single model. On the Waymo Open Dataset, CenterPoint outperforms all previous single model methods by a large margin and ranks first among all Lidar-only submissions. The code and pretrained models are available at https://github.com/tianweiy/CenterPoint.", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Tianwei Yin", "Xingyi Zhou", "Philipp Krähenbühl" ], "externalIds": { "MAG": "3036067687", "DBLP": "journals/corr/abs-2006-11275", "ArXiv": "2006.11275", "DOI": "10.1109/CVPR46437.2021.01161", "CorpusId": 219956621 }, "url": "https://www.semanticscholar.org/paper/22d40963e633e1b4af4a9fefda68e1b8dc96ba63", "referenceCount": 70, "citationCount": 1261, "influentialCitationCount": 312, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Overcoming Classifier Imbalance for Long-Tail Object Detection With Balanced Group Softmax", "abstract": "Solving long-tail large vocabulary object detection with deep learning based models is a challenging and demanding task, which is however under-explored. In this work, we provide the first systematic analysis on the underperformance of state-of-the-art models in front of long-tail distribution. We find existing detection methods are unable to model few-shot classes when the dataset is extremely skewed, which can result in classifier imbalance in terms of parameter magnitude.
Directly adapting long-tail classification models to detection frameworks can not solve this problem due to the intrinsic difference between detection and classification. In this work, we propose a novel balanced group softmax (BAGS) module for balancing the classifiers within the detection frameworks through group-wise training. It implicitly modulates the training process for the head and tail classes and ensures they are both sufficiently trained, without requiring any extra sampling for the instances from the tail classes. Extensive experiments on the very recent long-tail large vocabulary object recognition benchmark LVIS show that our proposed BAGS significantly improves the performance of detectors with various backbones and frameworks on both object detection and instance segmentation. It beats all state-of-the-art methods transferred from long-tail image classification and establishes new state-of-the-art. Code is available at https://github.com/FishYuLi/BalancedGroupSoftmax.", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Yu Li", "Tao Wang", "Bingyi Kang", "Sheng Tang", "Chunfeng Wang", "Jintao Li", "Jiashi Feng" ], "externalIds": { "MAG": "3034933032", "DBLP": "conf/cvpr/LiWKTWLF20", "ArXiv": "2006.10408", "DOI": "10.1109/cvpr42600.2020.01100", "CorpusId": 219631425 }, "url": "https://www.semanticscholar.org/paper/5d25110b71e8f3ddb54aceabeb274dfc187c2991", "referenceCount": 43, "citationCount": 238, "influentialCitationCount": 32, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "M2m: Imbalanced Classification via Major-to-Minor Translation", "abstract": "In most real-world scenarios, labeled training datasets are highly class-imbalanced, where deep neural networks suffer from generalizing to a balanced testing criterion. In this paper, we explore a novel yet simple way to alleviate this issue by augmenting less-frequent classes via translating samples (e.g., images) from more-frequent classes. This simple approach enables a classifier to learn more generalizable features of minority classes, by transferring and leveraging the diversity of the majority information. Our experimental results on a variety of class-imbalanced datasets show that the proposed method improves the generalization on minority classes significantly compared to other existing re-sampling or re-weighting methods. The performance of our method even surpasses those of previous state-of-the-art methods for the imbalanced classification.", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Jaehyung Kim", "Jongheon Jeong", "Jinwoo Shin" ], "externalIds": { "DBLP": "conf/cvpr/KimJS20", "MAG": "3034711780", "ArXiv": "2004.00431", "DOI": "10.1109/CVPR42600.2020.01391", "CorpusId": 214743026 }, "url": "https://www.semanticscholar.org/paper/cde953181c09f1ed5ebc10e5525f216f0f31ff4b", "referenceCount": 50, "citationCount": 197, "influentialCitationCount": 21, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Equalization Loss for Long-Tailed Object Recognition", "abstract": "Object recognition techniques using convolutional neural networks (CNN) have achieved great success. However, state-of-the-art object detection methods still perform poorly on large vocabulary and long-tailed datasets, e.g. LVIS. 
In this work, we analyze this problem from a novel perspective: each positive sample of one category can be seen as a negative sample for other categories, making the tail categories receive more discouraging gradients. Based on it, we propose a simple but effective loss, named equalization loss, to tackle the problem of long-tailed rare categories by simply ignoring those gradients for rare categories. The equalization loss protects the learning of rare categories from being at a disadvantage during the network parameter updating. Thus the model is capable of learning better discriminative features for objects of rare classes. Without any bells and whistles, our method achieves AP gains of 4.1% and 4.8% for the rare and common categories on the challenging LVIS benchmark, compared to the Mask R-CNN baseline. With the utilization of the effective equalization loss, we finally won the 1st place in the LVIS Challenge 2019. Code has been made available at: https://github.com/tztztztztz/eql.detectron2", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Jingru Tan", "Changbao Wang", "Buyu Li", "Quanquan Li", "Wanli Ouyang", "Changqing Yin", "Junjie Yan" ], "externalIds": { "ArXiv": "2003.05176", "DBLP": "conf/cvpr/TanWLLOYY20", "MAG": "3012424925", "DOI": "10.1109/cvpr42600.2020.01168", "CorpusId": 212657779 }, "url": "https://www.semanticscholar.org/paper/6b7fac87b4ef98eceabfc47fd00a7190b1a48900", "referenceCount": 45, "citationCount": 407, "influentialCitationCount": 57, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Active Learning for Biased Datasets via Fisher Kernel Self-Supervision", "abstract": "Active learning (AL) aims to minimize labeling efforts for data-demanding deep neural networks (DNNs) by selecting the most representative data points for annotation. However, currently used methods are ill-equipped to deal with biased data. The main motivation of this paper is to consider a realistic setting for pool-based semi-supervised AL, where the unlabeled collection of train data is biased. We theoretically derive an optimal acquisition function for AL in this setting. It can be formulated as distribution shift minimization between unlabeled train data and weakly-labeled validation dataset. To implement such acquisition function, we propose a low-complexity method for feature density matching using self-supervised Fisher kernel (FK) as well as several novel pseudo-label estimators. Our FK-based method outperforms state-of-the-art methods on MNIST, SVHN, and ImageNet classification while requiring only 1/10th of processing. The conducted experiments show at least 40% drop in labeling efforts for the biased class-imbalanced data compared to existing methods.", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Denis A. 
Gudovskiy", "Alec Hodgkinson", "Takuya Yamaguchi", "Sotaro Tsukizawa" ], "externalIds": { "MAG": "3008778264", "DBLP": "journals/corr/abs-2003-00393", "ArXiv": "2003.00393", "DOI": "10.1109/cvpr42600.2020.00906", "CorpusId": 211677857 }, "url": "https://www.semanticscholar.org/paper/03c83e16a909eb4c9abe3ea173f8d71cbb706d6a", "referenceCount": 29, "citationCount": 54, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning From Multiple Experts: Self-paced Knowledge Distillation for Long-tailed Classification", "abstract": null, "year": 2020, "venue": "European Conference on Computer Vision", "authors": [ "Liuyu Xiang", "Guiguang Ding" ], "externalIds": { "DBLP": "journals/corr/abs-2001-01536", "MAG": "3096121526", "ArXiv": "2001.01536", "DOI": "10.1007/978-3-030-58558-7_15", "CorpusId": 209862398 }, "url": "https://www.semanticscholar.org/paper/029c31eee72ee0eb4d3058d48e276a2710f92325", "referenceCount": 55, "citationCount": 250, "influentialCitationCount": 19, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Scalability in Perception for Autonomous Driving: Waymo Open Dataset", "abstract": "The research community has increasing interest in autonomous driving research, despite the resource intensity of obtaining representative real world data. Existing self-driving datasets are limited in the scale and variation of the environments they capture, even though generalization within and between operating regions is crucial to the overall viability of the technology. In an effort to help align the research community’s contributions with real-world self-driving problems, we introduce a new large scale, high quality, diverse dataset. Our new dataset consists of 1150 scenes that each span 20 seconds, consisting of well synchronized and calibrated high quality LiDAR and camera data captured across a range of urban and suburban geographies. It is 15x more diverse than the largest camera+LiDAR dataset available based on our proposed diversity metric. We exhaustively annotated this data with 2D (camera image) and 3D (LiDAR) bounding boxes, with consistent identifiers across frames. Finally, we provide strong baselines for 2D as well as 3D detection and tracking tasks. We further study the effects of dataset size and generalization across geographies on 3D detection methods. Find data, code and more up-to-date information at http://www.waymo.com/open.", "year": 2019, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Pei Sun", "Henrik Kretzschmar", "Xerxes Dotiwalla", "Aurelien Chouard", "Vijaysai Patnaik", "P. Tsui", "James Guo", "Yin Zhou", "Yuning Chai", "Benjamin Caine", "Vijay Vasudevan", "Wei Han", "Jiquan Ngiam", "Hang Zhao", "Aleksei Timofeev", "S. Ettinger", "Maxim Krivokon", "A. 
Gao", "Aditya Joshi", "Yu Zhang", "Jonathon Shlens", "Zhifeng Chen", "Dragomir Anguelov" ], "externalIds": { "MAG": "3035172746", "DBLP": "conf/cvpr/SunKDCPTGZCCVHN20", "ArXiv": "1912.04838", "DOI": "10.1109/CVPR42600.2020.00252", "CorpusId": 209140225 }, "url": "https://www.semanticscholar.org/paper/2f7e8d0cfe601b9bb3d07d7783ecd80424994517", "referenceCount": 26, "citationCount": 2250, "influentialCitationCount": 380, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Decoupling Representation and Classifier for Long-Tailed Recognition", "abstract": "The long-tail distribution of the visual world poses great challenges for deep learning based classification models on how to handle the class imbalance problem. Existing solutions usually involve class-balancing strategies, e.g., by loss re-weighting, data re-sampling, or transfer learning from head- to tail-classes, but most of them adhere to the scheme of jointly learning representations and classifiers. In this work, we decouple the learning procedure into representation learning and classification, and systematically explore how different balancing strategies affect them for long-tailed recognition. The findings are surprising: (1) data imbalance might not be an issue in learning high-quality representations; (2) with representations learned with the simplest instance-balanced (natural) sampling, it is also possible to achieve strong long-tailed recognition ability by adjusting only the classifier. We conduct extensive experiments and set new state-of-the-art performance on common long-tailed benchmarks like ImageNet-LT, Places-LT and iNaturalist, showing that it is possible to outperform carefully designed losses, sampling strategies, even complex modules with memory, by using a straightforward approach that decouples representation and classification. Our code is available at this https URL.", "year": 2019, "venue": "International Conference on Learning Representations", "authors": [ "Bingyi Kang", "Saining Xie", "Marcus Rohrbach", "Zhicheng Yan", "Albert Gordo", "Jiashi Feng", "Yannis Kalantidis" ], "externalIds": { "MAG": "2981188200", "DBLP": "conf/iclr/KangXRYGFK20", "ArXiv": "1910.09217", "CorpusId": 204800400 }, "url": "https://www.semanticscholar.org/paper/dcc4c760c3f1cb17f953c487190b735030c33b78", "referenceCount": 39, "citationCount": 1058, "influentialCitationCount": 256, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Imbalanced Datasets with Label-Distribution-Aware Margin Loss", "abstract": "Deep learning algorithms can fare poorly when the training dataset suffers from heavy class-imbalance but the testing criterion requires good generalization on less frequent classes. We design two novel methods to improve performance in such scenarios. First, we propose a theoretically-principled label-distribution-aware margin (LDAM) loss motivated by minimizing a margin-based generalization bound. This loss replaces the standard cross-entropy objective during training and can be applied with prior strategies for training with class-imbalance such as re-weighting or re-sampling. Second, we propose a simple, yet effective, training schedule that defers re-weighting until after the initial stage, allowing the model to learn an initial representation while avoiding some of the complications associated with re-weighting or re-sampling. We test our methods on several benchmark vision tasks including the real-world imbalanced dataset iNaturalist 2018. 
Our experiments show that either of these methods alone can already improve over existing techniques and their combination achieves even better performance gains.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Kaidi Cao", "Colin Wei", "Adrien Gaidon", "N. Aréchiga", "Tengyu Ma" ], "externalIds": { "MAG": "2970941190", "ArXiv": "1906.07413", "DBLP": "journals/corr/abs-1906-07413", "CorpusId": 189998981 }, "url": "https://www.semanticscholar.org/paper/bcfba69c2fadf2efea83be12fda2601f8d4681af", "referenceCount": 63, "citationCount": 1358, "influentialCitationCount": 337, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Large-Scale Long-Tailed Recognition in an Open World", "abstract": "Real world data often have a long-tailed and open-ended distribution. A practical recognition system must classify among majority and minority classes, generalize from a few known instances, and acknowledge novelty upon a never seen instance. We define Open Long-Tailed Recognition (OLTR) as learning from such naturally distributed data and optimizing the classification accuracy over a balanced test set which include head, tail, and open classes. OLTR must handle imbalanced classification, few-shot learning, and open-set recognition in one integrated algorithm, whereas existing classification approaches focus only on one aspect and deliver poorly over the entire class spectrum. The key challenges are how to share visual knowledge between head and tail classes and how to reduce confusion between tail and open classes. We develop an integrated OLTR algorithm that maps an image to a feature space such that visual concepts can easily relate to each other based on a learned metric that respects the closed-world classification while acknowledging the novelty of the open world. Our so-called dynamic meta-embedding combines a direct image feature and an associated memory feature, with the feature norm indicating the familiarity to known classes. On three large-scale OLTR datasets we curate from object-centric ImageNet, scene-centric Places, and face-centric MS1M data, our method consistently outperforms the state-of-the-art. Our code, datasets, and models enable future OLTR research and are publicly available at \\url{https://liuziwei7.github.io/projects/LongTail.html}.", "year": 2019, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Ziwei Liu", "Zhongqi Miao", "Xiaohang Zhan", "Jiayun Wang", "Boqing Gong", "Stella X. Yu" ], "externalIds": { "MAG": "2939117106", "ArXiv": "1904.05160", "DBLP": "journals/corr/abs-1904-05160", "DOI": "10.1109/CVPR.2019.00264", "CorpusId": 115137311 }, "url": "https://www.semanticscholar.org/paper/73c07e0a998576bb9d9409e5eed713788c0be037", "referenceCount": 68, "citationCount": 1026, "influentialCitationCount": 230, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Variational Adversarial Active Learning", "abstract": "Active learning aims to develop label-efficient algorithms by sampling the most representative queries to be labeled by an oracle. We describe a pool-based semi-supervised active learning algorithm that implicitly learns this sampling mechanism in an adversarial manner. Our method learns a latent space using a variational autoencoder (VAE) and an adversarial network trained to discriminate between unlabeled and labeled data. 
The mini-max game between the VAE and the adversarial network is played such that while the VAE tries to trick the adversarial network into predicting that all data points are from the labeled pool, the adversarial network learns how to discriminate between dissimilarities in the latent space. We extensively evaluate our method on various image classification and semantic segmentation benchmark datasets and establish a new state of the art on CIFAR10/100, Caltech-256, ImageNet, Cityscapes, and BDD100K. Our results demonstrate that our adversarial approach learns an effective low dimensional latent space in large-scale settings and provides for a computationally efficient sampling method. Our code is available at \\url{https://github.com/sinhasam/vaal}.", "year": 2019, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Samarth Sinha", "Sayna Ebrahimi", "Trevor Darrell" ], "externalIds": { "MAG": "2927805125", "DBLP": "conf/iccv/SinhaED19", "ArXiv": "1904.00370", "DOI": "10.1109/ICCV.2019.00607", "CorpusId": 90258881 }, "url": "https://www.semanticscholar.org/paper/affd0a8391ce0941f148bf768b8132217119bbb0", "referenceCount": 64, "citationCount": 510, "influentialCitationCount": 108, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "BayesOD: A Bayesian Approach for Uncertainty Estimation in Deep Object Detectors", "abstract": "When incorporating deep neural networks into robotic systems, a major challenge is the lack of uncertainty measures associated with their output predictions. Methods for uncertainty estimation in the output of deep object detectors (DNNs) have been proposed in recent works, but have had limited success due to 1) information loss at the detectors nonmaximum suppression (NMS) stage, and 2) failure to take into account the multitask, many-to-one nature of anchor-based object detection. To that end, we introduce BayesOD, an uncertainty estimation approach that reformulates the standard object detector inference and Non-Maximum suppression components from a Bayesian perspective. Experiments performed on four common object detection datasets show that BayesOD provides uncertainty estimates that are better correlated with the accuracy of detections, manifesting as a significant reduction of 9.77%-13.13% on the minimum Gaussian uncertainty error metric and a reduction of 1.63%-5.23% on the minimum Categorical uncertainty error metric. Code will be released at https://github.com/asharakeh/bayes-od-rc.", "year": 2019, "venue": "IEEE International Conference on Robotics and Automation", "authors": [ "Ali Harakeh", "Michael H. W. Smart", "Steven L. Waslander" ], "externalIds": { "MAG": "2921523090", "DBLP": "conf/icra/HarakehSW20", "ArXiv": "1903.03838", "DOI": "10.1109/ICRA40945.2020.9196544", "CorpusId": 73728668 }, "url": "https://www.semanticscholar.org/paper/c0b74cb17b226f395f1e9efa8ff459b7a1a7c55b", "referenceCount": 34, "citationCount": 106, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Striking the Right Balance With Uncertainty", "abstract": "Learning unbiased models on imbalanced datasets is a significant challenge. Rare classes tend to get a concentrated representation in the classification space which hampers the generalization of learned boundaries to new test examples. In this paper, we demonstrate that the Bayesian uncertainty estimates directly correlate with the rarity of classes and the difficulty level of individual samples. 
Subsequently, we present a novel framework for uncertainty based class imbalance learning that follows two key insights: First, classification boundaries should be extended further away from a more uncertain (rare) class to avoid over-fitting and enhance its generalization. Second, each sample should be modeled as a multi-variate Gaussian distribution with a mean vector and a covariance matrix defined by the sample's uncertainty. The learned boundaries should respect not only the individual samples but also their distribution in the feature space. Our proposed approach efficiently utilizes sample and class uncertainty information to learn robust features and more generalizable classifiers. We systematically study the class imbalance problem and derive a novel loss formulation for max-margin learning based on Bayesian uncertainty measure. The proposed method shows significant performance improvements on six benchmark datasets for face verification, attribute prediction, digit/object classification and skin lesion detection.", "year": 2019, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Salman Hameed Khan", "Munawar Hayat", "Waqas Zamir", "Jianbing Shen", "Ling Shao" ], "externalIds": { "ArXiv": "1901.07590", "DBLP": "conf/cvpr/KhanHZS019", "MAG": "2960055608", "DOI": "10.1109/CVPR.2019.00019", "CorpusId": 59158907 }, "url": "https://www.semanticscholar.org/paper/0728e595e19d35616157c33e6f166d57a4fc5dc8", "referenceCount": 64, "citationCount": 167, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Class-Balanced Loss Based on Effective Number of Samples", "abstract": "With the rapid increase of large-scale, real-world datasets, it becomes critical to address the problem of long-tailed data distribution (i.e., a few classes account for most of the data, while most classes are under-represented). Existing solutions typically adopt class re-balancing strategies such as re-sampling and re-weighting based on the number of observations for each class. In this work, we argue that as the number of samples increases, the additional benefit of a newly added data point will diminish. We introduce a novel theoretical framework to measure data overlap by associating with each sample a small neighboring region rather than a single point. The effective number of samples is defined as the volume of samples and can be calculated by a simple formula $(1-\\beta^{n})/(1-\\beta)$, where $n$ is the number of samples and $\\beta \\in [0,1)$ is a hyperparameter. We design a re-weighting scheme that uses the effective number of samples for each class to re-balance the loss, thereby yielding a class-balanced loss. Comprehensive experiments are conducted on artificially induced long-tailed CIFAR datasets and large-scale datasets including ImageNet and iNaturalist. Our results show that when trained with the proposed class-balanced loss, the network is able to achieve significant performance gains on long-tailed datasets.", "year": 2019, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Yin Cui", "Menglin Jia", "Tsung-Yi Lin", "Yang Song", "Serge J. 
Belongie" ], "externalIds": { "MAG": "2910580498", "ArXiv": "1901.05555", "DBLP": "journals/corr/abs-1901-05555", "DOI": "10.1109/CVPR.2019.00949", "CorpusId": 58014111 }, "url": "https://www.semanticscholar.org/paper/54036f43acc6c9b49b334270c7237217685f52fb", "referenceCount": 52, "citationCount": 1943, "influentialCitationCount": 315, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Deep Imbalanced Learning for Face Recognition and Attribute Prediction", "abstract": "Data for face analysis often exhibit highly-skewed class distribution, i.e., most data belong to a few majority classes, while the minority classes only contain a scarce amount of instances. To mitigate this issue, contemporary deep learning methods typically follow classic strategies such as class re-sampling or cost-sensitive training. In this paper, we conduct extensive and systematic experiments to validate the effectiveness of these classic schemes for representation learning on class-imbalanced data. We further demonstrate that more discriminative deep representation can be learned by enforcing a deep network to maintain inter-cluster margins both within and between classes. This tight constraint effectively reduces the class imbalance inherent in the local data neighborhood, thus carving much more balanced class boundaries locally. We show that it is easy to deploy angular margins between the cluster distributions on a hypersphere manifold. Such learned Cluster-based Large Margin Local Embedding (CLMLE), when combined with a simple k-nearest cluster algorithm, shows significant improvements in accuracy over existing methods on both face recognition and face attribute prediction tasks that exhibit imbalanced class distribution.", "year": 2018, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Chen Huang", "Yining Li", "Chen Change Loy", "Xiaoou Tang" ], "externalIds": { "ArXiv": "1806.00194", "MAG": "2953156491", "DBLP": "journals/corr/abs-1806-00194", "DOI": "10.1109/TPAMI.2019.2914680", "CorpusId": 44117253, "PubMed": "31071017" }, "url": "https://www.semanticscholar.org/paper/380d5138cadccc9b5b91c707ba0a9220b0f39271", "referenceCount": 77, "citationCount": 287, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "The Power of Ensembles for Active Learning in Image Classification", "abstract": "Deep learning methods have become the de-facto standard for challenging image processing tasks such as image classification. One major hurdle of deep learning approaches is that large sets of labeled data are necessary, which can be prohibitively costly to obtain, particularly in medical image diagnosis applications. Active learning techniques can alleviate this labeling effort. In this paper we investigate some recently proposed methods for active learning with high-dimensional data and convolutional neural network classifiers. We compare ensemble-based methods against Monte-Carlo Dropout and geometric approaches. We find that ensembles perform better and lead to more calibrated predictive uncertainties, which are the basis for many active learning algorithms. To investigate why Monte-Carlo Dropout uncertainties perform worse, we explore potential differences in isolation in a series of experiments. We show results for MNIST and CIFAR-10, on which we achieve a test set accuracy of 90% with roughly 12,200 labeled images, and initial results on ImageNet. 
Additionally, we show results on a large, highly class-imbalanced diabetic retinopathy dataset. We observe that the ensemble-based active learning effectively counteracts this imbalance during acquisition.", "year": 2018, "venue": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition", "authors": [ "William H. Beluch", "Tim Genewein", "A. Nürnberger", "Jan M. Köhler" ], "externalIds": { "MAG": "2798820905", "DBLP": "conf/cvpr/BeluchGNK18", "DOI": "10.1109/CVPR.2018.00976", "CorpusId": 52838058 }, "url": "https://www.semanticscholar.org/paper/e81c70bc8b81797645332e5db726add973a5633a", "referenceCount": 61, "citationCount": 595, "influentialCitationCount": 71, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Active Learning for Convolutional Neural Networks: A Core-Set Approach", "abstract": "Convolutional neural networks (CNNs) have been successfully applied to many recognition and learning tasks using a universal recipe; training a deep model on a very large dataset of supervised examples. However, this approach is rather restrictive in practice since collecting a large set of labeled images is very expensive. One way to ease this problem is coming up with smart ways for choosing images to be labelled from a very large collection (ie. active learning). \nOur empirical study suggests that many of the active learning heuristics in the literature are not effective when applied to CNNs in batch setting. Inspired by these limitations, we define the problem of active learning as core-set selection, ie. choosing set of points such that a model learned over the selected subset is competitive for the remaining data points. We further present a theoretical result characterizing the performance of any selected subset using the geometry of the datapoints. As an active learning algorithm, we choose the subset which is expected to yield best result according to our characterization. Our experiments show that the proposed method significantly outperforms existing approaches in image classification experiments by a large margin.", "year": 2017, "venue": "International Conference on Learning Representations", "authors": [ "Ozan Sener", "S. Savarese" ], "externalIds": { "MAG": "2774918944", "DBLP": "conf/iclr/SenerS18", "CorpusId": 3383786 }, "url": "https://www.semanticscholar.org/paper/c342c71cb23199f112d0bc644fcce56a7306bf94", "referenceCount": 53, "citationCount": 1659, "influentialCitationCount": 346, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Deep Bayesian Active Learning with Image Data", "abstract": "Even though active learning forms an important pillar of machine learning, deep learning tools are not prevalent within it. Deep learning poses several difficulties when used in an active learning setting. First, active learning (AL) methods generally rely on being able to learn and update models from small amounts of data. Recent advances in deep learning, on the other hand, are notorious for their dependence on large amounts of data. Second, many AL acquisition functions rely on model uncertainty, yet deep learning methods rarely represent such model uncertainty. In this paper we combine recent advances in Bayesian deep learning into the active learning framework in a practical way. We develop an active learning framework for high dimensional data, a task which has been extremely challenging so far, with very sparse existing literature. 
Taking advantage of specialised models such as Bayesian convolutional neural networks, we demonstrate our active learning techniques with image data, obtaining a significant improvement on existing active learning approaches. We demonstrate this on both the MNIST dataset, as well as for skin cancer diagnosis from lesion images (ISIC2016 task).", "year": 2017, "venue": "International Conference on Machine Learning", "authors": [ "Y. Gal", "Riashat Islam", "Zoubin Ghahramani" ], "externalIds": { "DBLP": "conf/icml/GalIG17", "MAG": "2951786554", "ArXiv": "1703.02910", "DOI": "10.17863/CAM.11070", "CorpusId": 6318455 }, "url": "https://www.semanticscholar.org/paper/da5c65b0ac8b525c3d3d4889bf44d8a48d254a07", "referenceCount": 43, "citationCount": 1550, "influentialCitationCount": 214, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Deep Residual Learning for Image Recognition", "abstract": "Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers - 8× deeper than VGG nets [40] but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation.", "year": 2015, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Kaiming He", "X. Zhang", "Shaoqing Ren", "Jian Sun" ], "externalIds": { "DBLP": "conf/cvpr/HeZRS16", "MAG": "2949650786", "ArXiv": "1512.03385", "DOI": "10.1109/cvpr.2016.90", "CorpusId": 206594692 }, "url": "https://www.semanticscholar.org/paper/2c03df8b48bf3fa39054345bafabfeff15bfd11d", "referenceCount": 53, "citationCount": 172713, "influentialCitationCount": 28229, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Entropy-based active learning for object recognition", "abstract": "Most methods for learning object categories require large amounts of labeled training data. However, obtaining such data can be a difficult and time-consuming endeavor. We have developed a novel, entropy-based \"active learning\" approach which makes significant progress towards this problem. The main idea is to sequentially acquire labeled data by presenting an oracle (the user) with unlabeled images that will be particularly informative when labeled. Active learning adaptively prioritizes the order in which the training examples are acquired, which, as shown by our experiments, can significantly reduce the overall number of training examples required to reach near-optimal performance. 
At first glance this may seem counter-intuitive: how can the algorithm know whether a group of unlabeled images will be informative, when, by definition, there is no label directly associated with any of the images? Our approach is based on choosing an image to label that maximizes the expected amount of information we gain about the set of unlabeled images. The technique is demonstrated in several contexts, including improving the efficiency of Web image-search queries and open-world visual learning by an autonomous agent. Experiments on a large set of 140 visual object categories taken directly from text-based Web image searches show that our technique can provide large improvements (up to 10 x reduction in the number of training examples needed) over baseline techniques.", "year": 2008, "venue": "IEEE Computer Society Conference on Computer Vision and Pattern Recognition workshops", "authors": [ "Alex Holub", "P. Perona", "M. Burl" ], "externalIds": { "DBLP": "conf/cvpr/HolubPB08", "MAG": "2098742124", "DOI": "10.1109/CVPRW.2008.4563068", "CorpusId": 15855239 }, "url": "https://www.semanticscholar.org/paper/777b96abef29da63d9b4b1a583fa25c24a5ee029", "referenceCount": 18, "citationCount": 274, "influentialCitationCount": 12, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Borderline-SMOTE: A New Over-Sampling Method in Imbalanced Data Sets Learning", "abstract": null, "year": 2005, "venue": "International Conference on Intelligent Computing", "authors": [ "Hui Han", "Wenyuan Wang", "Binghuan Mao" ], "externalIds": { "MAG": "2132791018", "DBLP": "conf/icic/HanWM05", "DOI": "10.1007/11538059_91", "CorpusId": 12126950 }, "url": "https://www.semanticscholar.org/paper/b618f88ebaab51c4d38182e773419478abe44cf8", "referenceCount": 28, "citationCount": 3161, "influentialCitationCount": 280, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Elementary Applied Statistics for students in Behavioral Science", "abstract": null, "year": 1965, "venue": "", "authors": [ "L. G. Freeman" ], "externalIds": { "MAG": "2797764714", "DOI": "10.2307/3538646", "CorpusId": 133464683 }, "url": "https://www.semanticscholar.org/paper/55a9432ef56d13dfbd9942f0964e99ac67b56d60", "referenceCount": 0, "citationCount": 134, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Psychology", "Computer Science", "Sociology" ] }, { "title": "Just Label What You Need: Fine-Grained Active Selection for P&P through Partially Labeled Scenes", "abstract": ": Self-driving vehicles must perceive and predict the future positions of nearby actors to avoid collisions and drive safely. A deep learning module is often responsible for this task, requiring large-scale, high-quality training datasets. Due to high labeling costs, active learning approaches are an appealing solution to maximizing model performance for a given labeling budget. However, despite its appeal, there has been little scientific analysis of active learning approaches for the perception and prediction (P&P) problem. In this work, we study active learning techniques for P&P and find that the traditional active learning formulation is ill-suited. We thus introduce generalizations that ensure that our approach is both cost-aware and allows for fine-grained selection of examples through partially labeled scenes. 
Extensive experiments on a real-world dataset suggest significant improvements across perception, prediction, and downstream planning tasks.", "year": 2021, "venue": "Conference on Robot Learning", "authors": [ "Sean Segal", "Nishanth Kumar", "S. Casas", "Wenyuan Zeng", "Mengye Ren", "Jingkang Wang", "R. Urtasun" ], "externalIds": { "DBLP": "conf/corl/SegalK0ZRWU21", "CorpusId": 237263084 }, "url": "https://www.semanticscholar.org/paper/1270d31f93f76f3c53f6b754ce62587d997a3db1", "referenceCount": 41, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning to Model the Tail", "abstract": "We describe an approach to learning from long-tailed, imbalanced datasets that are prevalent in real-world settings. Here, the challenge is to learn accurate \"few-shot'' models for classes in the tail of the class distribution, for which little data is available. We cast this problem as transfer learning, where knowledge from the data-rich classes in the head of the distribution is transferred to the data-poor classes in the tail. Our key insights are as follows. First, we propose to transfer meta-knowledge about learning-to-learn from the head classes. This knowledge is encoded with a meta-network that operates on the space of model parameters, that is trained to predict many-shot model parameters from few-shot model parameters. Second, we transfer this meta-knowledge in a progressive manner, from classes in the head to the \"body'', and from the \"body'' to the tail. That is, we transfer knowledge in a gradual fashion, regularizing meta-networks for few-shot regression with those trained with more training data. This allows our final network to capture a notion of model dynamics, that predicts how model parameters are likely to change as more training data is gradually added. We demonstrate results on image classification datasets (SUN, Places, and ImageNet) tuned for the long-tailed setting, that significantly outperform common heuristics, such as data resampling or reweighting.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Yu-Xiong Wang", "Deva Ramanan", "M. Hebert" ], "externalIds": { "MAG": "2753300133", "DBLP": "conf/nips/WangRH17", "CorpusId": 26537848 }, "url": "https://www.semanticscholar.org/paper/93f9607034c9b7b7693c60e9d2631adc15a2a524", "referenceCount": 65, "citationCount": 591, "influentialCitationCount": 37, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Active Learning Literature Survey", "abstract": "The key idea behind active learning is that a machine learning algorithm can achieve greater accuracy with fewer labeled training instances if it is allowed to choose the data from which it learns. An active learner may ask queries in the form of unlabeled instances to be labeled by an oracle (e.g., a human annotator). Active learning is well-motivated in many modern machine learning problems, where unlabeled data may be abundant but labels are difficult, time-consuming, or expensive to obtain. This report provides a general introduction to active learning and a survey of the literature. This includes a discussion of the scenarios in which queries can be formulated, and an overview of the query strategy frameworks proposed in the literature to date. 
An analysis of the empirical and theoretical evidence for active learning, a summary of several problem setting variants, and a discussion of related topics in machine learning research are also presented.", "year": 2009, "venue": "", "authors": [ "Burr Settles" ], "externalIds": { "MAG": "2903158431", "CorpusId": 324600 }, "url": "https://www.semanticscholar.org/paper/818826f356444f3daa3447755bf63f171f39ec47", "referenceCount": 167, "citationCount": 5868, "influentialCitationCount": 610, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Mathematical Theory of Communication", "abstract": "This paper opened the new area of information theory. Before this paper, most people believed that the only way to make the error probability of transmission as small as desired was to reduce the data rate (such as with a long repetition scheme). However, surprisingly, this paper revealed that reducing the data rate is not necessary to achieve such small errors. It proved that there is a positive data rate at which the error probability can be made as small as desired, and also an upper bound on that rate, above which no encoding scheme can achieve a small enough error probability.", "year": 2006, "venue": "", "authors": [ "Jin Woo Shin", "Sang Joon Kim" ], "externalIds": { "CorpusId": 5747983 }, "url": "https://www.semanticscholar.org/paper/6d12a1d23b21a9b170118a56386552bc5d4727de", "referenceCount": 0, "citationCount": 62860, "influentialCitationCount": 5070, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "SMOTE: Synthetic Minority Over-sampling Technique", "abstract": "An approach to the construction of classifiers from imbalanced datasets is described. A dataset is imbalanced if the classification categories are not approximately equally represented. Often real-world data sets are predominately composed of \"normal\" examples with only a small percentage of \"abnormal\" or \"interesting\" examples. It is also the case that the cost of misclassifying an abnormal (interesting) example as a normal example is often much higher than the cost of the reverse error. Under-sampling of the majority (normal) class has been proposed as a good means of increasing the sensitivity of a classifier to the minority class. This paper shows that a combination of our method of oversampling the minority (abnormal) class and under-sampling the majority (normal) class can achieve better classifier performance (in ROC space) than only under-sampling the majority class. This paper also shows that a combination of our method of over-sampling the minority class and under-sampling the majority class can achieve better classifier performance (in ROC space) than varying the loss ratios in Ripper or class priors in Naive Bayes. Our method of over-sampling the minority class involves creating synthetic minority class examples. Experiments are performed using C4.5, Ripper and a Naive Bayes classifier. The method is evaluated using the area under the Receiver Operating Characteristic curve (AUC) and the ROC convex hull strategy.", "year": 2002, "venue": "Journal of Artificial Intelligence Research", "authors": [ "N. Chawla", "K. Bowyer", "L. Hall", "W. 
Kegelmeyer" ], "externalIds": { "ArXiv": "1106.1813", "DBLP": "journals/corr/abs-1106-1813", "MAG": "3100785508", "DOI": "10.1613/jair.953", "CorpusId": 1554582 }, "url": "https://www.semanticscholar.org/paper/8cb44f06586f609a29d9b496cc752ec01475dffe", "referenceCount": 42, "citationCount": 22523, "influentialCitationCount": 2431, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "2024. Negative Label Guided OOD Detection", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Openflamingo: An open-source framework for training large autoregressive vision-language models", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. Visual instruction tuning", "abstract": null, "year": null, "venue": "Advances in neural information processing systems", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "with Pretrained Vision-Language Models", "abstract": null, "year": null, "venue": "The Twelfth International Conference on Learning Representations", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Vicuna: An open-source chatbot impressing gpt-4 with 90%* chatgpt quality", "abstract": null, "year": null, "venue": "vicuna.lmsys.org", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. MiniGPT-4: Enhancing Vision-Language Understanding with Advanced Large Language Models", "abstract": null, "year": null, "venue": "The Twelfth International Conference on Learning Representations", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Behavioral Bias of Vision-Language Models: A Behavioral Finance View": { "paper_title": "Behavioral Bias of Vision-Language Models: A Behavioral Finance View", "arxiv_id": "2409.15256v1", "keyword": "vision language model", "authors": [ "Yuhang Xiao", "Yudi Lin", "Ming-Chang Chiu" ], "references": [ { "title": "A Survey on Large Language Models for Critical Societal Domains: Finance, Healthcare, and Law", "abstract": "In the fast-evolving domain of artificial intelligence, large language models (LLMs) such as GPT-3 and GPT-4 are revolutionizing the landscapes of finance, healthcare, and law: domains characterized by their reliance on professional expertise, challenging data acquisition, high-stakes, and stringent regulatory compliance. This survey offers a detailed exploration of the methodologies, applications, challenges, and forward-looking opportunities of LLMs within these high-stakes sectors. We highlight the instrumental role of LLMs in enhancing diagnostic and treatment methodologies in healthcare, innovating financial analytics, and refining legal interpretation and compliance strategies. 
Moreover, we critically examine the ethics for LLM applications in these fields, pointing out the existing ethical concerns and the need for transparent, fair, and robust AI systems that respect regulatory norms. By presenting a thorough review of current literature and practical applications, we showcase the transformative impact of LLMs, and outline the imperative for interdisciplinary cooperation, methodological advancements, and ethical vigilance. Through this lens, we aim to spark dialogue and inspire future research dedicated to maximizing the benefits of LLMs while mitigating their risks in these precision-dependent sectors. To facilitate future research on LLMs in these critical societal domains, we also initiate a reading list that tracks the latest advancements under this topic, which will be continually updated: \\url{https://github.com/czyssrs/LLM_X_papers}.", "year": 2024, "venue": "arXiv.org", "authors": [ "Z. Chen", "Jing Ma", "Xinlu Zhang", "Nan Hao", "An Yan", "Armineh Nourbakhsh", "Xianjun Yang", "Julian J. McAuley", "L. Petzold", "William Yang Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2405-01769", "ArXiv": "2405.01769", "DOI": "10.48550/arXiv.2405.01769", "CorpusId": 269587715 }, "url": "https://www.semanticscholar.org/paper/0b5439b6c22f4e48ed34cce1409057a358033c81", "referenceCount": 0, "citationCount": 10, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MobileVLM V2: Faster and Stronger Baseline for Vision Language Model", "abstract": "We introduce MobileVLM V2, a family of significantly improved vision language models upon MobileVLM, which proves that a delicate orchestration of novel architectural design, an improved training scheme tailored for mobile VLMs, and rich high-quality dataset curation can substantially benefit VLMs' performance. Specifically, MobileVLM V2 1.7B achieves better or on-par performance on standard VLM benchmarks compared with much larger VLMs at the 3B scale. Notably, our 3B model outperforms a large variety of VLMs at the 7B+ scale. Our models will be released at https://github.com/Meituan-AutoML/MobileVLM.", "year": 2024, "venue": "arXiv.org", "authors": [ "Xiangxiang Chu", "Limeng Qiao", "Xinyu Zhang", "Shuang Xu", "Fei Wei", "Yang Yang", "Xiaofei Sun", "Yiming Hu", "Xinyang Lin", "Bo Zhang", "Chunhua Shen" ], "externalIds": { "ArXiv": "2402.03766", "DBLP": "journals/corr/abs-2402-03766", "DOI": "10.48550/arXiv.2402.03766", "CorpusId": 267500104 }, "url": "https://www.semanticscholar.org/paper/a091bf215c716a146140f81c751712db628c8e20", "referenceCount": 68, "citationCount": 50, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MMMU: A Massive Multi-Discipline Multimodal Understanding and Reasoning Benchmark for Expert AGI", "abstract": "We introduce MMMU: a new benchmark designed to evaluate multimodal models on massive multi-discipline tasks demanding college-level subject knowledge and deliberate reasoning. MMMU includes 11.5K meticulously collected multimodal questions from college exams, quizzes, and textbooks, covering six core disciplines: Art & Design, Business, Science, Health & Medicine, Humanities & Social Science, and Tech & Engineering. These questions span 30 subjects and 183 subfields, comprising 30 highly heterogeneous image types, such as charts, diagrams, maps, tables, music sheets, and chemical structures. 
Unlike existing benchmarks, MMMU focuses on advanced perception and reasoning with domain-specific knowledge, challenging models to perform tasks akin to those faced by experts. The evaluation of 28 open-source LMMs as well as the propri-etary GPT-4V(ision) and Gemini highlights the substantial challenges posed by MMMU. Even the advanced GPT-4V and Gemini Ultra only achieve accuracies of 56% and 59% respectively, indicating significant room for improvement. We believe MMMU will stimulate the community to build next-generation multimodal foundation models towards expert artificial general intelligence.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Xiang Yue", "Yuansheng Ni", "Kai Zhang", "Tianyu Zheng", "Ruoqi Liu", "Ge Zhang", "Samuel Stevens", "Dongfu Jiang", "Weiming Ren", "Yuxuan Sun", "Cong Wei", "Botao Yu", "Ruibin Yuan", "Renliang Sun", "Ming Yin", "Boyuan Zheng", "Zhenzhu Yang", "Yibo Liu", "Wenhao Huang", "Huan Sun", "Yu Su", "Wenhu Chen" ], "externalIds": { "ArXiv": "2311.16502", "DBLP": "journals/corr/abs-2311-16502", "DOI": "10.1109/CVPR52733.2024.00913", "CorpusId": 265466525 }, "url": "https://www.semanticscholar.org/paper/b50d19c5c298f6562c3b3c6c3822a351bdc89260", "referenceCount": 84, "citationCount": 277, "influentialCitationCount": 36, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "InvestLM: A Large Language Model for Investment using Financial Domain Instruction Tuning", "abstract": "We present a new financial domain large language model, InvestLM, tuned on LLaMA-65B (Touvron et al., 2023), using a carefully curated instruction dataset related to financial investment. Inspired by less-is-more-for-alignment (Zhou et al., 2023), we manually curate a small yet diverse instruction dataset, covering a wide range of financial related topics, from Chartered Financial Analyst (CFA) exam questions to SEC filings to Stackexchange quantitative finance discussions. InvestLM shows strong capabilities in understanding financial text and provides helpful responses to investment related questions. Financial experts, including hedge fund managers and research analysts, rate InvestLM's response as comparable to those of state-of-the-art commercial models (GPT-3.5, GPT-4 and Claude-2). Zero-shot evaluation on a set of financial NLP benchmarks demonstrates strong generalizability. From a research perspective, this work suggests that a high-quality domain specific LLM can be tuned using a small set of carefully curated instructions on a well-trained foundation model, which is consistent with the Superficial Alignment Hypothesis (Zhou et al., 2023). From a practical perspective, this work develops a state-of-the-art financial domain LLM with superior capability in understanding financial texts and providing helpful investment advice, potentially enhancing the work efficiency of financial professionals. We release the model parameters to the research community.", "year": 2023, "venue": "arXiv.org", "authors": [ "Yi Yang", "Yixuan Tang", "K. 
Tam" ], "externalIds": { "DBLP": "journals/corr/abs-2309-13064", "ArXiv": "2309.13064", "DOI": "10.48550/arXiv.2309.13064", "CorpusId": 262459267 }, "url": "https://www.semanticscholar.org/paper/844bc3b26b5c63ec3b251ae634c194dcfb41a7d2", "referenceCount": 29, "citationCount": 33, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Economics" ] }, { "title": "Lost in the Middle: How Language Models Use Long Contexts", "abstract": "While recent language models have the ability to take long contexts as input, relatively little is known about how well they use longer context. We analyze the performance of language models on two tasks that require identifying relevant information in their input contexts: multi-document question answering and key-value retrieval. We find that performance can degrade significantly when changing the position of relevant information, indicating that current language models do not robustly make use of information in long input contexts. In particular, we observe that performance is often highest when relevant information occurs at the beginning or end of the input context, and significantly degrades when models must access relevant information in the middle of long contexts, even for explicitly long-context models. Our analysis provides a better understanding of how language models use their input context and provides new evaluation protocols for future long-context language models.", "year": 2023, "venue": "Transactions of the Association for Computational Linguistics", "authors": [ "Nelson F. Liu", "Kevin Lin", "John Hewitt", "Ashwin Paranjape", "Michele Bevilacqua", "F. Petroni", "Percy Liang" ], "externalIds": { "ArXiv": "2307.03172", "DBLP": "journals/tacl/LiuLHPBPL24", "ACL": "2024.tacl-1.9", "DOI": "10.1162/tacl_a_00638", "CorpusId": 259360665 }, "url": "https://www.semanticscholar.org/paper/1733eb7792f7a43dd21f51f4d1017a1bffd217b5", "referenceCount": 58, "citationCount": 767, "influentialCitationCount": 61, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Large Language Models are Zero-Shot Reasoners", "abstract": "Pretrained large language models (LLMs) are widely used in many sub-fields of natural language processing (NLP) and generally known as excellent few-shot learners with task-specific exemplars. Notably, chain of thought (CoT) prompting, a recent technique for eliciting complex multi-step reasoning through step-by-step answer examples, achieved the state-of-the-art performances in arithmetics and symbolic reasoning, difficult system-2 tasks that do not follow the standard scaling laws for LLMs. While these successes are often attributed to LLMs' ability for few-shot learning, we show that LLMs are decent zero-shot reasoners by simply adding\"Let's think step by step\"before each answer. Experimental results demonstrate that our Zero-shot-CoT, using the same single prompt template, significantly outperforms zero-shot LLM performances on diverse benchmark reasoning tasks including arithmetics (MultiArith, GSM8K, AQUA-RAT, SVAMP), symbolic reasoning (Last Letter, Coin Flip), and other logical reasoning tasks (Date Understanding, Tracking Shuffled Objects), without any hand-crafted few-shot examples, e.g. increasing the accuracy on MultiArith from 17.7% to 78.7% and GSM8K from 10.4% to 40.7% with large InstructGPT model (text-davinci-002), as well as similar magnitudes of improvements with another off-the-shelf large model, 540B parameter PaLM. 
The versatility of this single prompt across very diverse reasoning tasks hints at untapped and understudied fundamental zero-shot capabilities of LLMs, suggesting high-level, multi-task broad cognitive capabilities may be extracted by simple prompting. We hope our work not only serves as the minimal strongest zero-shot baseline for the challenging reasoning benchmarks, but also highlights the importance of carefully exploring and analyzing the enormous zero-shot knowledge hidden inside LLMs before crafting finetuning datasets or few-shot exemplars.", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Takeshi Kojima", "S. Gu", "Machel Reid", "Yutaka Matsuo", "Yusuke Iwasawa" ], "externalIds": { "DBLP": "journals/corr/abs-2205-11916", "ArXiv": "2205.11916", "CorpusId": 249017743 }, "url": "https://www.semanticscholar.org/paper/e7ad08848d5d7c5c47673ffe0da06af443643bda", "referenceCount": 61, "citationCount": 2722, "influentialCitationCount": 259, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Show Your Work: Scratchpads for Intermediate Computation with Language Models", "abstract": "Large pre-trained language models perform remarkably well on tasks that can be done\"in one pass\", such as generating realistic text or synthesizing computer programs. However, they struggle with tasks that require unbounded multi-step computation, such as adding integers or executing programs. Surprisingly, we find that these same models are able to perform complex multi-step computations -- even in the few-shot regime -- when asked to perform the operation\"step by step\", showing the results of intermediate computations. In particular, we train transformers to perform multi-step computations by asking them to emit intermediate computation steps into a\"scratchpad\". On a series of increasingly complex tasks ranging from long addition to the execution of arbitrary programs, we show that scratchpads dramatically improve the ability of language models to perform multi-step computations.", "year": 2021, "venue": "arXiv.org", "authors": [ "Maxwell Nye", "Anders Andreassen", "Guy Gur-Ari", "H. Michalewski", "Jacob Austin", "David Bieber", "David Dohan", "Aitor Lewkowycz", "Maarten Bosma", "D. Luan", "Charles Sutton", "Augustus Odena" ], "externalIds": { "DBLP": "journals/corr/abs-2112-00114", "ArXiv": "2112.00114", "CorpusId": 244773644 }, "url": "https://www.semanticscholar.org/paper/92173d081b15824d22a9ef070e118744ceee8052", "referenceCount": 30, "citationCount": 541, "influentialCitationCount": 41, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Artificial intelligence in financial services: a qualitative research to discover robo-advisory services", "abstract": "\nPurpose\nThe purpose of this study is to gauge the awareness and perception of Indian individual investors about a new fintech innovation known as robo-advisors in the wealth management scenario. Robo-advisors are comprehensive automated online advisory platforms that help investors in managing wealth by recommending portfolio allocations, which are based on certain algorithms.\n\n\nDesign/methodology/approach\nThis is a phenomenological qualitative study that used five focussed group discussions to gather the stipulated information. Purposive sampling was used and the sample comprised investors who actively invest in the Indian stock market. A semi-structured questionnaire and homogeneous discussions were used for this study. 
Discussion time for all the groups was 203 min. One of the authors moderated the discussions and translated the audio recordings verbatim. Subsequently, content analysis was carried out by using the NVIVO 12 software (QSR International) to derive different themes.\n\n\nFindings\nFactors such as cost-effectiveness, trust, data security, behavioural biases and sentiments of the investors were observed as crucial points which significantly impacted the perception of the investors. Furthermore, several suggestions on different ways to enhance the awareness levels of investors were brought up by the participants during the discussions. It was observed that some investors perceive robo-advisors as only an alternative for fund/wealth managers/brokers for quantitative analysis. Also, they strongly believe that human intervention is necessary to gauge the emotions of the investors. Hence, at present, robo-advisors for the Indian stock market, act only as a supplementary service rather than a substitute for financial advisors.\n\n\nResearch limitations/implications\nDue to the explorative nature of the study and limited participants, the findings of the study cannot be generalised to the overall population. Future research is imperative to study the dynamic nature of artificial intelligence (AI) theories and investigate whether they are able to capture the sentiments of individual investors and human sentiments impacting the market.\n\n\nPractical implications\nThis study gives an insight into the awareness, perception and opinion of the investors about robo-advisory services. From a managerial perspective, the findings suggest that additional attention needs to be devoted to the adoption and inculcation of AI and machine learning theories while building algorithms or logic to come up with effective models. Many investors expressed discontent with the current design of risk profiles of the investors. This helps to provide feedback for developers and designers of robo-advisors to include advanced and detailed programming to be able to do risk profiling in a more comprehensive and precise manner.\n\n\nSocial implications\nIn the future, robo-advisors will change the wealth management scenario. It is well-established that data is the new oil for all businesses in the present times. Technologies such as robo-advisor, need to evolve further in terms of predicting unstructured data, improvising qualitative analysis techniques to include the ability to gauge emotions of investors and markets in real-time. Additionally, the behavioural biases of both the programmers and the investors need to be taken care of simultaneously while designing these automated decision support systems.\n\n\nOriginality/value\nThis study fulfils an identified gap in the literature regarding the investors’ perception of new fintech innovation, that is, robo-advisors. It also clarifies the confusion about the awareness level of robo-advisors amongst Indian individual investors by examining their attitudes and by suggesting innovations for future research. To the best of the authors’ knowledge, this study is the first to investigate the awareness, perception and attitudes of individual investors towards robo-advisors.\n", "year": 2021, "venue": "Qualitative Research in Financial Markets", "authors": [ "Ankita Bhatia", "Arti Chandani", "R. 
Atiq", "Mita Mehta", "Rajiv Divekar" ], "externalIds": { "MAG": "3197295500", "DOI": "10.1108/qrfm-10-2020-0199", "CorpusId": 239660328 }, "url": "https://www.semanticscholar.org/paper/5f666abc83fa3ccfe24b55b01234e0bbb97e9f37", "referenceCount": 36, "citationCount": 21, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Business" ] }, { "title": "Measuring Massive Multitask Language Understanding", "abstract": "We propose a new test to measure a text model's multitask accuracy. The test covers 57 tasks including elementary mathematics, US history, computer science, law, and more. To attain high accuracy on this test, models must possess extensive world knowledge and problem solving ability. We find that while most recent models have near random-chance accuracy, the very largest GPT-3 model improves over random chance by almost 20 percentage points on average. However, on every one of the 57 tasks, the best models still need substantial improvements before they can reach expert-level accuracy. Models also have lopsided performance and frequently do not know when they are wrong. Worse, they still have near-random accuracy on some socially important subjects such as morality and law. By comprehensively evaluating the breadth and depth of a model's academic and professional understanding, our test can be used to analyze models across many tasks and to identify important shortcomings.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Dan Hendrycks", "Collin Burns", "Steven Basart", "Andy Zou", "Mantas Mazeika", "D. Song", "J. Steinhardt" ], "externalIds": { "DBLP": "conf/iclr/HendrycksBBZMSS21", "ArXiv": "2009.03300", "MAG": "3083410900", "CorpusId": 221516475 }, "url": "https://www.semanticscholar.org/paper/814a4f680b9ba6baba23b93499f4b48af1a27678", "referenceCount": 35, "citationCount": 2248, "influentialCitationCount": 471, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Behavioral Finance", "abstract": "People tend to be penny wise and pound foolish and cry over spilt milk, even though we are taught to do neither. Focusing on the present at the expense of the future and basing decisions on lost value are two mistakes common to decision-making that are particularly costly in the world of finance. Behavioral Finance: What Everyone Needs to KnowR provides an overview of common shortcuts and mistakes people make in managing their finances. It covers the common cognitive biases or errors that occur when people are collecting, processing, and interpreting information. These include emotional biases and the influence of social factors, from culture to the behavior of one’s peers. These effects vary during one’s life, reflecting differences in due to age, experience, and gender. Among the questions to be addressed are: How did the financial crisis of 2007-2008 spur understanding human behavior? What are market anomalies and how do they relate to behavioral biases? What role does overconfidence play in financial decision- making? And how does getting older affect risk tolerance?", "year": 2019, "venue": "World Scientific Lecture Notes in Finance", "authors": [ "Cfa Michael M. 
Pompian", "Mba Fia Fsip Colin McLean", "PhD Cfa Alistair Byrne" ], "externalIds": { "DOI": "10.1142/9789813234109_0007", "CorpusId": 11731170 }, "url": "https://www.semanticscholar.org/paper/49f04232f9c02987d493031fed77d9b81949f66a", "referenceCount": 3, "citationCount": 87, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Availability, Recency, and Sophistication in the Repurchasing Behavior of Retail Investors", "abstract": null, "year": 2013, "venue": "", "authors": [ "John R. Nofsinger", "A. Varma" ], "externalIds": { "MAG": "1974902447", "DOI": "10.1016/J.JBANKFIN.2013.02.023", "CorpusId": 153373061 }, "url": "https://www.semanticscholar.org/paper/ab3154eb0d838b066f4aaaf31393fbbaf97ae119", "referenceCount": 26, "citationCount": 78, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Business" ] }, { "title": "The Effects of Expert and Consumer Endorsements on Audience Response", "abstract": "ABSTRACT This study examines the process by which audiences integrate expert and consumer endorsements into their product evaluations and how endorsement consensus affects this process. The results suggest that positive expert and consumer endorsements both enhance audiences' attitudes toward the endorsed product. However, positive consumer endorsements and higher perceived credibility of consumer endorsements, rather than expert endorsements, enhance audiences' behavioral intents when audiences are already interested in the endorsed product.", "year": 2005, "venue": "Journal of Advertising Research", "authors": [ "Alex Wang" ], "externalIds": { "MAG": "2119209319", "DOI": "10.1017/S0021849905050452", "CorpusId": 167938504 }, "url": "https://www.semanticscholar.org/paper/27f6b61c2036468d3fc8eb0f49cf362360f4c63e", "referenceCount": 24, "citationCount": 95, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Business" ] }, { "title": "mplfinance: Matplotlib utilities for the visualization, and visual analysis, of financial data", "abstract": null, "year": 2019, "venue": "github", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Surprised by the hot hand fallacy? a truth in the law of small numbers", "abstract": null, "year": 2018, "venue": "Econometrica", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "alpha_vantage", "abstract": null, "year": 2017, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "How Earning Per Share (EPS) Affects on Share Price and Firm Value", "abstract": "Earnings per Share (EPS) is generally considered most important factor to determine share price and firm value. Literature shows that most of the individual investors take their individual investment decision based on the EPS. This paper attempts to provide empirical evidence on how EPS affect the share price movement. We have collected and analyzed 22 scheduled banks 110 firm year data and found that share price does not move as fast as the EPS move. We also further found that the share price movement depends on micro and macro economic factors on the economy. 
We suggest that investors should consider other factors in addition to EPS when investing in the securities market.", "year": 2014, "venue": "", "authors": [ "M. Islam", "Tahsan Rahman Khan", "T. Choudhury", "Ashique Mahmood Adnan" ], "externalIds": { "MAG": "1583627315", "CorpusId": 166477894 }, "url": "https://www.semanticscholar.org/paper/809b1312eb8f2db9533d138b29180b2522bb4a56", "referenceCount": 10, "citationCount": 76, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Business" ] }, { "title": "OpenAI", "abstract": null, "year": null, "venue": "openai.com/index/gpt-4o-and-more-tools-to-chatgpt-free/", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Mini-gemini: Mining the potential of multi-modality vision language models", "abstract": null, "year": null, "venue": "arXiv", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. Are large language models rational investors?", "abstract": null, "year": null, "venue": "Preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Wikipedia contributors. 2024. List of S&P 500 companies — Wikipedia", "abstract": null, "year": null, "venue": "The Free Encyclopedia", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. Financial statement analysis with large language models", "abstract": null, "year": null, "venue": "Chicago Booth Research Paper Forthcoming, Fama-Miller Working Paper", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "A-VL: Adaptive Attention for Large Vision-Language Models": { "paper_title": "A-VL: Adaptive Attention for Large Vision-Language Models", "arxiv_id": "2409.14846v1", "keyword": "vision language model", "authors": [ "Junyang Zhang", "Mu Yuan", "Ruiguang Zhong", "Puhan Luo", "Huiyou Zhan", "Ningkang Zhang", "Chengchen Hu", "Xiangyang Li" ], "references": [ { "title": "LMMs-Eval: Reality Check on the Evaluation of Large Multimodal Models", "abstract": "The advances of large foundation models necessitate wide-coverage, low-cost, and zero-contamination benchmarks. Despite continuous exploration of language model evaluations, comprehensive studies on the evaluation of Large Multi-modal Models (LMMs) remain limited. In this work, we introduce LMMS-EVAL, a unified and standardized multimodal benchmark framework with over 50 tasks and more than 10 models to promote transparent and reproducible evaluations. Although LMMS-EVAL offers comprehensive coverage, we find it still falls short in achieving low cost and zero contamination. 
To approach this evaluation trilemma, we further introduce LMMS-EVAL LITE, a pruned evaluation toolkit that emphasizes both coverage and efficiency. Additionally, we present Multimodal LIVEBENCH that utilizes continuously updating news and online forums to assess models' generalization abilities in the wild, featuring a low-cost and zero-contamination evaluation approach. In summary, our work highlights the importance of considering the evaluation trilemma and provides practical solutions to navigate the trade-offs in evaluating large multi-modal models, paving the way for more effective and reliable benchmarking of LMMs. We opensource our codebase and maintain leaderboard of LIVEBENCH at https://github.com/EvolvingLMMs-Lab/lmms-eval and https://huggingface.co/spaces/lmms-lab/LiveBench.", "year": 2024, "venue": "arXiv.org", "authors": [ "Kaichen Zhang", "Bo Li", "Peiyuan Zhang", "Fanyi Pu", "Joshua Adrian Cahyono", "Kairui Hu", "Shuai Liu", "Yuanhan Zhang", "Jingkang Yang", "Chunyuan Li", "Ziwei Liu" ], "externalIds": { "ArXiv": "2407.12772", "DBLP": "journals/corr/abs-2407-12772", "DOI": "10.48550/arXiv.2407.12772", "CorpusId": 271244782 }, "url": "https://www.semanticscholar.org/paper/84da5687bdb173c1ca00078c17317f1532e7a033", "referenceCount": 64, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Unlocking Efficiency in Large Language Model Inference: A Comprehensive Survey of Speculative Decoding", "abstract": "To mitigate the high inference latency stemming from autoregressive decoding in Large Language Models (LLMs), Speculative Decoding has emerged as a novel decoding paradigm for LLM inference. In each decoding step, this method first drafts several future tokens efficiently and then verifies them in parallel. Unlike autoregressive decoding, Speculative Decoding facilitates the simultaneous decoding of multiple tokens per step, thereby accelerating inference. This paper presents a comprehensive overview and analysis of this promising decoding paradigm. We begin by providing a formal definition and formulation of Speculative Decoding. Then, we organize in-depth discussions on its key facets, such as drafter selection and verification strategies. Furthermore, we present a comparative analysis of leading methods under third-party testing environments. We aim for this work to serve as a catalyst for further research on Speculative Decoding, ultimately contributing to more efficient LLM inference.", "year": 2024, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Heming Xia", "Zhe Yang", "Qingxiu Dong", "Peiyi Wang", "Yongqi Li", "Tao Ge", "Tianyu Liu", "Wenjie Li", "Zhifang Sui" ], "externalIds": { "DBLP": "journals/corr/abs-2401-07851", "ArXiv": "2401.07851", "DOI": "10.48550/arXiv.2401.07851", "CorpusId": 266999159 }, "url": "https://www.semanticscholar.org/paper/0cee098244c9978032702862a43a09f468f691a4", "referenceCount": 60, "citationCount": 39, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fast and Robust Early-Exiting Framework for Autoregressive Language Models with Synchronized Parallel Decoding", "abstract": "To tackle the high inference latency exhibited by autoregressive language models, previous studies have proposed an early-exiting framework that allocates adaptive computation paths for each token based on the complexity of generating the subsequent token. 
However, we observed several shortcomings, including performance degradation caused by a state copying mechanism or numerous exit paths, and sensitivity to exit confidence thresholds. Consequently, we propose a Fast and Robust Early-Exiting (FREE) framework, which incorporates a shallow-deep module and a synchronized parallel decoding. Our framework enables faster inference by synchronizing the decoding process of the current token with previously stacked early-exited tokens. Furthermore, as parallel decoding allows us to observe predictions from both shallow and deep models, we present a novel adaptive threshold estimator that exploits a Beta mixture model to determine suitable confidence thresholds. We empirically demonstrated the superiority of our proposed framework on extensive generation tasks.", "year": 2023, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Sangmin Bae", "Jongwoo Ko", "Hwanjun Song", "SeYoung Yun" ], "externalIds": { "DBLP": "journals/corr/abs-2310-05424", "ArXiv": "2310.05424", "DOI": "10.48550/arXiv.2310.05424", "CorpusId": 263830054 }, "url": "https://www.semanticscholar.org/paper/564855d475ed9197dd7516594557ff886ff623e5", "referenceCount": 54, "citationCount": 31, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Model Tells You What to Discard: Adaptive KV Cache Compression for LLMs", "abstract": "In this study, we introduce adaptive KV cache compression, a plug-and-play method that reduces the memory footprint of generative inference for Large Language Models (LLMs). Different from the conventional KV cache that retains key and value vectors for all context tokens, we conduct targeted profiling to discern the intrinsic structure of attention modules. Based on the recognized structure, we then construct the KV cache in an adaptive manner: evicting long-range contexts on attention heads emphasizing local contexts, discarding non-special tokens on attention heads centered on special tokens, and only employing the standard KV cache for attention heads that broadly attend to all tokens. Moreover, with the lightweight attention profiling used to guide the construction of the adaptive KV cache, FastGen can be deployed without resource-intensive fine-tuning or re-training. In our experiments across various asks, FastGen demonstrates substantial reduction on GPU memory consumption with negligible generation quality loss. We will release our code and the compatible CUDA kernel for reproducibility.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Suyu Ge", "Yunan Zhang", "Liyuan Liu", "Minjia Zhang", "Jiawei Han", "Jianfeng Gao" ], "externalIds": { "DBLP": "journals/corr/abs-2310-01801", "ArXiv": "2310.01801", "DOI": "10.48550/arXiv.2310.01801", "CorpusId": 263609075 }, "url": "https://www.semanticscholar.org/paper/6c323c535365e1c7cbfd9703cbec3b5650a3346b", "referenceCount": 38, "citationCount": 81, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Efficient Streaming Language Models with Attention Sinks", "abstract": "Deploying Large Language Models (LLMs) in streaming applications such as multi-round dialogue, where long interactions are expected, is urgently needed but poses two major challenges. Firstly, during the decoding stage, caching previous tokens' Key and Value states (KV) consumes extensive memory. 
Secondly, popular LLMs cannot generalize to longer texts than the training sequence length. Window attention, where only the most recent KVs are cached, is a natural approach -- but we show that it fails when the text length surpasses the cache size. We observe an interesting phenomenon, namely attention sink, that keeping the KV of initial tokens will largely recover the performance of window attention. In this paper, we first demonstrate that the emergence of attention sink is due to the strong attention scores towards initial tokens as a\"sink\"even if they are not semantically important. Based on the above analysis, we introduce StreamingLLM, an efficient framework that enables LLMs trained with a finite length attention window to generalize to infinite sequence lengths without any fine-tuning. We show that StreamingLLM can enable Llama-2, MPT, Falcon, and Pythia to perform stable and efficient language modeling with up to 4 million tokens and more. In addition, we discover that adding a placeholder token as a dedicated attention sink during pre-training can further improve streaming deployment. In streaming settings, StreamingLLM outperforms the sliding window recomputation baseline by up to 22.2x speedup. Code and datasets are provided at https://github.com/mit-han-lab/streaming-llm.", "year": 2023, "venue": "International Conference on Learning Representations", "authors": [ "Guangxuan Xiao", "Yuandong Tian", "Beidi Chen", "Song Han", "Mike Lewis" ], "externalIds": { "DBLP": "conf/iclr/XiaoTCHL24", "ArXiv": "2309.17453", "DOI": "10.48550/arXiv.2309.17453", "CorpusId": 263310483 }, "url": "https://www.semanticscholar.org/paper/fdc53c2c10742464087c0525f77e32604827a21d", "referenceCount": 62, "citationCount": 274, "influentialCitationCount": 48, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Efficient Memory Management for Large Language Model Serving with PagedAttention", "abstract": "High throughput serving of large language models (LLMs) requires batching sufficiently many requests at a time. However, existing systems struggle because the key-value cache (KV cache) memory for each request is huge and grows and shrinks dynamically. When managed inefficiently, this memory can be significantly wasted by fragmentation and redundant duplication, limiting the batch size. To address this problem, we propose PagedAttention, an attention algorithm inspired by the classical virtual memory and paging techniques in operating systems. On top of it, we build vLLM, an LLM serving system that achieves (1) near-zero waste in KV cache memory and (2) flexible sharing of KV cache within and across requests to further reduce memory usage. Our evaluations show that vLLM improves the throughput of popular LLMs by 2--4× with the same level of latency compared to the state-of-the-art systems, such as FasterTransformer and Orca. The improvement is more pronounced with longer sequences, larger models, and more complex decoding algorithms. vLLM's source code is publicly available at https://github.com/vllm-project/vllm.", "year": 2023, "venue": "Symposium on Operating Systems Principles", "authors": [ "Woosuk Kwon", "Zhuohan Li", "Siyuan Zhuang", "Ying Sheng", "Lianmin Zheng", "Cody Hao Yu", "Joseph E. 
Gonzalez", "Haotong Zhang", "Ion Stoica" ], "externalIds": { "DBLP": "journals/corr/abs-2309-06180", "ArXiv": "2309.06180", "DOI": "10.1145/3600006.3613165", "CorpusId": 261697361 }, "url": "https://www.semanticscholar.org/paper/83b90f4a0ae4cc214eb3cc140ccfef9cd99fac05", "referenceCount": 58, "citationCount": 725, "influentialCitationCount": 110, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Qwen-VL: A Versatile Vision-Language Model for Understanding, Localization, Text Reading, and Beyond", "abstract": "In this work, we introduce the Qwen-VL series, a set of large-scale vision-language models (LVLMs) designed to perceive and understand both texts and images. Starting from the Qwen-LM as a foundation, we endow it with visual capacity by the meticulously designed (i) visual receptor, (ii) input-output interface, (iii) 3-stage training pipeline, and (iv) multilingual multimodal cleaned corpus. Beyond the conventional image description and question-answering, we implement the grounding and text-reading ability of Qwen-VLs by aligning image-caption-box tuples. The resulting models, including Qwen-VL and Qwen-VL-Chat, set new records for generalist models under similar model scales on a broad range of visual-centric benchmarks (e.g., image captioning, question answering, visual grounding) and different settings (e.g., zero-shot, few-shot). Moreover, on real-world dialog benchmarks, our instruction-tuned Qwen-VL-Chat also demonstrates superiority compared to existing vision-language chatbots. Code, demo and models are available at https://github.com/QwenLM/Qwen-VL.", "year": 2023, "venue": "", "authors": [ "Jinze Bai", "Shuai Bai", "Shusheng Yang", "Shijie Wang", "Sinan Tan", "Peng Wang", "Junyang Lin", "Chang Zhou", "Jingren Zhou" ], "externalIds": { "ArXiv": "2308.12966", "CorpusId": 261101015 }, "url": "https://www.semanticscholar.org/paper/fc6a2f7478f68adefd69e2071f27e38aa1647f2f", "referenceCount": 86, "citationCount": 322, "influentialCitationCount": 74, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Dynamic Context Pruning for Efficient and Interpretable Autoregressive Transformers", "abstract": "Autoregressive Transformers adopted in Large Language Models (LLMs) are hard to scale to long sequences. Despite several works trying to reduce their computational cost, most of LLMs still adopt attention layers between all pairs of tokens in the sequence, thus incurring a quadratic cost. In this study, we present a novel approach that dynamically prunes contextual information while preserving the model's expressiveness, resulting in reduced memory and computational requirements during inference. Our method employs a learnable mechanism that determines which uninformative tokens can be dropped from the context at any point across the generation process. By doing so, our approach not only addresses performance concerns but also enhances interpretability, providing valuable insight into the model's decision-making process. Our technique can be applied to existing pre-trained models through a straightforward fine-tuning process, and the pruning strength can be specified by a sparsity parameter. Notably, our empirical findings demonstrate that we can effectively prune up to 80\\% of the context without significant performance degradation on downstream tasks, offering a valuable tool for mitigating inference costs. 
Our reference implementation achieves up to $2\\times$ increase in inference throughput and even greater memory savings.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Sotiris Anagnostidis", "Dario Pavllo", "Luca Biggio", "Lorenzo Noci", "Aurélien Lucchi", "Thomas Hofmann" ], "externalIds": { "DBLP": "journals/corr/abs-2305-15805", "ArXiv": "2305.15805", "DOI": "10.48550/arXiv.2305.15805", "CorpusId": 258888224 }, "url": "https://www.semanticscholar.org/paper/c193eb176985a81ae64f63c5e50b2f11cfb7c4e6", "referenceCount": 66, "citationCount": 29, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On the Hidden Mystery of OCR in Large Multimodal Models", "abstract": "Large models have recently played a dominant role in natural language processing and multimodal vision-language learning. However, their effectiveness in text-related visual tasks remains relatively unexplored. In this paper, we conducted a comprehensive evaluation of Large Multimodal Models, such as GPT4V and Gemini, in various text-related visual tasks including Text Recognition, Scene Text-Centric Visual Question Answering (VQA), Document-Oriented VQA, Key Information Extraction (KIE), and Handwritten Mathematical Expression Recognition (HMER). To facilitate the assessment of Optical Character Recognition (OCR) capabilities in Large Multimodal Models, we propose OCRBench, a comprehensive evaluation benchmark. OCRBench contains 29 datasets, making it the most comprehensive OCR evaluation benchmark available. Furthermore, our study reveals both the strengths and weaknesses of these models, particularly in handling multilingual text, handwritten text, non-semantic text, and mathematical expression recognition. Most importantly, the baseline results presented in this study could provide a foundational framework for the conception and assessment of innovative strategies targeted at enhancing zero-shot multimodal techniques. The evaluation pipeline and benchmark are available at https://github.com/Yuliang-Liu/MultimodalOCR.", "year": 2023, "venue": "arXiv.org", "authors": [ "Yuliang Liu", "Zhang Li", "Hongliang Li", "Wenwen Yu", "Mingxin Huang", "Dezhi Peng", "Mingyu Liu", "Mingrui Chen", "Chunyuan Li", "Lianwen Jin", "Xiang Bai" ], "externalIds": { "ArXiv": "2305.07895", "DBLP": "journals/corr/abs-2305-07895", "DOI": "10.48550/arXiv.2305.07895", "CorpusId": 258685422 }, "url": "https://www.semanticscholar.org/paper/848e690a62c327e1210532d58a6b914097cac763", "referenceCount": 106, "citationCount": 120, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LLaMA: Open and Efficient Foundation Language Models", "abstract": "We introduce LLaMA, a collection of foundation language models ranging from 7B to 65B parameters. We train our models on trillions of tokens, and show that it is possible to train state-of-the-art models using publicly available datasets exclusively, without resorting to proprietary and inaccessible datasets. In particular, LLaMA-13B outperforms GPT-3 (175B) on most benchmarks, and LLaMA-65B is competitive with the best models, Chinchilla-70B and PaLM-540B. 
We release all our models to the research community.", "year": 2023, "venue": "arXiv.org", "authors": [ "Hugo Touvron", "Thibaut Lavril", "Gautier Izacard", "Xavier Martinet", "Marie-Anne Lachaux", "Timothée Lacroix", "Baptiste Rozière", "Naman Goyal", "Eric Hambro", "Faisal Azhar", "Aurelien Rodriguez", "Armand Joulin", "Edouard Grave", "Guillaume Lample" ], "externalIds": { "DBLP": "journals/corr/abs-2302-13971", "ArXiv": "2302.13971", "CorpusId": 257219404 }, "url": "https://www.semanticscholar.org/paper/57e849d0de13ed5f91d086936296721d4ff75a75", "referenceCount": 80, "citationCount": 8031, "influentialCitationCount": 1073, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SPViT: Enabling Faster Vision Transformers via Latency-Aware Soft Token Pruning", "abstract": null, "year": 2021, "venue": "European Conference on Computer Vision", "authors": [ "Zhenglun Kong", "Peiyan Dong", "Xiaolong Ma", "Xin Meng", "Wei Niu", "Mengshu Sun", "Bin Ren", "Minghai Qin", "H. Tang", "Yanzhi Wang" ], "externalIds": { "DBLP": "conf/eccv/KongDMMNSSYRTQW22", "ArXiv": "2112.13890", "DOI": "10.1007/978-3-031-20083-0_37", "CorpusId": 245537400 }, "url": "https://www.semanticscholar.org/paper/722d71a19e4049b30a03d1028158881560432135", "referenceCount": 110, "citationCount": 90, "influentialCitationCount": 13, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DocVQA: A Dataset for VQA on Document Images", "abstract": "We present a new dataset for Visual Question Answering (VQA) on document images called DocVQA. The dataset consists of 50,000 questions defined on 12,000+ document images. Detailed analysis of the dataset in comparison with similar datasets for VQA and reading comprehension is presented. We report several baseline results by adopting existing VQA and reading comprehension models. Although the existing models perform reasonably well on certain types of questions, there is large performance gap compared to human performance (94.36% accuracy). The models need to improve specifically on questions where understanding structure of the document is crucial. The dataset, code and leaderboard are available at docvqa.org", "year": 2020, "venue": "IEEE Workshop/Winter Conference on Applications of Computer Vision", "authors": [ "Minesh Mathew", "Dimosthenis Karatzas", "R. Manmatha", "C. V. Jawahar" ], "externalIds": { "DBLP": "conf/wacv/MathewKJ21", "ArXiv": "2007.00398", "MAG": "3040138106", "DOI": "10.1109/WACV48630.2021.00225", "CorpusId": 220280200 }, "url": "https://www.semanticscholar.org/paper/b40bfcf339de3f0dba08fabb2b58b9368ff4c51a", "referenceCount": 42, "citationCount": 371, "influentialCitationCount": 71, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Scene Text Visual Question Answering", "abstract": "Current visual question answering datasets do not consider the rich semantic information conveyed by text within an image. In this work, we present a new dataset, ST-VQA, that aims to highlight the importance of exploiting high-level semantic information present in images as textual cues in the Visual Question Answering process. We use this dataset to define a series of tasks of increasing difficulty for which reading the scene text in the context provided by the visual information is necessary to reason and generate an appropriate answer. We propose a new evaluation metric for these tasks to account both for reasoning errors as well as shortcomings of the text recognition module. 
In addition we put forward a series of baseline methods, which provide further insight to the newly released dataset, and set the scene for further research.", "year": 2019, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Ali Furkan Biten", "Rubèn Pérez Tito", "Andrés Mafla", "Lluís Gómez", "Marçal Rusiñol", "Ernest Valveny", "C. V. Jawahar", "Dimosthenis Karatzas" ], "externalIds": { "ArXiv": "1905.13648", "DBLP": "conf/iccv/BitenTMBRJVK19", "MAG": "2947555604", "DOI": "10.1109/ICCV.2019.00439", "CorpusId": 173188651 }, "url": "https://www.semanticscholar.org/paper/0033346700dc450ac22c9b704eab0e906d868662", "referenceCount": 68, "citationCount": 267, "influentialCitationCount": 44, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Towards VQA Models That Can Read", "abstract": "Studies have shown that a dominant class of questions asked by visually impaired users on images of their surroundings involves reading text in the image. But today’s VQA models can not read! Our paper takes a first step towards addressing this problem. First, we introduce a new “TextVQA” dataset to facilitate progress on this important problem. Existing datasets either have a small proportion of questions about text (e.g., the VQA dataset) or are too small (e.g., the VizWiz dataset). TextVQA contains 45,336 questions on 28,408 images that require reasoning about text to answer. Second, we introduce a novel model architecture that reads text in the image, reasons about it in the context of the image and the question, and predicts an answer which might be a deduction based on the text and the image or composed of the strings found in the image. Consequently, we call our approach Look, Read, Reason & Answer (LoRRA). We show that LoRRA outperforms existing state-of-the-art VQA models on our TextVQA dataset. We find that the gap between human performance and machine performance is significantly larger on TextVQA than on VQA 2.0, suggesting that TextVQA is well-suited to benchmark progress along directions complementary to VQA 2.0.", "year": 2019, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Amanpreet Singh", "Vivek Natarajan", "Meet Shah", "Yu Jiang", "Xinlei Chen", "Dhruv Batra", "Devi Parikh", "Marcus Rohrbach" ], "externalIds": { "MAG": "2936135081", "DBLP": "journals/corr/abs-1904-08920", "ArXiv": "1904.08920", "DOI": "10.1109/CVPR.2019.00851", "CorpusId": 85553602 }, "url": "https://www.semanticscholar.org/paper/af1f7739283bdbd2b7a94903041f6d6afd991907", "referenceCount": 53, "citationCount": 716, "influentialCitationCount": 139, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Flickr30k Entities: Collecting Region-to-Phrase Correspondences for Richer Image-to-Sentence Models", "abstract": null, "year": 2015, "venue": "International Journal of Computer Vision", "authors": [ "Bryan A. Plummer", "Liwei Wang", "Christopher M. Cervantes", "Juan C. Caicedo", "J. 
Hockenmaier", "Svetlana Lazebnik" ], "externalIds": { "ArXiv": "1505.04870", "DBLP": "conf/iccv/PlummerWCCHL15", "MAG": "2568262903", "DOI": "10.1007/s11263-016-0965-7", "CorpusId": 6941275 }, "url": "https://www.semanticscholar.org/paper/11c9c31dff70de92ada9160c78ff8bb46b2912d6", "referenceCount": 83, "citationCount": 1772, "influentialCitationCount": 330, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "CIDEr: Consensus-based image description evaluation", "abstract": "Automatically describing an image with a sentence is a long-standing challenge in computer vision and natural language processing. Due to recent progress in object detection, attribute classification, action recognition, etc., there is renewed interest in this area. However, evaluating the quality of descriptions has proven to be challenging. We propose a novel paradigm for evaluating image descriptions that uses human consensus. This paradigm consists of three main parts: a new triplet-based method of collecting human annotations to measure consensus, a new automated metric that captures consensus, and two new datasets: PASCAL-50S and ABSTRACT-50S that contain 50 sentences describing each image. Our simple metric captures human judgment of consensus better than existing metrics across sentences generated by various sources. We also evaluate five state-of-the-art image description approaches using this new protocol and provide a benchmark for future comparisons. A version of CIDEr named CIDEr-D is available as a part of MS COCO evaluation server to enable systematic evaluation and benchmarking.", "year": 2014, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Ramakrishna Vedantam", "C. L. Zitnick", "Devi Parikh" ], "externalIds": { "DBLP": "journals/corr/VedantamZP14a", "MAG": "2952574180", "ArXiv": "1411.5726", "DOI": "10.1109/CVPR.2015.7299087", "CorpusId": 9026666 }, "url": "https://www.semanticscholar.org/paper/258986132bf17755fe8263e42429fe73218c1534", "referenceCount": 58, "citationCount": 3907, "influentialCitationCount": 820, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions", "abstract": "We propose to use the visual denotations of linguistic expressions (i.e. the set of images they describe) to define novel denotational similarity metrics, which we show to be at least as beneficial as distributional similarities for two tasks that require semantic inference. To compute these denotational similarities, we construct a denotation graph, i.e. a subsumption hierarchy over constituents and their denotations, based on a large corpus of 30K images and 150K descriptive captions.", "year": 2014, "venue": "Transactions of the Association for Computational Linguistics", "authors": [ "Peter Young", "Alice Lai", "Micah Hodosh", "J. Hockenmaier" ], "externalIds": { "DBLP": "journals/tacl/YoungLHH14", "MAG": "2185175083", "ACL": "Q14-1006", "DOI": "10.1162/tacl_a_00166", "CorpusId": 3104920 }, "url": "https://www.semanticscholar.org/paper/44040913380206991b1991daf1192942e038fe31", "referenceCount": 39, "citationCount": 2317, "influentialCitationCount": 362, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "2024b. 
Visual instruction tuning", "abstract": null, "year": null, "venue": "Advances in neural information processing systems", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. An image is worth 1/2 tokens after layer 2: Plug-and-play inference acceleration for large vision-language models", "abstract": null, "year": null, "venue": "arXiv", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024b. H2o: Heavy-hitter oracle for efficient generative inference of large language models", "abstract": null, "year": null, "venue": "Advances in Neural Information Processing Systems", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Cogvlm: Visual expert for pretrained language models", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. Fluctuation-based adaptive structured pruning for large language models", "abstract": null, "year": null, "venue": "Proceedings of the AAAI Conference on Artificial Intelligence , volume 38", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "MobileVLM: A Vision-Language Model for Better Intra- and Inter-UI Understanding": { "paper_title": "MobileVLM: A Vision-Language Model for Better Intra- and Inter-UI Understanding", "arxiv_id": "2409.14818v1", "keyword": "vision language model", "authors": [ "Qinzhuo Wu", "Weikai Xu", "Wei Liu", "Tao Tan", "Jianfeng Liu", "Ang Li", "Jian Luan", "Bin Wang", "Shuo Shang" ], "references": [ { "title": "Ferret-UI: Grounded Mobile UI Understanding with Multimodal LLMs", "abstract": "Recent advancements in multimodal large language models (MLLMs) have been noteworthy, yet, these general-domain MLLMs often fall short in their ability to comprehend and interact effectively with user interface (UI) screens. In this paper, we present Ferret-UI, a new MLLM tailored for enhanced understanding of mobile UI screens, equipped with referring, grounding, and reasoning capabilities. Given that UI screens typically exhibit a more elongated aspect ratio and contain smaller objects of interest (e.g., icons, texts) than natural images, we incorporate\"any resolution\"on top of Ferret to magnify details and leverage enhanced visual features. Specifically, each screen is divided into 2 sub-images based on the original aspect ratio (i.e., horizontal division for portrait screens and vertical division for landscape screens). Both sub-images are encoded separately before being sent to LLMs. We meticulously gather training samples from an extensive range of elementary UI tasks, such as icon recognition, find text, and widget listing. These samples are formatted for instruction-following with region annotations to facilitate precise referring and grounding. 
To augment the model's reasoning ability, we further compile a dataset for advanced tasks, including detailed description, perception/interaction conversations, and function inference. After training on the curated datasets, Ferret-UI exhibits outstanding comprehension of UI screens and the capability to execute open-ended instructions. For model evaluation, we establish a comprehensive benchmark encompassing all the aforementioned tasks. Ferret-UI excels not only beyond most open-source UI MLLMs, but also surpasses GPT-4V on all the elementary UI tasks.", "year": 2024, "venue": "arXiv.org", "authors": [ "Keen You", "Haotian Zhang", "E. Schoop", "Floris Weers", "Amanda Swearngin", "Jeffrey Nichols", "Yinfei Yang", "Zhe Gan" ], "externalIds": { "DBLP": "journals/corr/abs-2404-05719", "ArXiv": "2404.05719", "DOI": "10.48550/arXiv.2404.05719", "CorpusId": 269005503 }, "url": "https://www.semanticscholar.org/paper/3687e55cf21c4d0041d9bf0b74988319bfe3402b", "referenceCount": 63, "citationCount": 18, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Android in the Zoo: Chain-of-Action-Thought for GUI Agents", "abstract": "Large language model (LLM) leads to a surge of autonomous GUI agents for smartphone, which completes a task triggered by natural language through predicting a sequence of actions of API. Even though the task highly relies on past actions and visual observations, existing studies typically consider little semantic information carried out by intermediate screenshots and screen operations. To address this, this work presents Chain-of-Action-Thought (dubbed CoAT), which takes the description of the previous actions, the current screen, and more importantly the action thinking of what actions should be performed and the outcomes led by the chosen action. We demonstrate that, in a zero-shot setting upon three off-the-shelf LMMs, CoAT significantly improves the action prediction compared to previous proposed context modeling. To further facilitate the research in this line, we construct a dataset Android-In-The-Zoo (AitZ), which contains 18,643 screen-action pairs together with chain-of-action-thought annotations. Experiments show that fine-tuning a 1B model (i.e. 
AUTO-UI-base) on our AitZ dataset achieves on-par performance with CogAgent-Chat-18B.", "year": 2024, "venue": "arXiv.org", "authors": [ "Jiwen Zhang", "Jihao Wu", "Yihua Teng", "Minghui Liao", "Nuo Xu", "Xiao Xiao", "Zhongyu Wei", "Duyu Tang" ], "externalIds": { "ArXiv": "2403.02713", "DBLP": "journals/corr/abs-2403-02713", "DOI": "10.48550/arXiv.2403.02713", "CorpusId": 268248442 }, "url": "https://www.semanticscholar.org/paper/2199a5ac145edbc46db9fcbf97b01461a2367cda", "referenceCount": 50, "citationCount": 8, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ScreenAI: A Vision-Language Model for UI and Infographics Understanding", "abstract": "Screen user interfaces (UIs) and infographics, sharing similar visual language and design principles, play important roles in human communication and human-machine interaction.\n\nWe introduce ScreenAI, a vision-language model that specializes in UI and infographics understanding.\n\nOur model improves upon the PaLI architecture with the flexible patching strategy of pix2struct and is trained on a unique mixture of datasets.\n\nAt the heart of this mixture is a novel screen annotation task in which the model has to identify the type and location of UI elements.\n\nWe use these text annotations to describe screens to Large Language Models and automatically generate question-answering (QA), UI navigation, and summarization training datasets at scale.\n\nWe run ablation studies to demonstrate the impact of these design choices.\n\nAt only 5B parameters, ScreenAI achieves new state-of-the-art results\n\non UI- and infographics-based tasks (Multipage DocVQA, WebSRC, and MoTIF), and new best-in-class performance on others (ChartQA, DocVQA, and InfographicVQA) compared to models of similar size.\n\nFinally, we release three new datasets: one focused on the screen annotation task and two others focused on question answering.", "year": 2024, "venue": "Proceedings of the Thirty-ThirdInternational Joint Conference on Artificial Intelligence", "authors": [ "Gilles Baechler", "Srinivas Sunkara", "Maria Wang", "Fedir Zubach", "Hassan Mansoor", "Vincent Etter", "Victor Carbune", "Jason Lin", "Jindong Chen", "Abhanshu Sharma" ], "externalIds": { "DBLP": "journals/corr/abs-2402-04615", "ArXiv": "2402.04615", "DOI": "10.48550/arXiv.2402.04615", "CorpusId": 267523393 }, "url": "https://www.semanticscholar.org/paper/73619a1084d7e50191cea7d0393ba2604659da31", "referenceCount": 68, "citationCount": 17, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LCVO: An Efficient Pretraining-Free Framework for Visual Question Answering Grounding", "abstract": "In this paper, the LCV2 modular method is proposed for the Grounded Visual Question Answering task in the vision-language multimodal domain. This approach relies on a frozen large language model (LLM) as intermediate mediator between the off-the-shelf VQA model and the off-the-shelf visual grounding (VG) model, where the LLM transforms and conveys textual information between the two modules based on a designed prompt. LCV2 establish an integrated plug-and-play framework without the need for any pre-training process. This framework can be deployed for VQA Grounding tasks under low computational resources. The modularized model within the framework allows application with various state-of-the-art pre-trained models, exhibiting significant potential to be advance with the times. 
Experimental implementations were conducted under constrained computational and memory resources, evaluating the proposed method's performance on benchmark datasets including GQA, CLEVR, and VizWiz-VQA-Grounding. Comparative analyses with baseline methods demonstrate the robust competitiveness of LCV2.", "year": 2024, "venue": "arXiv.org", "authors": [ "Yuhan Chen", "Lumei Su", "Lihua Chen", "Zhiwei Lin" ], "externalIds": { "DBLP": "journals/corr/abs-2401-15842", "ArXiv": "2401.15842", "DOI": "10.48550/arXiv.2401.15842", "CorpusId": 268681641 }, "url": "https://www.semanticscholar.org/paper/4f319e1fa291a6c2e5df861e4dfebe6c3eaa5d3e", "referenceCount": 4, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MobileAgent: enhancing mobile control via human-machine interaction and SOP integration", "abstract": "Agents centered around Large Language Models (LLMs) are now capable of automating mobile device operations for users. After fine-tuning to learn a user's mobile operations, these agents can adhere to high-level user instructions online. They execute tasks such as goal decomposition, sequencing of sub-goals, and interactive environmental exploration, until the final objective is achieved. However, privacy concerns related to personalized user data arise during mobile operations, requiring user confirmation. Moreover, users' real-world operations are exploratory, with action data being complex and redundant, posing challenges for agent learning. To address these issues, in our practical application, we have designed interactive tasks between agents and humans to identify sensitive information and align with personalized user needs. Additionally, we integrated Standard Operating Procedure (SOP) information within the model's in-context learning to enhance the agent's comprehension of complex task execution. Our approach is evaluated on the new device control benchmark AitW, which encompasses 30K unique instructions across multi-step tasks, including application operation, web searching, and web shopping. Experimental results show that the SOP-based agent achieves state-of-the-art performance in LLMs without incurring additional inference costs, boasting an overall action success rate of 66.92\\%. The code and data examples are available at https://github.com/alipay/mobile-agent.", "year": 2024, "venue": "arXiv.org", "authors": [ "Tinghe Ding" ], "externalIds": { "DBLP": "journals/corr/abs-2401-04124", "ArXiv": "2401.04124", "DOI": "10.48550/arXiv.2401.04124", "CorpusId": 266899697 }, "url": "https://www.semanticscholar.org/paper/09d75875813ecee343f81bdd90667df41d7b2b54", "referenceCount": 41, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "AppAgent: Multimodal Agents as Smartphone Users", "abstract": "Recent advancements in large language models (LLMs) have led to the creation of intelligent agents capable of performing complex tasks. This paper introduces a novel LLM-based multimodal agent framework designed to operate smartphone applications. Our framework enables the agent to operate smartphone applications through a simplified action space, mimicking human-like interactions such as tapping and swiping. This novel approach bypasses the need for system back-end access, thereby broadening its applicability across diverse apps. Central to our agent's functionality is its innovative learning method. 
The agent learns to navigate and use new apps either through autonomous exploration or by observing human demonstrations. This process generates a knowledge base that the agent refers to for executing complex tasks across different applications. To demonstrate the practicality of our agent, we conducted extensive testing over 50 tasks in 10 different applications, including social media, email, maps, shopping, and sophisticated image editing tools. The results affirm our agent's proficiency in handling a diverse array of high-level tasks.", "year": 2023, "venue": "arXiv.org", "authors": [ "C. Zhang", "Zhao Yang", "Jiaxuan Liu", "Yucheng Han", "Xin Chen", "Zebiao Huang", "Bin Fu", "Gang Yu" ], "externalIds": { "ArXiv": "2312.13771", "DBLP": "journals/corr/abs-2312-13771", "DOI": "10.48550/arXiv.2312.13771", "CorpusId": 266435868 }, "url": "https://www.semanticscholar.org/paper/53cb57c8ca1ce950b0588d481ae399296acb8f5b", "referenceCount": 26, "citationCount": 78, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "CogAgent: A Visual Language Model for GUI Agents", "abstract": "People are spending an enormous amount of time on digital devices through graphical user interfaces (GUIs), e.g., computer or smartphone screens. Large language models (LLMs) such as ChatGPT can assist people in tasks like writing emails, but struggle to understand and interact with GUIs, thus limiting their potential to increase automation levels. In this paper, we introduce CogAgent, an 18-billion-parameter visual language model (VLM) specializing in GUI understanding and navigation. By utilizing both low-resolution and high-resolution image encoders, CogAgent supports input at a resolution of 1120 × 1120, enabling it to recognize tiny page elements and text. As a generalist visual language model, CogAgent achieves the state of the art on five text-rich and four general VQA benchmarks, including VQAv2, OK-VQA, TextVQA, ST-VQA, ChartQA, InfoVQA, DocVQA, MM-Vet, and POPE. CogAgent, using only screenshots as input, outperforms LLM-based methods that consume extracted HTML text on both PC and Android GUI navigation tasks, Mind2Web and AITW, advancing the state of the art. The model and codes are available at https://github.com/THUDM/CogVLM.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Wenyi Hong", "Weihan Wang", "Qingsong Lv", "Jiazheng Xu", "Wenmeng Yu", "Junhui Ji", "Yan Wang", "Zihan Wang", "Yuxiao Dong", "Ming Ding", "Jie Tang" ], "externalIds": { "ArXiv": "2312.08914", "DBLP": "journals/corr/abs-2312-08914", "DOI": "10.1109/CVPR52733.2024.01354", "CorpusId": 266210390 }, "url": "https://www.semanticscholar.org/paper/1608e505c3e749a00ce0c56e0c2d53e0e9ae7fe4", "referenceCount": 42, "citationCount": 150, "influentialCitationCount": 29, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GPT-4V in Wonderland: Large Multimodal Models for Zero-Shot Smartphone GUI Navigation", "abstract": "We present MM-Navigator, a GPT-4V-based agent for the smartphone graphical user interface (GUI) navigation task. MM-Navigator can interact with a smartphone screen as human users, and determine subsequent actions to fulfill given instructions. Our findings demonstrate that large multimodal models (LMMs), specifically GPT-4V, excel in zero-shot GUI navigation through its advanced screen interpretation, action reasoning, and precise action localization capabilities.
We first benchmark MM-Navigator on our collected iOS screen dataset. According to human assessments, the system exhibited a 91\\% accuracy rate in generating reasonable action descriptions and a 75\\% accuracy rate in executing the correct actions for single-step instructions on iOS. Additionally, we evaluate the model on a subset of an Android screen navigation dataset, where the model outperforms previous GUI navigators in a zero-shot fashion. Our benchmark and detailed analyses aim to lay a robust groundwork for future research into the GUI navigation task. The project page is at https://github.com/zzxslp/MM-Navigator.", "year": 2023, "venue": "arXiv.org", "authors": [ "An Yan", "Zhengyuan Yang", "Wanrong Zhu", "K. Lin", "Linjie Li", "Jianfeng Wang", "Jianwei Yang", "Yiwu Zhong", "Julian J. McAuley", "Jianfeng Gao", "Zicheng Liu", "Lijuan Wang" ], "externalIds": { "ArXiv": "2311.07562", "DBLP": "journals/corr/abs-2311-07562", "DOI": "10.48550/arXiv.2311.07562", "CorpusId": 265149992 }, "url": "https://www.semanticscholar.org/paper/2fb605f67fee79cad94952ddfe0f686e926f49f5", "referenceCount": 56, "citationCount": 62, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "You Only Look at Screens: Multimodal Chain-of-Action Agents", "abstract": "Autonomous graphical user interface (GUI) agents aim to facilitate task automation by interacting with the user interface without manual intervention. Recent studies have investigated eliciting the capabilities of large language models (LLMs) for effective engagement in diverse environments. To align with the input-output requirement of LLMs, most existing approaches are developed under a sandbox setting where they rely on external tools and application-specific APIs to parse the environment into textual elements and interpret the predicted actions. Consequently, those approaches often grapple with inference inefficiency and error propagation risks. To mitigate the challenges, we introduce Auto-GUI, a multimodal solution that directly interacts with the interface, bypassing the need for environment parsing or reliance on application-dependent APIs. Moreover, we propose a chain-of-action technique -- leveraging a series of intermediate previous action histories and future action plans -- to help the agent decide what action to execute. We evaluate our approach on a new device-control benchmark AITW with 30$K$ unique instructions, spanning multi-step tasks such as application operation, web searching, and web shopping. Experimental results show that Auto-GUI achieves state-of-the-art performance with an action type prediction accuracy of 90\\% and an overall action success rate of 74\\%. 
Code is publicly available at https://github.com/cooelf/Auto-GUI.", "year": 2023, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Zhuosheng Zhang", "Aston Zhang" ], "externalIds": { "ArXiv": "2309.11436", "DBLP": "journals/corr/abs-2309-11436", "DOI": "10.48550/arXiv.2309.11436", "CorpusId": 262053313 }, "url": "https://www.semanticscholar.org/paper/6ab33b17cd45e7cbd2cb9b0c5a2d56e5eac1c814", "referenceCount": 59, "citationCount": 42, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Android in the Wild: A Large-Scale Dataset for Android Device Control", "abstract": "There is a growing interest in device-control systems that can interpret human natural language instructions and execute them on a digital device by directly controlling its user interface. We present a dataset for device-control research, Android in the Wild (AITW), which is orders of magnitude larger than current datasets. The dataset contains human demonstrations of device interactions, including the screens and actions, and corresponding natural language instructions. It consists of 715k episodes spanning 30k unique instructions, four versions of Android (v10-13),and eight device types (Pixel 2 XL to Pixel 6) with varying screen resolutions. It contains multi-step tasks that require semantic understanding of language and visual context. This dataset poses a new challenge: actions available through the user interface must be inferred from their visual appearance. And, instead of simple UI element-based actions, the action space consists of precise gestures (e.g., horizontal scrolls to operate carousel widgets). We organize our dataset to encourage robustness analysis of device-control systems, i.e., how well a system performs in the presence of new task descriptions, new applications, or new platform versions. We develop two agents and report performance across the dataset. The dataset is available at https://github.com/google-research/google-research/tree/master/android_in_the_wild.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Christopher Rawles", "Alice Li", "Daniel Rodriguez", "Oriana Riva", "T. Lillicrap" ], "externalIds": { "ArXiv": "2307.10088", "DBLP": "journals/corr/abs-2307-10088", "DOI": "10.48550/arXiv.2307.10088", "CorpusId": 259983067 }, "url": "https://www.semanticscholar.org/paper/060e0df71620f1844839f6a993dfa5fb8e4c3bf6", "referenceCount": 48, "citationCount": 64, "influentialCitationCount": 18, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Mind2Web: Towards a Generalist Agent for the Web", "abstract": "We introduce Mind2Web, the first dataset for developing and evaluating generalist agents for the web that can follow language instructions to complete complex tasks on any website. Existing datasets for web agents either use simulated websites or only cover a limited set of websites and tasks, thus not suitable for generalist web agents. With over 2,000 open-ended tasks collected from 137 websites spanning 31 domains and crowdsourced action sequences for the tasks, Mind2Web provides three necessary ingredients for building generalist web agents: 1) diverse domains, websites, and tasks, 2) use of real-world websites instead of simulated and simplified ones, and 3) a broad spectrum of user interaction patterns. Based on Mind2Web, we conduct an initial exploration of using large language models (LLMs) for building generalist web agents. 
While the raw HTML of real-world websites are often too large to be fed to LLMs, we show that first filtering it with a small LM significantly improves the effectiveness and efficiency of LLMs. Our solution demonstrates a decent level of performance, even on websites or entire domains the model has never seen before, but there is still a substantial room to improve towards truly generalizable agents. We open-source our dataset, model implementation, and trained models (https://osu-nlp-group.github.io/Mind2Web) to facilitate further research on building a generalist agent for the web.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Xiang Deng", "Yu Gu", "Boyuan Zheng", "Shijie Chen", "Samuel Stevens", "Boshi Wang", "Huan Sun", "Yu Su" ], "externalIds": { "DBLP": "journals/corr/abs-2306-06070", "ArXiv": "2306.06070", "DOI": "10.48550/arXiv.2306.06070", "CorpusId": 259129428 }, "url": "https://www.semanticscholar.org/paper/58f8925a8b87054ad0635a6398a7fe24935b1604", "referenceCount": 44, "citationCount": 189, "influentialCitationCount": 37, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multi-modal Queried Object Detection in the Wild", "abstract": "We introduce MQ-Det, an efficient architecture and pre-training strategy design to utilize both textual description with open-set generalization and visual exemplars with rich description granularity as category queries, namely, Multi-modal Queried object Detection, for real-world detection with both open-vocabulary categories and various granularity. MQ-Det incorporates vision queries into existing well-established language-queried-only detectors. A plug-and-play gated class-scalable perceiver module upon the frozen detector is proposed to augment category text with class-wise visual information. To address the learning inertia problem brought by the frozen detector, a vision conditioned masked language prediction strategy is proposed. MQ-Det's simple yet effective architecture and training strategy design is compatible with most language-queried object detectors, thus yielding versatile applications. Experimental results demonstrate that multi-modal queries largely boost open-world detection. For instance, MQ-Det significantly improves the state-of-the-art open-set detector GLIP by +7.8% AP on the LVIS benchmark via multi-modal queries without any downstream finetuning, and averagely +6.3% AP on 13 few-shot downstream tasks, with merely additional 3% modulating time required by GLIP. Code is available at https://github.com/YifanXu74/MQ-Det.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Yifan Xu", "Mengdan Zhang", "Chaoyou Fu", "Peixian Chen", "Xiaoshan Yang", "Ke Li", "Changsheng Xu" ], "externalIds": { "ArXiv": "2305.18980", "DBLP": "conf/nips/0008ZFCYLX23", "DOI": "10.48550/arXiv.2305.18980", "CorpusId": 258967442 }, "url": "https://www.semanticscholar.org/paper/dd8f69a4c298d66df844c648a6fd001aef43938f", "referenceCount": 55, "citationCount": 17, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On the Hidden Mystery of OCR in Large Multimodal Models", "abstract": "Large models have recently played a dominant role in natural language processing and multimodal vision-language learning. However, their effectiveness in text-related visual tasks remains relatively unexplored. 
In this paper, we conducted a comprehensive evaluation of Large Multimodal Models, such as GPT4V and Gemini, in various text-related visual tasks including Text Recognition, Scene Text-Centric Visual Question Answering (VQA), Document-Oriented VQA, Key Information Extraction (KIE), and Handwritten Mathematical Expression Recognition (HMER). To facilitate the assessment of Optical Character Recognition (OCR) capabilities in Large Multimodal Models, we propose OCRBench, a comprehensive evaluation benchmark. OCRBench contains 29 datasets, making it the most comprehensive OCR evaluation benchmark available. Furthermore, our study reveals both the strengths and weaknesses of these models, particularly in handling multilingual text, handwritten text, non-semantic text, and mathematical expression recognition. Most importantly, the baseline results presented in this study could provide a foundational framework for the conception and assessment of innovative strategies targeted at enhancing zero-shot multimodal techniques. The evaluation pipeline and benchmark are available at https://github.com/Yuliang-Liu/MultimodalOCR.", "year": 2023, "venue": "arXiv.org", "authors": [ "Yuliang Liu", "Zhang Li", "Hongliang Li", "Wenwen Yu", "Mingxin Huang", "Dezhi Peng", "Mingyu Liu", "Mingrui Chen", "Chunyuan Li", "Lianwen Jin", "Xiang Bai" ], "externalIds": { "ArXiv": "2305.07895", "DBLP": "journals/corr/abs-2305-07895", "DOI": "10.48550/arXiv.2305.07895", "CorpusId": 258685422 }, "url": "https://www.semanticscholar.org/paper/848e690a62c327e1210532d58a6b914097cac763", "referenceCount": 106, "citationCount": 120, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Grounding DINO: Marrying DINO with Grounded Pre-Training for Open-Set Object Detection", "abstract": "In this paper, we present an open-set object detector, called Grounding DINO, by marrying Transformer-based detector DINO with grounded pre-training, which can detect arbitrary objects with human inputs such as category names or referring expressions. The key solution of open-set object detection is introducing language to a closed-set detector for open-set concept generalization. To effectively fuse language and vision modalities, we conceptually divide a closed-set detector into three phases and propose a tight fusion solution, which includes a feature enhancer, a language-guided query selection, and a cross-modality decoder for cross-modality fusion. While previous works mainly evaluate open-set object detection on novel categories, we propose to also perform evaluations on referring expression comprehension for objects specified with attributes. Grounding DINO performs remarkably well on all three settings, including benchmarks on COCO, LVIS, ODinW, and RefCOCO/+/g. Grounding DINO achieves a $52.5$ AP on the COCO detection zero-shot transfer benchmark, i.e., without any training data from COCO. It sets a new record on the ODinW zero-shot benchmark with a mean $26.1$ AP. 
Code will be available at \\url{https://github.com/IDEA-Research/GroundingDINO}.", "year": 2023, "venue": "arXiv.org", "authors": [ "Shilong Liu", "Zhaoyang Zeng", "Tianhe Ren", "Feng Li", "Hao Zhang", "Jie Yang", "Chun-yue Li", "Jianwei Yang", "Hang Su", "Jun-Juan Zhu", "Lei Zhang" ], "externalIds": { "DBLP": "journals/corr/abs-2303-05499", "ArXiv": "2303.05499", "DOI": "10.48550/arXiv.2303.05499", "CorpusId": 257427307 }, "url": "https://www.semanticscholar.org/paper/c3e5a20b844c042d2174263d2fd5b30d8cc8f0b0", "referenceCount": 68, "citationCount": 988, "influentialCitationCount": 147, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LAION-5B: An open large-scale dataset for training next generation image-text models", "abstract": "Groundbreaking language-vision architectures like CLIP and DALL-E proved the utility of training on large amounts of noisy image-text data, without relying on expensive accurate labels used in standard vision unimodal supervised learning. The resulting models showed capabilities of strong text-guided image generation and transfer to downstream tasks, while performing remarkably at zero-shot classification with noteworthy out-of-distribution robustness. Since then, large-scale language-vision models like ALIGN, BASIC, GLIDE, Flamingo and Imagen made further improvements. Studying the training and capabilities of such models requires datasets containing billions of image-text pairs. Until now, no datasets of this size have been made openly available for the broader research community. To address this problem and democratize research on large-scale multi-modal models, we present LAION-5B - a dataset consisting of 5.85 billion CLIP-filtered image-text pairs, of which 2.32B contain English language. We show successful replication and fine-tuning of foundational models like CLIP, GLIDE and Stable Diffusion using the dataset, and discuss further experiments enabled with an openly available dataset of this scale. Additionally we provide several nearest neighbor indices, an improved web-interface for dataset exploration and subset generation, and detection scores for watermark, NSFW, and toxic content detection. Announcement page https://laion.ai/laion-5b-a-new-era-of-open-large-scale-multi-modal-datasets/", "year": 2022, "venue": "Neural Information Processing Systems", "authors": [ "Christoph Schuhmann", "Romain Beaumont", "Richard Vencu", "Cade Gordon", "Ross Wightman", "Mehdi Cherti", "Theo Coombes", "Aarush Katta", "Clayton Mullis", "Mitchell Wortsman", "P. Schramowski", "Srivatsa Kundurthy", "Katherine Crowson", "Ludwig Schmidt", "R. Kaczmarczyk", "J. Jitsev" ], "externalIds": { "DBLP": "conf/nips/SchuhmannBVGWCC22", "ArXiv": "2210.08402", "DOI": "10.48550/arXiv.2210.08402", "CorpusId": 252917726 }, "url": "https://www.semanticscholar.org/paper/e5c8960eb2ec034ffbd353ef39fd1cb541d3c7c9", "referenceCount": 109, "citationCount": 2214, "influentialCitationCount": 256, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding", "abstract": "Visually-situated language is ubiquitous -- sources range from textbooks with diagrams to web pages with images and tables, to mobile apps with buttons and forms. Perhaps due to this diversity, previous work has typically relied on domain-specific recipes with limited sharing of the underlying data, model architectures, and objectives. 
We present Pix2Struct, a pretrained image-to-text model for purely visual language understanding, which can be finetuned on tasks containing visually-situated language. Pix2Struct is pretrained by learning to parse masked screenshots of web pages into simplified HTML. The web, with its richness of visual elements cleanly reflected in the HTML structure, provides a large source of pretraining data well suited to the diversity of downstream tasks. Intuitively, this objective subsumes common pretraining signals such as OCR, language modeling, image captioning. In addition to the novel pretraining strategy, we introduce a variable-resolution input representation and a more flexible integration of language and vision inputs, where language prompts such as questions are rendered directly on top of the input image. For the first time, we show that a single pretrained model can achieve state-of-the-art results in six out of nine tasks across four domains: documents, illustrations, user interfaces, and natural images.", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "Kenton Lee", "Mandar Joshi", "Iulia Turc", "Hexiang Hu", "Fangyu Liu", "Julian Martin Eisenschlos", "Urvashi Khandelwal", "Peter Shaw", "Ming-Wei Chang", "Kristina Toutanova" ], "externalIds": { "DBLP": "journals/corr/abs-2210-03347", "ArXiv": "2210.03347", "DOI": "10.48550/arXiv.2210.03347", "CorpusId": 252762394 }, "url": "https://www.semanticscholar.org/paper/e1484706c0fab932fc9804df328044b3cb2f110d", "referenceCount": 62, "citationCount": 183, "influentialCitationCount": 28, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ScreenQA: Large-Scale Question-Answer Pairs over Mobile App Screenshots", "abstract": "We present a new benchmark and dataset, ScreenQA, for screen content understanding via question answering. The existing screen datasets are focused either on structure and component-level understanding, or on a much higher-level composite task such as navigation and task completion. We attempt to bridge the gap between these two by annotating 86K question-answer pairs over the RICO dataset in hope to benchmark the screen reading comprehension capacity. This work is also the first to annotate answers for different application scenarios, including both full sentences and short forms, as well as supporting UI contents on screen and their bounding boxes. With the rich annotation, we discuss and define the evaluation metrics of the benchmark, show applications of the dataset, and provide a few baselines using closed and open source models.", "year": 2022, "venue": "arXiv.org", "authors": [ "Yu-Chung Hsiao", "Fedir Zubach", "Maria Wang", "Jindong Chen" ], "externalIds": { "ArXiv": "2209.08199", "DBLP": "journals/corr/abs-2209-08199", "DOI": "10.48550/arXiv.2209.08199", "CorpusId": 252367252 }, "url": "https://www.semanticscholar.org/paper/fe351bc1bda19583a7d6b1a96a9b18f59c40b213", "referenceCount": 62, "citationCount": 13, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "META-GUI: Towards Multi-modal Conversational Agents on Mobile GUI", "abstract": "Task-oriented dialogue (TOD) systems have been widely used by mobile phone intelligent assistants to accomplish tasks such as calendar scheduling or hotel reservation. Current TOD systems usually focus on multi-turn text/speech interaction, then they would call back-end APIs designed for TODs to perform the task. 
However, this API-based architecture greatly limits the information-searching capability of intelligent assistants and may even lead to task failure if TOD-specific APIs are not available or the task is too complicated to be executed by the provided APIs. In this paper, we propose a new TOD architecture: GUI-based task-oriented dialogue system (GUI-TOD). A GUI-TOD system can directly perform GUI operations on real APPs and execute tasks without invoking TOD-specific backend APIs. Furthermore, we release META-GUI, a dataset for training a Multi-modal convErsaTional Agent on mobile GUI. We also propose a multi-model action prediction and response model, which show promising results on META-GUI. The dataset, codes and leaderboard are publicly available.", "year": 2022, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Liangtai Sun", "Xingyu Chen", "Lu Chen", "Tianle Dai", "Zichen Zhu", "Kai Yu" ], "externalIds": { "ACL": "2022.emnlp-main.449", "ArXiv": "2205.11029", "DBLP": "conf/emnlp/SunCCDZY22", "DOI": "10.48550/arXiv.2205.11029", "CorpusId": 248986378 }, "url": "https://www.semanticscholar.org/paper/780d4919f861ca68825d264bccf47411772e17c0", "referenceCount": 42, "citationCount": 34, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "VUT: Versatile UI Transformer for Multi-Modal Multi-Task User Interface Modeling", "abstract": "User interface modeling is inherently multimodal, which involves several distinct types of data: images, structures and language. The tasks are also diverse, including object detection, language generation and grounding. In this paper, we present VUT, a Versatile UI Transformer that takes multimodal input and simultaneously accomplishes 5 distinct tasks with the same model. Our model consists of a multimodal Transformer encoder that jointly encodes UI images and structures, and performs UI object detection when the UI structures are absent in the input. Our model also consists of an auto-regressive Transformer model that encodes the language input and decodes output, for both question-answering and command grounding with respect to the UI. Our experiments show that for most of the tasks, when trained jointly for multi-tasks, VUT substantially reduces the number of models and footprints needed for performing multiple tasks, while achieving accuracy exceeding or on par with baseline models trained for each individual task.", "year": 2021, "venue": "arXiv.org", "authors": [ "Yang Li", "Gang Li", "Xin Zhou", "Mostafa Dehghani", "A. Gritsenko" ], "externalIds": { "ArXiv": "2112.05692", "DBLP": "journals/corr/abs-2112-05692", "CorpusId": 245117509 }, "url": "https://www.semanticscholar.org/paper/0edfd6f14f533d65d125d70207fd01fbcd429ef7", "referenceCount": 28, "citationCount": 26, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Tip-Adapter: Training-free CLIP-Adapter for Better Vision-Language Modeling", "abstract": "Contrastive Vision-Language Pre-training, known as CLIP, has provided a new paradigm for learning visual representations by using large-scale contrastive image-text pairs. It shows impressive performance on zero-shot knowledge transfer to downstream tasks. To further enhance CLIP's few-shot capability, CLIP-Adapter proposed to fine-tune a lightweight residual feature adapter and significantly improves the performance for few-shot classification. 
However, such a process still needs extra training and computational resources. In this paper, we propose \\textbf{T}raining-Free CL\\textbf{IP}-\\textbf{Adapter} (\\textbf{Tip-Adapter}), which not only inherits CLIP's training-free advantage but also performs comparably or even better than CLIP-Adapter. Tip-Adapter does not require any back propagation for training the adapter, but creates the weights by a key-value cache model constructed from the few-shot training set. In this non-parametric manner, Tip-Adapter acquires well-performed adapter weights without any training, which is both efficient and effective. Moreover, the performance of Tip-Adapter can be further boosted by fine-tuning such properly initialized adapter for only a few epochs with super-fast convergence speed. We conduct extensive experiments of few-shot classification on ImageNet and other 10 datasets to demonstrate the superiority of proposed Tip-Adapter. The code will be released at \\url{https://github.com/gaopengcuhk/Tip-Adapter}.", "year": 2021, "venue": "arXiv.org", "authors": [ "Renrui Zhang", "Rongyao Fang", "Wei Zhang", "Peng Gao", "Kunchang Li", "Jifeng Dai", "Y. Qiao", "Hongsheng Li" ], "externalIds": { "ArXiv": "2111.03930", "DBLP": "journals/corr/abs-2111-03930", "CorpusId": 243847522 }, "url": "https://www.semanticscholar.org/paper/e70b7eb3b22f0a49eb5e645be646d5f35d1e693a", "referenceCount": 69, "citationCount": 297, "influentialCitationCount": 37, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "UIBert: Learning Generic Multimodal Representations for UI Understanding", "abstract": "To improve the accessibility of smart devices and to simplify their usage, building models which understand user interfaces (UIs) and assist users to complete their tasks is critical. However, unique challenges are proposed by UI-specific characteristics, such as how to effectively leverage multimodal UI features that involve image, text, and structural metadata and how to achieve good performance when high-quality labeled data is unavailable. To address such challenges we introduce UIBert, a transformer-based joint image-text model trained through novel pre-training tasks on large-scale unlabeled UI data to learn generic feature representations for a UI and its components. Our key intuition is that the heterogeneous features in a UI are self-aligned, i.e., the image and text features of UI components, are predictive of each other. We propose five pretraining tasks utilizing this self-alignment among different features of a UI component and across various components in the same UI. We evaluate our method on nine real-world downstream UI tasks where UIBert outperforms strong multimodal baselines by up to 9.26% accuracy.", "year": 2021, "venue": "International Joint Conference on Artificial Intelligence", "authors": [ "Chongyang Bai", "Xiaoxue Zang", "Ying Xu", "Srinivas Sunkara", "Abhinav Rastogi", "Jindong Chen", "B. A. Y. 
Arcas" ], "externalIds": { "DBLP": "journals/corr/abs-2107-13731", "ArXiv": "2107.13731", "DOI": "10.24963/ijcai.2021/235", "CorpusId": 236493482 }, "url": "https://www.semanticscholar.org/paper/39714122e7a903017456d7026dd91b872df9913b", "referenceCount": 28, "citationCount": 60, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Mobile App Tasks with Iterative Feedback (MoTIF): Addressing Task Feasibility in Interactive Visual Environments", "abstract": "In recent years, vision-language research has shifted to study tasks which require more complex reasoning, such as interactive question answering, visual common sense reasoning, and question-answer plausibility prediction. However, the datasets used for these problems fail to capture the complexity of real inputs and multimodal environments, such as ambiguous natural language requests and diverse digital domains. We introduce Mobile app Tasks with Iterative Feedback (MoTIF), a dataset with natural language commands for the greatest number of interactive environments to date. MoTIF is the first to contain natural language requests for interactive environments that are not satisfiable, and we obtain follow-up questions on this subset to enable research on task uncertainty resolution. We perform initial feasibility classification experiments and only reach an F1 score of 37.3, verifying the need for richer vision-language representations and improved architectures to reason about task feasibility.", "year": 2021, "venue": "arXiv.org", "authors": [ "Andrea Burns", "Deniz Arsan", "Sanjna Agrawal", "Ranjitha Kumar", "Kate Saenko", "Bryan A. Plummer" ], "externalIds": { "ArXiv": "2104.08560", "DBLP": "journals/corr/abs-2104-08560", "CorpusId": 233296801 }, "url": "https://www.semanticscholar.org/paper/4234bc0e679c52421f52c810c5dbbd222954a074", "referenceCount": 18, "citationCount": 15, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Boundary IoU: Improving Object-Centric Image Segmentation Evaluation", "abstract": "We present Boundary IoU (Intersection-over-Union), a new segmentation evaluation measure focused on boundary quality. We perform an extensive analysis across different error types and object sizes and show that Boundary IoU is significantly more sensitive than the standard Mask IoU measure to boundary errors for large objects and does not over-penalize errors on smaller objects. The new quality measure displays several desirable characteristics like symmetry w.r.t. prediction/ground truth pairs and balanced responsiveness across scales, which makes it more suitable for segmentation evaluation than other boundary-focused measures like Trimap IoU and F-measure. Based on Boundary IoU, we update the standard evaluation protocols for instance and panoptic segmentation tasks by proposing the Boundary AP (Average Precision) and Boundary PQ (Panoptic Quality) metrics, respectively. Our experiments show that the new evaluation metrics track boundary quality improvements that are generally overlooked by current Mask IoU-based evaluation metrics. We hope that the adoption of the new boundary-sensitive evaluation metrics will lead to rapid progress in segmentation methods that improve boundary quality. 1", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Bowen Cheng", "Ross B. Girshick", "Piotr Doll'ar", "A. Berg", "A. 
Kirillov" ], "externalIds": { "DBLP": "journals/corr/abs-2103-16562", "ArXiv": "2103.16562", "DOI": "10.1109/CVPR46437.2021.01508", "CorpusId": 232417671 }, "url": "https://www.semanticscholar.org/paper/363c260b6044bd35b0c200a4481228bbc6eb49a7", "referenceCount": 41, "citationCount": 220, "influentialCitationCount": 29, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Screen Recognition: Creating Accessibility Metadata for Mobile Applications from Pixels", "abstract": "Many accessibility features available on mobile platforms require applications (apps) to provide complete and accurate metadata describing user interface (UI) components. Unfortunately, many apps do not provide sufficient metadata for accessibility features to work as expected. In this paper, we explore inferring accessibility metadata for mobile apps from their pixels, as the visual interfaces often best reflect an app’s full functionality. We trained a robust, fast, memory-efficient, on-device model to detect UI elements using a dataset of 77,637 screens (from 4,068 iPhone apps) that we collected and annotated. To further improve UI detections and add semantic information, we introduced heuristics (e.g., UI grouping and ordering) and additional models (e.g., recognize UI content, state, interactivity). We built Screen Recognition to generate accessibility metadata to augment iOS VoiceOver. In a study with 9 screen reader users, we validated that our approach improves the accessibility of existing mobile apps, enabling even previously inaccessible apps to be used.", "year": 2021, "venue": "International Conference on Human Factors in Computing Systems", "authors": [ "Xiaoyi Zhang", "Lilian de Greef", "Amanda Swearngin", "Samuel White", "Kyle I. Murray", "Lisa Yu", "Qi Shan", "Jeffrey Nichols", "Jason Wu", "Chris Fleizach", "Aaron Everitt", "Jeffrey P. Bigham" ], "externalIds": { "DBLP": "journals/corr/abs-2101-04893", "ArXiv": "2101.04893", "DOI": "10.1145/3411764.3445186", "CorpusId": 231592643 }, "url": "https://www.semanticscholar.org/paper/d7ac291770832162b3e7a68b0246092a4115d091", "referenceCount": 80, "citationCount": 130, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale", "abstract": "While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Alexey Dosovitskiy", "Lucas Beyer", "Alexander Kolesnikov", "Dirk Weissenborn", "Xiaohua Zhai", "Thomas Unterthiner", "Mostafa Dehghani", "Matthias Minderer", "G. Heigold", "S. Gelly", "Jakob Uszkoreit", "N. 
Houlsby" ], "externalIds": { "MAG": "3094502228", "ArXiv": "2010.11929", "DBLP": "conf/iclr/DosovitskiyB0WZ21", "CorpusId": 225039882 }, "url": "https://www.semanticscholar.org/paper/268d347e8a55b5eb82fb5e7d2f800e33c75ab18a", "referenceCount": 65, "citationCount": 28233, "influentialCitationCount": 4121, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Mapping Natural Language Instructions to Mobile UI Action Sequences", "abstract": "We present a new problem: grounding natural language instructions to mobile user interface actions, and create three new datasets for it. For full task evaluation, we create PixelHelp, a corpus that pairs English instructions with actions performed by people on a mobile UI emulator. To scale training, we decouple the language and action data by (a) annotating action phrase spans in How-To instructions and (b) synthesizing grounded descriptions of actions for mobile user interfaces. We use a Transformer to extract action phrase tuples from long-range natural language instructions. A grounding Transformer then contextually represents UI objects using both their content and screen position and connects them to object descriptions. Given a starting screen and instruction, our model achieves 70.59% accuracy on predicting complete ground-truth action sequences in PixelHelp.", "year": 2020, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Yang Li", "Jiacong He", "Xiaoxia Zhou", "Yuan Zhang", "Jason Baldridge" ], "externalIds": { "MAG": "3022955902", "DBLP": "journals/corr/abs-2005-03776", "ArXiv": "2005.03776", "ACL": "2020.acl-main.729", "DOI": "10.18653/v1/2020.acl-main.729", "CorpusId": 218571167 }, "url": "https://www.semanticscholar.org/paper/d9a8dfe020ee8c1afc94fca6f934876f85ad9c85", "referenceCount": 32, "citationCount": 146, "influentialCitationCount": 27, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Objects365: A Large-Scale, High-Quality Dataset for Object Detection", "abstract": "In this paper, we introduce a new large-scale object detection dataset, Objects365, which has 365 object categories over 600K training images. More than 10 million, high-quality bounding boxes are manually labeled through a three-step, carefully designed annotation pipeline. It is the largest object detection dataset (with full annotation) so far and establishes a more challenging benchmark for the community. Objects365 can serve as a better feature learning dataset for localization-sensitive tasks like object detection and semantic segmentation. The Objects365 pre-trained models significantly outperform ImageNet pre-trained models with 5.6 points gain (42 vs 36.4) based on the standard setting of 90K iterations on COCO benchmark. Even compared with much long training time like 540K iterations, our Objects365 pretrained model with 90K iterations still have 2.7 points gain (42 vs 39.3). Meanwhile, the finetuning time can be greatly reduced (up to 10 times) when reaching the same accuracy. Better generalization ability of Object365 has also been verified on CityPersons, VOC segmentation, and ADE tasks. 
The dataset as well as the pretrained-models have been released at www.objects365.org.", "year": 2019, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Shuai Shao", "Zeming Li", "Tianyuan Zhang", "Chao Peng", "Gang Yu", "Xiangyu Zhang", "Jing Li", "Jian Sun" ], "externalIds": { "MAG": "2983943451", "DBLP": "conf/iccv/0005LZPYZLS19", "DOI": "10.1109/ICCV.2019.00852", "CorpusId": 207967883 }, "url": "https://www.semanticscholar.org/paper/c5ff974a69fd0c760b4855b819e61e89f31cfffe", "referenceCount": 40, "citationCount": 574, "influentialCitationCount": 92, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Rico: A Mobile App Dataset for Building Data-Driven Design Applications", "abstract": "Data-driven models help mobile app designers understand best practices and trends, and can be used to make predictions about design performance and support the creation of adaptive UIs. This paper presents Rico, the largest repository of mobile app designs to date, created to support five classes of data-driven applications: design search, UI layout generation, UI code generation, user interaction modeling, and user perception prediction. To create Rico, we built a system that combines crowdsourcing and automation to scalably mine design and interaction data from Android apps at runtime. The Rico dataset contains design data from more than 9.7k Android apps spanning 27 categories. It exposes visual, textual, structural, and interactive design properties of more than 72k unique UI screens. To demonstrate the kinds of applications that Rico enables, we present results from training an autoencoder for UI layout similarity, which supports query- by-example search over UIs.", "year": 2017, "venue": "ACM Symposium on User Interface Software and Technology", "authors": [ "Biplab Deka", "Zifeng Huang", "Chad Franzen", "Joshua Hibschman", "Daniel Afergan", "Y. Li", "Jeffrey Nichols", "Ranjitha Kumar" ], "externalIds": { "MAG": "2765874585", "DBLP": "conf/uist/DekaHFHALNK17", "DOI": "10.1145/3126594.3126651", "CorpusId": 6623010 }, "url": "https://www.semanticscholar.org/paper/775f7845e4df2576762960943294bd28733e2046", "referenceCount": 40, "citationCount": 452, "influentialCitationCount": 81, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "COCO-Text: Dataset and Benchmark for Text Detection and Recognition in Natural Images", "abstract": "This paper describes the COCO-Text dataset. In recent years large-scale datasets like SUN and Imagenet drove the advancement of scene understanding and object recognition. The goal of COCO-Text is to advance state-of-the-art in text detection and recognition in natural images. The dataset is based on the MS COCO dataset, which contains images of complex everyday scenes. The images were not collected with text in mind and thus contain a broad variety of text instances. To reflect the diversity of text in natural scenes, we annotate text with (a) location in terms of a bounding box, (b) fine-grained classification into machine printed text and handwritten text, (c) classification into legible and illegible text, (d) script of the text and (e) transcriptions of legible text. The dataset contains over 173k text annotations in over 63k images. We provide a statistical analysis of the accuracy of our annotations. In addition, we present an analysis of three leading state-of-the-art photo Optical Character Recognition (OCR) approaches on our dataset. 
While scene text detection and recognition enjoys strong advances in recent years, we identify significant shortcomings motivating future work.", "year": 2016, "venue": "arXiv.org", "authors": [ "Andreas Veit", "Tomas Matera", "Lukás Neumann", "Jiri Matas", "Serge J. Belongie" ], "externalIds": { "MAG": "2253806798", "ArXiv": "1601.07140", "DBLP": "journals/corr/VeitMNMB16", "CorpusId": 2838551 }, "url": "https://www.semanticscholar.org/paper/b7325b788320f96f7b152768226f16e390ab6475", "referenceCount": 22, "citationCount": 459, "influentialCitationCount": 63, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Probabilistic Relevance Framework: BM25 and Beyond", "abstract": "The Probabilistic Relevance Framework (PRF) is a formal framework for document retrieval, grounded in work done in the 1970—1980s, which led to the development of one of the most successful text-retrieval algorithms, BM25. In recent years, research in the PRF has yielded new retrieval models capable of taking into account document meta-data (especially structure and link-graph information). Again, this has led to one of the most successful Web-search and corporate-search algorithms, BM25F. This work presents the PRF from a conceptual point of view, describing the probabilistic modelling assumptions behind the framework and the different ranking algorithms that result from its application: the binary independence model, relevance feedback models, BM25 and BM25F. It also discusses the relation between the PRF and other statistical models for IR, and covers some related topics, such as the use of non-textual features, and parameter optimisation for models with free parameters.", "year": 2009, "venue": "Foundations and Trends in Information Retrieval", "authors": [ "S. Robertson", "H. Zaragoza" ], "externalIds": { "MAG": "2155482025", "DBLP": "journals/ftir/RobertsonZ09", "DOI": "10.1561/1500000019", "CorpusId": 207178704 }, "url": "https://www.semanticscholar.org/paper/47ced790a563344efae66588b5fb7fe6cca29ed3", "referenceCount": 60, "citationCount": 2946, "influentialCitationCount": 557, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Empowering LLM to use Smartphone for Intelligent Task Automation", "abstract": "Mobile task automation is an attractive technique that aims to enable voice-based hands-free user interaction with smartphones. However, existing approaches suffer from poor scalability due to the limited language understanding ability and the non-trivial manual efforts required from developers or end-users. The recent advance of large language models (LLMs) in language understanding and reasoning inspires us to rethink the problem from a model-centric perspective, where task preparation, comprehension, and execution are handled by a unified language model. In this work, we introduce AutoDroid, a mobile task automation system that can handle arbitrary tasks on any Android application without manual efforts. The key insight is to combine the commonsense knowledge of LLMs and domain-specific knowledge of apps through automated dynamic analysis. The main components include a functionality-aware UI representation method that bridges the UI with the LLM, exploration-based memory injection techniques that augment the app-specific domain knowledge of LLM, and a multi-granularity query optimization module that reduces the cost of model inference.
We integrate AutoDroid with off-the-shelf LLMs including online GPT-4/GPT-3.5 and on-device Vicuna, and evaluate its performance on a new benchmark for memory-augmented Android task automation with 158 common tasks. The results demonstrated that AutoDroid is able to precisely generate actions with an accuracy of 90.9%, and complete tasks with a success rate of 71.3%, outperforming the GPT-4-powered baselines by 36.4% and 39.7%. The demo, benchmark suites, and source code of AutoDroid will be released at https://autodroid-sys.github.io/.", "year": 2023, "venue": "arXiv.org", "authors": [ "Hao Wen", "Yuanchun Li", "Guohong Liu", "Shanhui Zhao", "Tao Yu", "Toby Jia-Jun Li", "Shiqi Jiang", "Yunhao Liu", "Yaqin Zhang", "Yunxin Liu" ], "externalIds": { "DBLP": "journals/corr/abs-2308-15272", "DOI": "10.48550/arXiv.2308.15272", "CorpusId": 268890279 }, "url": "https://www.semanticscholar.org/paper/69dd4df178437efad68a675cf8742ed09e7d53c8", "referenceCount": 70, "citationCount": 36, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Qwen-VL: A Frontier Large Vision-Language Model with Versatile Abilities", "abstract": "We introduce the Qwen-VL series, a set of large-scale vision-language models designed to perceive and understand both text and images. Comprising Qwen-VL and Qwen-VL-Chat, these models exhibit remarkable performance in tasks like image captioning, question answering, visual localization, and flexible interaction. The evaluation covers a wide range of tasks including zero-shot captioning, visual or document visual question answering, and grounding. We demonstrate the Qwen-VL outperforms existing Large Vision Language Models (LVLMs). We present their architecture, training, capabilities, and performance, highlighting their contributions to advancing multimodal artificial intelligence. Code, demo and models are available at https://github.com/QwenLM/Qwen-VL .", "year": 2023, "venue": "arXiv.org", "authors": [ "Jinze Bai", "Shuai Bai", "Shusheng Yang", "Shijie Wang", "Sinan Tan", "Peng Wang", "Junyang Lin", "Chang Zhou", "Jingren Zhou" ], "externalIds": { "DBLP": "journals/corr/abs-2308-12966", "DOI": "10.48550/arXiv.2308.12966", "CorpusId": 263875678 }, "url": "https://www.semanticscholar.org/paper/5ddb51ae85deca14dc7fc8adc07305c22a1ebe0a", "referenceCount": 80, "citationCount": 570, "influentialCitationCount": 133, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "2022. Coyo-700m", "abstract": null, "year": null, "venue": "Coyo-700m", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "instructions via graphical user interfaces", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Cogvlm: Visual expert for pretrained language models", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. 
Gpt-4 technical report", "abstract": null, "year": null, "venue": "OpenAI", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Unified image classification, object detection, and visual relationship detection at scale", "abstract": null, "year": null, "venue": "International journal of computer vision", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "VLM's Eye Examination: Instruct and Inspect Visual Competency of Vision Language Models": { "paper_title": "VLM's Eye Examination: Instruct and Inspect Visual Competency of Vision Language Models", "arxiv_id": "2409.14759v1", "keyword": "vision language model", "authors": [ "Nam Hyeon-Woo", "Moon Ye-Bin", "Wonseok Choi", "Lee Hyun", "Tae-Hyun Oh" ], "references": [ { "title": "Improved Baselines with Visual Instruction Tuning", "abstract": "Large multimodal models (LMM) have recently shown encouraging progress with visual instruction tuning. In this paper, we present the first systematic study to investigate the design choices of LMMs in a controlled setting under the LLaVA framework. We show that the fully-connected vision-language connector in LLaVA is surprisingly powerful and data-efficient. With simple modifications to LLaVA, namely, using CLIP-ViT-L-336px with an MLP projection and adding academic-task-oriented VQA data with response formatting prompts, we establish stronger baselines that achieve state-of-the-art across 11 benchmarks. Our final 13B checkpoint uses merely 1.2M publicly available data, and finishes full training in ~1 day on a single 8-A100 node. Furthermore, we present some early exploration of open problems in LMMs, including scaling to higher resolution inputs, compositional capabilities, and model hallucination, etc. We hope this makes state-of-the-art LMM research more accessible. Code and model will be publicly available.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Haotian Liu", "Chunyuan Li", "Yuheng Li", "Yong Jae Lee" ], "externalIds": { "DBLP": "journals/corr/abs-2310-03744", "ArXiv": "2310.03744", "DOI": "10.1109/CVPR52733.2024.02484", "CorpusId": 263672058 }, "url": "https://www.semanticscholar.org/paper/124d4d374fbef2016fa9880489871a58a7450644", "referenceCount": 70, "citationCount": 1191, "influentialCitationCount": 284, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Physics of Language Models: Part 3.1, Knowledge Storage and Extraction", "abstract": "Large language models (LLMs) can store a vast amount of world knowledge, often extractable via question-answering (e.g.,\"What is Abraham Lincoln's birthday?\"). However, do they answer such questions based on exposure to similar questions during training (i.e., cheating), or by genuinely learning to extract knowledge from sources like Wikipedia? In this paper, we investigate this issue using a controlled biography dataset. We find a strong correlation between the model's ability to extract knowledge and various diversity measures of the training data. $\\textbf{Essentially}$, for knowledge to be reliably extracted, it must be sufficiently augmented (e.g., through paraphrasing, sentence shuffling, translations) $\\textit{during pretraining}$. 
Without such augmentation, knowledge may be memorized but not extractable, leading to 0% accuracy, regardless of subsequent instruction fine-tuning. To understand why this occurs, we employ (nearly) linear probing to demonstrate a strong connection between the observed correlation and how the model internally encodes knowledge -- whether it is linearly encoded in the hidden embeddings of entity names or distributed across other token embeddings in the training text. This paper provides $\\textbf{several key recommendations for LLM pretraining in the industry}$: (1) rewrite the pretraining data -- using small, auxiliary models -- to provide knowledge augmentation, and (2) incorporate more instruction-finetuning data into the pretraining stage before it becomes too late.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Zeyuan Allen-Zhu", "Yuanzhi Li" ], "externalIds": { "ArXiv": "2309.14316", "DBLP": "journals/corr/abs-2309-14316", "DOI": "10.48550/arXiv.2309.14316", "CorpusId": 262825178 }, "url": "https://www.semanticscholar.org/paper/f29f8b8aa2b7e608199b65d3cf751969d4024132", "referenceCount": 43, "citationCount": 45, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Physics of Language Models: Part 3.2, Knowledge Manipulation", "abstract": "Language models can store vast factual knowledge, yet their ability to flexibly use this knowledge for downstream tasks (e.g., via instruction finetuning) remains questionable. This paper investigates four fundamental knowledge manipulation tasks: retrieval (e.g.,\"What is person A's attribute X?\"), classification (e.g.,\"Is A's attribute X even or odd?\"), comparison (e.g.,\"Is A greater than B in attribute X?\"), and inverse search (e.g.,\"Which person's attribute X equals T?\"). We show that language models excel in knowledge retrieval but struggle even in the simplest classification or comparison tasks unless Chain of Thoughts (CoTs) are employed during both training and inference. Moreover, their performance in inverse knowledge search is virtually 0%, regardless of the prompts. Our primary contribution is a controlled, synthetic experiment that confirms these weaknesses are inherent to language models: they cannot efficiently manipulate knowledge from pre-training data, even when such knowledge is perfectly stored in the models, despite adequate training and sufficient model size. Our findings also apply to modern pretrained language models such as GPT-4, thus giving rise to many Turing tests to distinguish Humans from contemporary AIs.", "year": 2023, "venue": "arXiv.org", "authors": [ "Zeyuan Allen-Zhu", "Yuanzhi Li" ], "externalIds": { "ArXiv": "2309.14402", "DBLP": "journals/corr/abs-2309-14402", "DOI": "10.48550/arXiv.2309.14402", "CorpusId": 262898066 }, "url": "https://www.semanticscholar.org/paper/47daf5f81470564f94adcac672405c2cd39dd186", "referenceCount": 37, "citationCount": 46, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Analyzing Chain-of-Thought Prompting in Large Language Models via Gradient-based Feature Attributions", "abstract": "Chain-of-thought (CoT) prompting has been shown to empirically improve the accuracy of large language models (LLMs) on various question answering tasks. 
While understanding why CoT prompting is effective is crucial to ensuring that this phenomenon is a consequence of desired model behavior, little work has addressed this; nonetheless, such an understanding is a critical prerequisite for responsible model deployment. We address this question by leveraging gradient-based feature attribution methods which produce saliency scores that capture the influence of input tokens on model output. Specifically, we probe several open-source LLMs to investigate whether CoT prompting affects the relative importances they assign to particular input tokens. Our results indicate that while CoT prompting does not increase the magnitude of saliency scores attributed to semantically relevant tokens in the prompt compared to standard few-shot prompting, it increases the robustness of saliency scores to question perturbations and variations in model output.", "year": 2023, "venue": "arXiv.org", "authors": [ "Skyler Wu", "Eric Meng Shen", "Charumathi Badrinath", "Jiaqi Ma", "Himabindu Lakkaraju" ], "externalIds": { "DBLP": "journals/corr/abs-2307-13339", "ArXiv": "2307.13339", "DOI": "10.48550/arXiv.2307.13339", "CorpusId": 260155139 }, "url": "https://www.semanticscholar.org/paper/71d68782c3da41b77866c2fd0cb65726f60b3af1", "referenceCount": 94, "citationCount": 17, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Shikra: Unleashing Multimodal LLM's Referential Dialogue Magic", "abstract": "In human conversations, individuals can indicate relevant regions within a scene while addressing others. In turn, the other person can then respond by referring to specific regions if necessary. This natural referential ability in dialogue remains absent in current Multimodal Large Language Models (MLLMs). To fill this gap, this paper proposes an MLLM called Shikra, which can handle spatial coordinate inputs and outputs in natural language. Its architecture consists of a vision encoder, an alignment layer, and a LLM. It is designed to be straightforward and simple, without the need for extra vocabularies, position encoder, pre-/post-detection modules, or external plug-in models. All inputs and outputs are in natural language form. Referential dialogue is a superset of various vision-language (VL) tasks. Shikra can naturally handle location-related tasks like REC and PointQA, as well as conventional VL tasks such as Image Captioning and VQA. Experimental results showcase Shikra's promising performance. Furthermore, it enables numerous exciting applications, like providing mentioned objects' coordinates in chains of thoughts and comparing user-pointed regions similarities. Our code, model and dataset are accessed at https://github.com/shikras/shikra.", "year": 2023, "venue": "arXiv.org", "authors": [ "Ke Chen", "Zhao Zhang", "Weili Zeng", "Richong Zhang", "Feng Zhu", "Rui Zhao" ], "externalIds": { "ArXiv": "2306.15195", "DBLP": "journals/corr/abs-2306-15195", "DOI": "10.48550/arXiv.2306.15195", "CorpusId": 259262082 }, "url": "https://www.semanticscholar.org/paper/e2a58fd18961c3941102989e3a3d0d27c615e015", "referenceCount": 57, "citationCount": 383, "influentialCitationCount": 71, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Playing repeated games with Large Language Models", "abstract": "Large Language Models (LLMs) are transforming society and permeating into diverse applications. As a result, LLMs will frequently interact with us and other agents. 
It is, therefore, of great societal value to understand how LLMs behave in interactive social settings. Here, we propose to use behavioral game theory to study LLM's cooperation and coordination behavior. To do so, we let different LLMs (GPT-3, GPT-3.5, and GPT-4) play finitely repeated games with each other and with other, human-like strategies. Our results show that LLMs generally perform well in such tasks and also uncover persistent behavioral signatures. In a large set of two players-two strategies games, we find that LLMs are particularly good at games where valuing their own self-interest pays off, like the iterated Prisoner's Dilemma family. However, they behave sub-optimally in games that require coordination. We, therefore, further focus on two games from these distinct families. In the canonical iterated Prisoner's Dilemma, we find that GPT-4 acts particularly unforgivingly, always defecting after another agent has defected only once. In the Battle of the Sexes, we find that GPT-4 cannot match the behavior of the simple convention to alternate between options. We verify that these behavioral signatures are stable across robustness checks. Finally, we show how GPT-4's behavior can be modified by providing further information about the other player as well as by asking it to predict the other player's actions before making a choice. These results enrich our understanding of LLM's social behavior and pave the way for a behavioral game theory for machines.", "year": 2023, "venue": "arXiv.org", "authors": [ "Elif Akata", "Lion Schulz", "Julian Coda-Forno", "Seong Joon Oh", "M. Bethge", "Eric Schulz" ], "externalIds": { "ArXiv": "2305.16867", "DBLP": "journals/corr/abs-2305-16867", "DOI": "10.48550/arXiv.2305.16867", "CorpusId": 258947115 }, "url": "https://www.semanticscholar.org/paper/3f98cf521222c65522200037c0eb95a17081b2dd", "referenceCount": 62, "citationCount": 82, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning", "abstract": "Large-scale pre-training and instruction tuning have been successful at creating general-purpose language models with broad competence. However, building general-purpose vision-language models is challenging due to the rich input distributions and task diversity resulting from the additional visual input. Although vision-language pretraining has been widely studied, vision-language instruction tuning remains under-explored. In this paper, we conduct a systematic and comprehensive study on vision-language instruction tuning based on the pretrained BLIP-2 models. We gather 26 publicly available datasets, covering a wide variety of tasks and capabilities, and transform them into instruction tuning format. Additionally, we introduce an instruction-aware Query Transformer, which extracts informative features tailored to the given instruction. Trained on 13 held-in datasets, InstructBLIP attains state-of-the-art zero-shot performance across all 13 held-out datasets, substantially outperforming BLIP-2 and larger Flamingo models. Our models also lead to state-of-the-art performance when finetuned on individual downstream tasks (e.g., 90.7% accuracy on ScienceQA questions with image contexts). Furthermore, we qualitatively demonstrate the advantages of InstructBLIP over concurrent multimodal models. 
All InstructBLIP models are open-sourced at https://github.com/salesforce/LAVIS/tree/main/projects/instructblip.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Wenliang Dai", "Junnan Li", "Dongxu Li", "A. M. H. Tiong", "Junqi Zhao", "Weisheng Wang", "Boyang Albert Li", "Pascale Fung", "Steven C. H. Hoi" ], "externalIds": { "ArXiv": "2305.06500", "DBLP": "journals/corr/abs-2305-06500", "DOI": "10.48550/arXiv.2305.06500", "CorpusId": 258615266 }, "url": "https://www.semanticscholar.org/paper/8bd6a2a89503be083176f2cc26fabedb79238cbd", "referenceCount": 52, "citationCount": 1182, "influentialCitationCount": 281, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LLaMA-Adapter V2: Parameter-Efficient Visual Instruction Model", "abstract": "How to efficiently transform large language models (LLMs) into instruction followers is recently a popular research direction, while training LLM for multi-modal reasoning remains less explored. Although the recent LLaMA-Adapter demonstrates the potential to handle visual inputs with LLMs, it still cannot generalize well to open-ended visual instructions and lags behind GPT-4. In this paper, we present LLaMA-Adapter V2, a parameter-efficient visual instruction model. Specifically, we first augment LLaMA-Adapter by unlocking more learnable parameters (e.g., norm, bias and scale), which distribute the instruction-following ability across the entire LLaMA model besides adapters. Secondly, we propose an early fusion strategy to feed visual tokens only into the early LLM layers, contributing to better visual knowledge incorporation. Thirdly, a joint training paradigm of image-text pairs and instruction-following data is introduced by optimizing disjoint groups of learnable parameters. This strategy effectively alleviates the interference between the two tasks of image-text alignment and instruction following and achieves strong multi-modal reasoning with only a small-scale image-text and instruction dataset. During inference, we incorporate additional expert models (e.g. captioning/OCR systems) into LLaMA-Adapter to further enhance its image understanding capability without incurring training costs. Compared to the original LLaMA-Adapter, our LLaMA-Adapter V2 can perform open-ended multi-modal instructions by merely introducing 14M parameters over LLaMA. The newly designed framework also exhibits stronger language-only instruction-following capabilities and even excels in chat interactions. Our code and models are available at https://github.com/ZrrSkywalker/LLaMA-Adapter.", "year": 2023, "venue": "arXiv.org", "authors": [ "Peng Gao", "Jiaming Han", "Renrui Zhang", "Ziyi Lin", "Shijie Geng", "Aojun Zhou", "W. Zhang", "Pan Lu", "Conghui He", "Xiangyu Yue", "Hongsheng Li", "Y. Qiao" ], "externalIds": { "ArXiv": "2304.15010", "DBLP": "journals/corr/abs-2304-15010", "DOI": "10.48550/arXiv.2304.15010", "CorpusId": 258418343 }, "url": "https://www.semanticscholar.org/paper/570079bbdd8758dfe865097e05719313c9c1301a", "referenceCount": 79, "citationCount": 429, "influentialCitationCount": 53, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Visual Instruction Tuning", "abstract": "Instruction tuning large language models (LLMs) using machine-generated instruction-following data has improved zero-shot capabilities on new tasks, but the idea is less explored in the multimodal field. 
In this paper, we present the first attempt to use language-only GPT-4 to generate multimodal language-image instruction-following data. By instruction tuning on such generated data, we introduce LLaVA: Large Language and Vision Assistant, an end-to-end trained large multimodal model that connects a vision encoder and LLM for general-purpose visual and language understanding. Our early experiments show that LLaVA demonstrates impressive multimodal chat abilities, sometimes exhibiting the behaviors of multimodal GPT-4 on unseen images/instructions, and yields an 85.1% relative score compared with GPT-4 on a synthetic multimodal instruction-following dataset. When fine-tuned on Science QA, the synergy of LLaVA and GPT-4 achieves a new state-of-the-art accuracy of 92.53%. We make GPT-4 generated visual instruction tuning data, our model and code base publicly available.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Haotian Liu", "Chunyuan Li", "Qingyang Wu", "Yong Jae Lee" ], "externalIds": { "DBLP": "journals/corr/abs-2304-08485", "ArXiv": "2304.08485", "DOI": "10.48550/arXiv.2304.08485", "CorpusId": 258179774 }, "url": "https://www.semanticscholar.org/paper/a5036f31f0e629dc661f120b8c3b1f374d479ab8", "referenceCount": 63, "citationCount": 2100, "influentialCitationCount": 564, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Why think step-by-step? Reasoning emerges from the locality of experience", "abstract": "Humans have a powerful and mysterious capacity to reason. By working through a series of purely mental steps, we can make inferences we would not be capable of making directly -- despite the fact that we get no additional data from the world. Similarly, when large language models generate a series of intermediate steps (a chain of thought) before answering a question, they often produce better answers than they otherwise would. We investigate why and how chain-of-thought reasoning is useful in language models, testing the hypothesis that reasoning is effective when training data consists of local clusters of variables that influence each other strongly. These training conditions enable the chaining of accurate local inferences in order to estimate relationships between variables that were not seen together in training. We prove that there will exist a\"reasoning gap\", where reasoning through intermediate variables improves inference, for the simple case of an autoregressive density estimator trained on local samples from a chain-structured probabilistic model. We then test our hypothesis empirically in more complex models, training an autoregressive language model on samples from Bayes nets but only including a subset of variables in each sample. We test language models' ability to match conditional probabilities with and without intermediate reasoning steps, finding that intermediate steps are only helpful when the training data is locally structured with respect to dependencies between variables and that the combination of locally-structured observations and reasoning is much more data-efficient than training on all variables. Our results illustrate how the effectiveness of reasoning step by step is rooted in the local statistical structure of the training data.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Ben Prystawski", "Michael Y. Li", "Noah D. 
Goodman" ], "externalIds": { "DBLP": "conf/nips/PrystawskiLG23", "ArXiv": "2304.03843", "CorpusId": 258048648 }, "url": "https://www.semanticscholar.org/paper/9a3edb5c6b0e8c84c94ea99a9ab647b1209f650f", "referenceCount": 28, "citationCount": 50, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LLaMA-Adapter: Efficient Fine-tuning of Language Models with Zero-init Attention", "abstract": "We present LLaMA-Adapter, a lightweight adaption method to efficiently fine-tune LLaMA into an instruction-following model. Using 52K self-instruct demonstrations, LLaMA-Adapter only introduces 1.2M learnable parameters upon the frozen LLaMA 7B model, and costs less than one hour for fine-tuning on 8 A100 GPUs. Specifically, we adopt a set of learnable adaption prompts, and prepend them to the word tokens at higher transformer layers. Then, a zero-initialized attention mechanism with zero gating is proposed, which adaptively injects the new instructional cues into LLaMA, while effectively preserves its pre-trained knowledge. With our efficient training, LLaMA-Adapter can generate high-quality responses, comparable to Alpaca with fully fine-tuned 7B parameters. Besides language commands, our approach can be simply extended to multi-modal instructions for learning image-conditioned LLaMA model, which achieves superior reasoning performance on ScienceQA and COCO Caption benchmarks. Furthermore, we also evaluate the zero-initialized attention mechanism for fine-tuning other pre-trained models (ViT, RoBERTa) on traditional vision and language tasks, demonstrating the superior generalization capacity of our approach. Code is released at https://github.com/OpenGVLab/LLaMA-Adapter.", "year": 2023, "venue": "arXiv.org", "authors": [ "Renrui Zhang", "Jiaming Han", "Aojun Zhou", "Xiangfei Hu", "Shilin Yan", "Pan Lu", "Hongsheng Li", "Peng Gao", "Y. Qiao" ], "externalIds": { "ArXiv": "2303.16199", "DBLP": "journals/corr/abs-2303-16199", "DOI": "10.48550/arXiv.2303.16199", "CorpusId": 257771811 }, "url": "https://www.semanticscholar.org/paper/a757999ed260d7bc45484dc6b4456bf33fe6f679", "referenceCount": 137, "citationCount": 559, "influentialCitationCount": 53, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Is BERT Blind? Exploring the Effect of Vision-and-Language Pretraining on Visual Language Understanding", "abstract": "Most humans use visual imagination to understand and reason about language, but models such as BERT reason about language using knowledge acquired during text-only pretraining. In this work, we investigate whether vision-and-language pretraining can improve performance on text-only tasks that involve implicit visual reasoning, focusing primarily on zero-shot probing methods. We propose a suite of visual language understanding (VLU) tasks for probing the visual reasoning abilities of text encoder models, as well as various non-visual natural language understanding (NLU) tasks for comparison. We also contribute a novel zero-shot knowledge probing method, Stroop probing, for applying models such as CLIP to text-only tasks without needing a prediction head such as the masked language modelling head of models like BERT. We show that SOTA multimodally trained text encoders outperform unimodally trained text encoders on the VLU tasks while being under-performed by them on the NLU tasks, lending new context to previously mixed results regarding the NLU capabilities of multimodal models. 
We conclude that exposure to images during pretraining affords inherent visual reasoning knowledge that is reflected in language-only tasks that require implicit visual reasoning. Our findings bear importance in the broader context of multimodal learning, providing principled guidelines for the choice of text encoders used in such contexts. Our code will be made available at https://isbertblind.github.io/.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Morris Alper", "Michael Fiman", "Hadar Averbuch-Elor" ], "externalIds": { "DBLP": "conf/cvpr/AlperFA23", "ArXiv": "2303.12513", "DOI": "10.1109/CVPR52729.2023.00655", "CorpusId": 257663918 }, "url": "https://www.semanticscholar.org/paper/8d1187eebe9e1d5631f30629d5bf0c3988e6e3da", "referenceCount": 97, "citationCount": 15, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LLaMA: Open and Efficient Foundation Language Models", "abstract": "We introduce LLaMA, a collection of foundation language models ranging from 7B to 65B parameters. We train our models on trillions of tokens, and show that it is possible to train state-of-the-art models using publicly available datasets exclusively, without resorting to proprietary and inaccessible datasets. In particular, LLaMA-13B outperforms GPT-3 (175B) on most benchmarks, and LLaMA-65B is competitive with the best models, Chinchilla-70B and PaLM-540B. We release all our models to the research community.", "year": 2023, "venue": "arXiv.org", "authors": [ "Hugo Touvron", "Thibaut Lavril", "Gautier Izacard", "Xavier Martinet", "Marie-Anne Lachaux", "Timothée Lacroix", "Baptiste Rozière", "Naman Goyal", "Eric Hambro", "Faisal Azhar", "Aurelien Rodriguez", "Armand Joulin", "Edouard Grave", "Guillaume Lample" ], "externalIds": { "DBLP": "journals/corr/abs-2302-13971", "ArXiv": "2302.13971", "CorpusId": 257219404 }, "url": "https://www.semanticscholar.org/paper/57e849d0de13ed5f91d086936296721d4ff75a75", "referenceCount": 80, "citationCount": 8031, "influentialCitationCount": 1073, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Reproducible Scaling Laws for Contrastive Language-Image Learning", "abstract": "Scaling up neural networks has led to remarkable performance across a wide range of tasks. Moreover, performance often follows reliable scaling laws as a function of training set size, model size, and compute, which offers valuable guidance as large-scale experiments are becoming increasingly expensive. However, previous work on scaling laws has primarily used private data & models or focused on uni-modal language or vision learning. To address these limitations, we investigate scaling laws for contrastive language-image pre-training (CLIP) with the public LAION dataset and the open-source OpenCLIP repository. Our large-scale experiments involve models trained on up to two billion image-text pairs and identify power law scaling for multiple downstream tasks including zero-shot classification, retrieval, linear probing, and end-to-end fine-tuning. We find that the training distribution plays a key role in scaling laws as the OpenAI and OpenCLIP models exhibit different scaling behavior despite identical model architectures and similar training recipes. We open-source our evaluation workflow and all models, including the largest public CLIP models, to ensure reproducibility and make scaling laws research more accessible. 
Source code and instructions to reproduce this study are available at https://github.com/LAION-AI/scaling-laws-openclip.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Mehdi Cherti", "Romain Beaumont", "Ross Wightman", "Mitchell Wortsman", "Gabriel Ilharco", "Cade Gordon", "Christoph Schuhmann", "Ludwig Schmidt", "J. Jitsev" ], "externalIds": { "ArXiv": "2212.07143", "DBLP": "journals/corr/abs-2212-07143", "DOI": "10.1109/CVPR52729.2023.00276", "CorpusId": 254636568 }, "url": "https://www.semanticscholar.org/paper/16de2006e2960ba410772c6b6d460b83c0a5cc4b", "referenceCount": 90, "citationCount": 446, "influentialCitationCount": 61, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "EVA: Exploring the Limits of Masked Visual Representation Learning at Scale", "abstract": "We launch EVA, a vision-centric foundation model to Explore the limits of Visual representation at scAle using only publicly accessible data. EVA is a vanilla ViT pre-trained to reconstruct the masked out image-text aligned vision features conditioned on visible image patches. Via this pretext task, we can efficiently scale up EVA to one billion parameters, and sets new records on a broad range of representative vision downstream tasks, such as image recognition, video action recognition, object detection, instance segmentation and semantic segmentation without heavy supervised training. Moreover, we observe quantitative changes in scaling EVA result in qualitative changes in transfer learning performance that are not present in other models. For instance, EVA takes a great leap in the challenging large vocabulary instance segmentation task: our model achieves almost the same state-of-the-art performance on LVIS dataset with over a thousand categories and COCO dataset with only eighty categories. Beyond a pure vision encoder, EVA can also serve as a vision-centric, multi-modal pivot to connect images and text. We find initializing the vision tower of a giant CLIP from EVA can greatly stabilize the training and outperform the training from scratch counterpart with much fewer samples and less compute, providing a new direction for scaling up and accelerating the costly training of multi-modal foundation models.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Yuxin Fang", "Wen Wang", "Binhui Xie", "Quan-Sen Sun", "Ledell Yu Wu", "Xinggang Wang", "Tiejun Huang", "Xinlong Wang", "Yue Cao" ], "externalIds": { "DBLP": "conf/cvpr/FangWXSWW0WC23", "ArXiv": "2211.07636", "DOI": "10.1109/CVPR52729.2023.01855", "CorpusId": 253510587 }, "url": "https://www.semanticscholar.org/paper/78281482c1fdad8e167bab39cc9955c73d58ae8f", "referenceCount": 133, "citationCount": 471, "influentialCitationCount": 46, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Scaling Laws for Reward Model Overoptimization", "abstract": "In reinforcement learning from human feedback, it is common to optimize against a reward model trained to predict human preferences. Because the reward model is an imperfect proxy, optimizing its value too much can hinder ground truth performance, in accordance with Goodhart's law. This effect has been frequently observed, but not carefully measured due to the expense of collecting human preference data. In this work, we use a synthetic setup in which a fixed\"gold-standard\"reward model plays the role of humans, providing labels used to train a proxy reward model. 
We study how the gold reward model score changes as we optimize against the proxy reward model using either reinforcement learning or best-of-$n$ sampling. We find that this relationship follows a different functional form depending on the method of optimization, and that in both cases its coefficients scale smoothly with the number of reward model parameters. We also study the effect on this relationship of the size of the reward model dataset, the number of reward model and policy parameters, and the coefficient of the KL penalty added to the reward in the reinforcement learning setup. We explore the implications of these empirical results for theoretical considerations in AI alignment.", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "Leo Gao", "John Schulman", "Jacob Hilton" ], "externalIds": { "DBLP": "conf/icml/GaoSH23", "ArXiv": "2210.10760", "CorpusId": 252992904 }, "url": "https://www.semanticscholar.org/paper/fb3dc5e20e0a71134ca916f0d6d8d41f01225b4b", "referenceCount": 59, "citationCount": 299, "influentialCitationCount": 39, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Visual Commonsense in Pretrained Unimodal and Multimodal Models", "abstract": "Our commonsense knowledge about objects includes their typical visual attributes; we know that bananas are typically yellow or green, and not purple. Text and image corpora, being subject to reporting bias, represent this world-knowledge to varying degrees of faithfulness. In this paper, we investigate to what degree unimodal (language-only) and multimodal (image and language) models capture a broad range of visually salient attributes. To that end, we create the Visual Commonsense Tests (ViComTe) dataset covering 5 property types (color, shape, material, size, and visual co-occurrence) for over 5000 subjects. We validate this dataset by showing that our grounded color data correlates much better than ungrounded text-only data with crowdsourced color judgments provided by Paik et al. (2021). We then use our dataset to evaluate pretrained unimodal models and multimodal models. Our results indicate that multimodal models better reconstruct attribute distributions, but are still subject to reporting bias. Moreover, increasing model size does not enhance performance, suggesting that the key to visual commonsense lies in the data.", "year": 2022, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Chenyu Zhang", "Benjamin Van Durme", "Zhuowan Li", "Elias Stengel-Eskin" ], "externalIds": { "ArXiv": "2205.01850", "DBLP": "journals/corr/abs-2205-01850", "ACL": "2022.naacl-main.390", "DOI": "10.48550/arXiv.2205.01850", "CorpusId": 248512651 }, "url": "https://www.semanticscholar.org/paper/7e69986581d477f7051bb77ba1ba30e04e38ad8b", "referenceCount": 40, "citationCount": 34, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "OPT: Open Pre-trained Transformer Language Models", "abstract": "Large language models, which are often trained for hundreds of thousands of compute days, have shown remarkable capabilities for zero- and few-shot learning. Given their computational cost, these models are difficult to replicate without significant capital. For the few that are available through APIs, no access is granted to the full model weights, making them difficult to study. 
We present Open Pre-trained Transformers (OPT), a suite of decoder-only pre-trained transformers ranging from 125M to 175B parameters, which we aim to fully and responsibly share with interested researchers. We show that OPT-175B is comparable to GPT-3, while requiring only 1/7th the carbon footprint to develop. We are also releasing our logbook detailing the infrastructure challenges we faced, along with code for experimenting with all of the released models.", "year": 2022, "venue": "arXiv.org", "authors": [ "Susan Zhang", "Stephen Roller", "Naman Goyal", "Mikel Artetxe", "Moya Chen", "Shuohui Chen", "Christopher Dewan", "Mona T. Diab", "Xian Li", "Xi Victoria Lin", "Todor Mihaylov", "Myle Ott", "Sam Shleifer", "Kurt Shuster", "Daniel Simig", "Punit Singh Koura", "Anjali Sridhar", "Tianlu Wang", "Luke Zettlemoyer" ], "externalIds": { "DBLP": "journals/corr/abs-2205-01068", "ArXiv": "2205.01068", "CorpusId": 248496292 }, "url": "https://www.semanticscholar.org/paper/13a0d8bb38f739990c8cd65a44061c6534f17221", "referenceCount": 120, "citationCount": 2763, "influentialCitationCount": 318, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PaLM: Scaling Language Modeling with Pathways", "abstract": "Large language models have been shown to achieve remarkable performance across a variety of natural language tasks using few-shot learning, which drastically reduces the number of task-specific training examples needed to adapt the model to a particular application. To further our understanding of the impact of scale on few-shot learning, we trained a 540-billion parameter, densely activated, Transformer language model, which we call Pathways Language Model PaLM. We trained PaLM on 6144 TPU v4 chips using Pathways, a new ML system which enables highly efficient training across multiple TPU Pods. We demonstrate continued benefits of scaling by achieving state-of-the-art few-shot learning results on hundreds of language understanding and generation benchmarks. On a number of these tasks, PaLM 540B achieves breakthrough performance, outperforming the finetuned state-of-the-art on a suite of multi-step reasoning tasks, and outperforming average human performance on the recently released BIG-bench benchmark. A significant number of BIG-bench tasks showed discontinuous improvements from model scale, meaning that performance steeply increased as we scaled to our largest model. PaLM also has strong capabilities in multilingual tasks and source code generation, which we demonstrate on a wide array of benchmarks. We additionally provide a comprehensive analysis on bias and toxicity, and study the extent of training data memorization with respect to model scale. Finally, we discuss the ethical considerations related to large language models and discuss potential mitigation strategies.", "year": 2022, "venue": "Journal of machine learning research", "authors": [ "Aakanksha Chowdhery", "Sharan Narang", "Jacob Devlin", "Maarten Bosma", "Gaurav Mishra", "Adam Roberts", "P. Barham", "Hyung Won Chung", "Charles Sutton", "Sebastian Gehrmann", "Parker Schuh", "Kensen Shi", "Sasha Tsvyashchenko", "Joshua Maynez", "Abhishek Rao", "Parker Barnes", "Yi Tay", "Noam M. Shazeer", "Vinodkumar Prabhakaran", "Emily Reif", "Nan Du", "Ben Hutchinson", "Reiner Pope", "James Bradbury", "Jacob Austin", "M. Isard", "Guy Gur-Ari", "Pengcheng Yin", "Toju Duke", "Anselm Levskaya", "Sanjay Ghemawat", "Sunipa Dev", "H. 
Michalewski", "Xavier García", "Vedant Misra", "Kevin Robinson", "Liam Fedus", "Denny Zhou", "Daphne Ippolito", "D. Luan", "Hyeontaek Lim", "Barret Zoph", "A. Spiridonov", "Ryan Sepassi", "David Dohan", "Shivani Agrawal", "Mark Omernick", "Andrew M. Dai", "Thanumalayan Sankaranarayana Pillai", "Marie Pellat", "Aitor Lewkowycz", "Erica Moreira", "R. Child", "Oleksandr Polozov", "Katherine Lee", "Zongwei Zhou", "Xuezhi Wang", "Brennan Saeta", "Mark Díaz", "Orhan Firat", "Michele Catasta", "Jason Wei", "K. Meier-Hellstern", "D. Eck", "J. Dean", "Slav Petrov", "Noah Fiedel" ], "externalIds": { "ArXiv": "2204.02311", "DBLP": "journals/corr/abs-2204-02311", "CorpusId": 247951931 }, "url": "https://www.semanticscholar.org/paper/094ff971d6a8b8ff870946c9b3ce5aa173617bfb", "referenceCount": 173, "citationCount": 4789, "influentialCitationCount": 335, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Universal Law of Robustness via Isoperimetry", "abstract": "Classically, data interpolation with a parametrized model class is possible as long as the number of parameters is larger than the number of equations to be satisfied. A puzzling phenomenon in deep learning is that models are trained with many more parameters than what this classical theory would suggest. We propose a partial theoretical explanation for this phenomenon. We prove that for a broad class of data distributions and model classes, overparametrization is necessary if one wants to interpolate the data smoothly. Namely we show that smooth interpolation requires d times more parameters than mere interpolation, where d is the ambient data dimension. We prove this universal law of robustness for any smoothly parametrized function class with polynomial size weights, and any covariate distribution verifying isoperimetry (or a mixture thereof). In the case of two-layer neural networks and Gaussian covariates, this law was conjectured in prior work by Bubeck, Li, and Nagaraj. We also give an interpretation of our result as an improved generalization bound for model classes consisting of smooth functions.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Sébastien Bubeck", "Mark Sellke" ], "externalIds": { "ArXiv": "2105.12806", "DBLP": "conf/nips/BubeckS21", "DOI": "10.1145/3578580", "CorpusId": 235212537 }, "url": "https://www.semanticscholar.org/paper/385d055ca18509d36f8601fca9d832a11d490e30", "referenceCount": 43, "citationCount": 187, "influentialCitationCount": 21, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Learning Transferable Visual Models From Natural Language Supervision", "abstract": "State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. 
We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at https://github.com/OpenAI/CLIP.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Alec Radford", "Jong Wook Kim", "Chris Hallacy", "A. Ramesh", "Gabriel Goh", "Sandhini Agarwal", "Girish Sastry", "Amanda Askell", "Pamela Mishkin", "Jack Clark", "Gretchen Krueger", "I. Sutskever" ], "externalIds": { "ArXiv": "2103.00020", "DBLP": "conf/icml/RadfordKHRGASAM21", "CorpusId": 231591445 }, "url": "https://www.semanticscholar.org/paper/6f870f7f02a8c59c3e23f407f3ef00dd1dcf8fc4", "referenceCount": 220, "citationCount": 18886, "influentialCitationCount": 5013, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Scaling Laws for Transfer", "abstract": "We study empirical scaling laws for transfer learning between distributions in an unsupervised, fine-tuning setting. When we train increasingly large neural networks from-scratch on a fixed-size dataset, they eventually become data-limited and stop improving in performance (cross-entropy loss). When we do the same for models pre-trained on a large language dataset, the slope in performance gains is merely reduced rather than going to zero. We calculate the effective data\"transferred\"from pre-training by determining how much data a transformer of the same size would have required to achieve the same loss when training from scratch. In other words, we focus on units of data while holding everything else fixed. We find that the effective data transferred is described well in the low data regime by a power-law of parameter count and fine-tuning dataset size. We believe the exponents in these power-laws correspond to measures of the generality of a model and proximity of distributions (in a directed rather than symmetric sense). We find that pre-training effectively multiplies the fine-tuning dataset size. Transfer, like overall performance, scales predictably in terms of parameters, data, and compute.", "year": 2021, "venue": "arXiv.org", "authors": [ "Danny Hernandez", "J. Kaplan", "T. Henighan", "Sam McCandlish" ], "externalIds": { "ArXiv": "2102.01293", "DBLP": "journals/corr/abs-2102-01293", "CorpusId": 231749962 }, "url": "https://www.semanticscholar.org/paper/4383a975c09b72ba2f1a77cd779bb6965dbfb2fb", "referenceCount": 45, "citationCount": 185, "influentialCitationCount": 14, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Scaling Laws for Autoregressive Generative Modeling", "abstract": "We identify empirical scaling laws for the cross-entropy loss in four domains: generative image modeling, video modeling, multimodal image$\\leftrightarrow$text models, and mathematical problem solving. In all cases autoregressive Transformers smoothly improve in performance as model size and compute budgets increase, following a power-law plus constant scaling law. 
The optimal model size also depends on the compute budget through a power-law, with exponents that are nearly universal across all data domains. \nThe cross-entropy loss has an information theoretic interpretation as $S($True$) + D_{\\mathrm{KL}}($True$||$Model$)$, and the empirical scaling laws suggest a prediction for both the true data distribution's entropy and the KL divergence between the true and model distributions. With this interpretation, billion-parameter Transformers are nearly perfect models of the YFCC100M image distribution downsampled to an $8\\times 8$ resolution, and we can forecast the model size needed to achieve any given reducible loss (ie $D_{\\mathrm{KL}}$) in nats/image for other resolutions. \nWe find a number of additional scaling laws in specific domains: (a) we identify a scaling relation for the mutual information between captions and images in multimodal models, and show how to answer the question \"Is a picture worth a thousand words?\"; (b) in the case of mathematical problem solving, we identify scaling laws for model performance when extrapolating beyond the training distribution; (c) we finetune generative image models for ImageNet classification and find smooth scaling of the classification loss and error rate, even as the generative loss levels off. Taken together, these results strengthen the case that scaling laws have important implications for neural network performance, including on downstream tasks.", "year": 2020, "venue": "arXiv.org", "authors": [ "T. Henighan", "J. Kaplan", "Mor Katz", "Mark Chen", "Christopher Hesse", "Jacob Jackson", "Heewoo Jun", "Tom B. Brown", "Prafulla Dhariwal", "Scott Gray", "Chris Hallacy", "Benjamin Mann", "Alec Radford", "A. Ramesh", "Nick Ryder", "Daniel M. Ziegler", "John Schulman", "Dario Amodei", "Sam McCandlish" ], "externalIds": { "DBLP": "journals/corr/abs-2010-14701", "ArXiv": "2010.14701", "MAG": "3095645723", "CorpusId": 225094178 }, "url": "https://www.semanticscholar.org/paper/3efbcfeeb0ea1051a71101d3318da4411081f0b8", "referenceCount": 36, "citationCount": 303, "influentialCitationCount": 46, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Evaluation for Weakly Supervised Object Localization: Protocol, Metrics, and Datasets", "abstract": "Weakly-supervised object localization (WSOL) has gained popularity over the last years for its promise to train localization models with only image-level labels. Since the seminal WSOL work of class activation mapping (CAM), the field has focused on how to expand the attention regions to cover objects more broadly and localize them better. However, these strategies rely on full localization supervision for validating hyperparameters and model selection, which is in principle prohibited under the WSOL setup. In this paper, we argue that WSOL task is ill-posed with only image-level labels, and propose a new evaluation protocol where full supervision is limited to only a small held-out set not overlapping with the test set. We observe that, under our protocol, the five most recent WSOL methods have not made a major improvement over the CAM baseline. Moreover, we report that existing WSOL methods have not reached the few-shot learning baseline, where the full-supervision at validation time is used for model training instead. Based on our findings, we discuss some future directions for WSOL. 
Source code and dataset are available at https://github.com/clovaai/wsolevaluation.", "year": 2020, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Junsuk Choe", "Seong Joon Oh", "Sanghyuk Chun", "Zeynep Akata", "Hyunjung Shim" ], "externalIds": { "ArXiv": "2007.04178", "DBLP": "journals/corr/abs-2007-04178", "MAG": "3041935510", "DOI": "10.1109/TPAMI.2022.3169881", "CorpusId": 220403518, "PubMed": "35468058" }, "url": "https://www.semanticscholar.org/paper/7143b056c5cfcc561b71c4ed02bd40d420c0a94b", "referenceCount": 73, "citationCount": 20, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Perturbed Masking: Parameter-free Probing for Analyzing and Interpreting BERT", "abstract": "By introducing a small set of additional parameters, a probe learns to solve specific linguistic tasks (e.g., dependency parsing) in a supervised manner using feature representations (e.g., contextualized embeddings). The effectiveness of such probing tasks is taken as evidence that the pre-trained model encodes linguistic knowledge. However, this approach of evaluating a language model is undermined by the uncertainty of the amount of knowledge that is learned by the probe itself. Complementary to those works, we propose a parameter-free probing technique for analyzing pre-trained language models (e.g., BERT). Our method does not require direct supervision from the probing tasks, nor do we introduce additional parameters to the probing process. Our experiments on BERT show that syntactic trees recovered from BERT using our method are significantly better than linguistically-uninformed baselines. We further feed the empirically induced dependency structures into a downstream sentiment classification task and find its improvement compatible with or even superior to a human-designed dependency schema.", "year": 2020, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Zhiyong Wu", "Yun Chen", "B. Kao", "Qun Liu" ], "externalIds": { "ArXiv": "2004.14786", "ACL": "2020.acl-main.383", "DBLP": "journals/corr/abs-2004-14786", "MAG": "3034503989", "DOI": "10.18653/v1/2020.acl-main.383", "CorpusId": 216914626 }, "url": "https://www.semanticscholar.org/paper/cdf766403e365643ac4dfdf9e10df8da1b75b63f", "referenceCount": 57, "citationCount": 165, "influentialCitationCount": 28, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "TextCaps: a Dataset for Image Captioning with Reading Comprehension", "abstract": null, "year": 2020, "venue": "European Conference on Computer Vision", "authors": [ "Oleksii Sidorov", "Ronghang Hu", "Marcus Rohrbach", "Amanpreet Singh" ], "externalIds": { "ArXiv": "2003.12462", "MAG": "3106859150", "DBLP": "journals/corr/abs-2003-12462", "DOI": "10.1007/978-3-030-58536-5_44", "CorpusId": 214693197 }, "url": "https://www.semanticscholar.org/paper/33eadd4e666a894306a22ba0839c5e0cef77280e", "referenceCount": 39, "citationCount": 288, "influentialCitationCount": 55, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Scaling Laws for Neural Language Models", "abstract": "We study empirical scaling laws for language model performance on the cross-entropy loss. The loss scales as a power-law with model size, dataset size, and the amount of compute used for training, with some trends spanning more than seven orders of magnitude. 
Other architectural details such as network width or depth have minimal effects within a wide range. Simple equations govern the dependence of overfitting on model/dataset size and the dependence of training speed on model size. These relationships allow us to determine the optimal allocation of a fixed compute budget. Larger models are significantly more sample-efficient, such that optimally compute-efficient training involves training very large models on a relatively modest amount of data and stopping significantly before convergence.", "year": 2020, "venue": "arXiv.org", "authors": [ "J. Kaplan", "Sam McCandlish", "T. Henighan", "Tom B. Brown", "B. Chess", "R. Child", "Scott Gray", "Alec Radford", "Jeff Wu", "Dario Amodei" ], "externalIds": { "MAG": "3001279689", "ArXiv": "2001.08361", "DBLP": "journals/corr/abs-2001-08361", "CorpusId": 210861095 }, "url": "https://www.semanticscholar.org/paper/e6c561d02500b2596a230b341a8eb8b921ca5bf2", "referenceCount": 59, "citationCount": 3074, "influentialCitationCount": 266, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer", "abstract": "Transfer learning, where a model is first pre-trained on a data-rich task before being fine-tuned on a downstream task, has emerged as a powerful technique in natural language processing (NLP). The effectiveness of transfer learning has given rise to a diversity of approaches, methodology, and practice. In this paper, we explore the landscape of transfer learning techniques for NLP by introducing a unified framework that converts every language problem into a text-to-text format. Our systematic study compares pre-training objectives, architectures, unlabeled datasets, transfer approaches, and other factors on dozens of language understanding tasks. By combining the insights from our exploration with scale and our new \"Colossal Clean Crawled Corpus\", we achieve state-of-the-art results on many benchmarks covering summarization, question answering, text classification, and more. To facilitate future work on transfer learning for NLP, we release our dataset, pre-trained models, and code.", "year": 2019, "venue": "Journal of machine learning research", "authors": [ "Colin Raffel", "Noam M. Shazeer", "Adam Roberts", "Katherine Lee", "Sharan Narang", "Michael Matena", "Yanqi Zhou", "Wei Li", "Peter J. Liu" ], "externalIds": { "MAG": "2981852735", "DBLP": "journals/corr/abs-1910-10683", "ArXiv": "1910.10683", "CorpusId": 204838007 }, "url": "https://www.semanticscholar.org/paper/6c4b76232bb72897685d19b3d264c6ee3005bc2b", "referenceCount": 134, "citationCount": 15984, "influentialCitationCount": 2029, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Language Models as Knowledge Bases?", "abstract": "Recent progress in pretraining language models on large textual corpora led to a surge of improvements for downstream NLP tasks. Whilst learning linguistic knowledge, these models may also be storing relational knowledge present in the training data, and may be able to answer queries structured as “fill-in-the-blank” cloze statements. Language models have many advantages over structured knowledge bases: they require no schema engineering, allow practitioners to query about an open class of relations, are easy to extend to more data, and require no human supervision to train. 
We present an in-depth analysis of the relational knowledge already present (without fine-tuning) in a wide range of state-of-the-art pretrained language models. We find that (i) without fine-tuning, BERT contains relational knowledge competitive with traditional NLP methods that have some access to oracle knowledge, (ii) BERT also does remarkably well on open-domain question answering against a supervised baseline, and (iii) certain types of factual knowledge are learned much more readily than others by standard language model pretraining approaches. The surprisingly strong ability of these models to recall factual knowledge without any fine-tuning demonstrates their potential as unsupervised open-domain QA systems. The code to reproduce our analysis is available at https://github.com/facebookresearch/LAMA.", "year": 2019, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "F. Petroni", "Tim Rocktäschel", "Patrick Lewis", "A. Bakhtin", "Yuxiang Wu", "Alexander H. Miller", "Sebastian Riedel" ], "externalIds": { "DBLP": "conf/emnlp/PetroniRRLBWM19", "ArXiv": "1909.01066", "MAG": "2970476646", "ACL": "D19-1250", "DOI": "10.18653/v1/D19-1250", "CorpusId": 202539551 }, "url": "https://www.semanticscholar.org/paper/d0086b86103a620a86bc918746df0aa642e2a8a3", "referenceCount": 47, "citationCount": 2248, "influentialCitationCount": 269, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "OCR-VQA: Visual Question Answering by Reading Text in Images", "abstract": "The problem of answering questions about an image is popularly known as visual question answering (or VQA in short). It is a well-established problem in computer vision. However, none of the VQA methods currently utilize the text often present in the image. These \"texts in images\" provide additional useful cues and facilitate better understanding of the visual content. In this paper, we introduce a novel task of visual question answering by reading text in images, i.e., by optical character recognition or OCR. We refer to this problem as OCR-VQA. To facilitate a systematic way of studying this new problem, we introduce a large-scale dataset, namely OCRVQA-200K. This dataset comprises of 207,572 images of book covers and contains more than 1 million question-answer pairs about these images. We judiciously combine well-established techniques from OCR and VQA domains to present a novel baseline for OCR-VQA-200K. The experimental results and rigorous analysis demonstrate various challenges present in this dataset leaving ample scope for the future research. We are optimistic that this new task along with compiled dataset will open-up many exciting research avenues both for the document image analysis and the VQA communities.", "year": 2019, "venue": "IEEE International Conference on Document Analysis and Recognition", "authors": [ "Anand Mishra", "Shashank Shekhar", "A. 
Singh", "Anirban Chakraborty" ], "externalIds": { "DBLP": "conf/icdar/0001SSC19", "MAG": "3004268082", "DOI": "10.1109/ICDAR.2019.00156", "CorpusId": 209413409 }, "url": "https://www.semanticscholar.org/paper/1097cf8cf5961589ff693b069002e7181e24e631", "referenceCount": 26, "citationCount": 282, "influentialCitationCount": 41, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "OK-VQA: A Visual Question Answering Benchmark Requiring External Knowledge", "abstract": "Visual Question Answering (VQA) in its ideal form lets us study reasoning in the joint space of vision and language and serves as a proxy for the AI task of scene understanding. However, most VQA benchmarks to date are focused on questions such as simple counting, visual attributes, and object detection that do not require reasoning or knowledge beyond what is in the image. In this paper, we address the task of knowledge-based visual question answering and provide a benchmark, called OK-VQA, where the image content is not sufficient to answer the questions, encouraging methods that rely on external knowledge resources. Our new dataset includes more than 14,000 questions that require external knowledge to answer. We show that the performance of the state-of-the-art VQA models degrades drastically in this new setting. Our analysis shows that our knowledge-based VQA task is diverse, difficult, and large compared to previous knowledge-based VQA datasets. We hope that this dataset enables researchers to open up new avenues for research in this domain.", "year": 2019, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Kenneth Marino", "Mohammad Rastegari", "Ali Farhadi", "Roozbeh Mottaghi" ], "externalIds": { "DBLP": "conf/cvpr/MarinoRFM19", "ArXiv": "1906.00067", "MAG": "2947312908", "DOI": "10.1109/CVPR.2019.00331", "CorpusId": 173991173 }, "url": "https://www.semanticscholar.org/paper/28ad018c39d1578bea84e7cedf94459e3dbe1e70", "referenceCount": 56, "citationCount": 764, "influentialCitationCount": 146, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning and Generalization in Overparameterized Neural Networks, Going Beyond Two Layers", "abstract": "The fundamental learning theory behind neural networks remains largely open. What classes of functions can neural networks actually learn? Why doesn't the trained network overfit when it is overparameterized? \nIn this work, we prove that overparameterized neural networks can learn some notable concept classes, including two and three-layer networks with fewer parameters and smooth activations. Moreover, the learning can be simply done by SGD (stochastic gradient descent) or its variants in polynomial time using polynomially many samples. The sample complexity can also be almost independent of the number of parameters in the network. \nOn the technique side, our analysis goes beyond the so-called NTK (neural tangent kernel) linearization of neural networks in prior works. 
We establish a new notion of quadratic approximation of the neural network (that can be viewed as a second-order variant of NTK), and connect it to the SGD theory of escaping saddle points.", "year": 2018, "venue": "Neural Information Processing Systems", "authors": [ "Zeyuan Allen-Zhu", "Yuanzhi Li", "Yingyu Liang" ], "externalIds": { "ArXiv": "1811.04918", "DBLP": "journals/corr/abs-1811-04918", "MAG": "2970330753", "CorpusId": 53287096 }, "url": "https://www.semanticscholar.org/paper/611fe6e34df07ea1b2104899e49642b4531b53e9", "referenceCount": 64, "citationCount": 727, "influentialCitationCount": 101, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Convergence Theory for Deep Learning via Over-Parameterization", "abstract": "Deep neural networks (DNNs) have demonstrated dominating performance in many fields; since AlexNet, networks used in practice are going wider and deeper. On the theoretical side, a long line of works has been focusing on training neural networks with one hidden layer. The theory of multi-layer networks remains largely unsettled. \nIn this work, we prove why stochastic gradient descent (SGD) can find $\\textit{global minima}$ on the training objective of DNNs in $\\textit{polynomial time}$. We only make two assumptions: the inputs are non-degenerate and the network is over-parameterized. The latter means the network width is sufficiently large: $\\textit{polynomial}$ in $L$, the number of layers and in $n$, the number of samples. \nOur key technique is to derive that, in a sufficiently large neighborhood of the random initialization, the optimization landscape is almost-convex and semi-smooth even with ReLU activations. This implies an equivalence between over-parameterized neural networks and neural tangent kernel (NTK) in the finite (and polynomial) width setting. \nAs concrete examples, starting from randomly initialized weights, we prove that SGD can attain 100% training accuracy in classification tasks, or minimize regression loss in linear convergence speed, with running time polynomial in $n,L$. Our theory applies to the widely-used but non-smooth ReLU activation, and to any smooth and possibly non-convex loss functions. In terms of network architectures, our theory at least applies to fully-connected neural networks, convolutional neural networks (CNN), and residual neural networks (ResNet).", "year": 2018, "venue": "International Conference on Machine Learning", "authors": [ "Zeyuan Allen-Zhu", "Yuanzhi Li", "Zhao Song" ], "externalIds": { "ArXiv": "1811.03962", "MAG": "2964098911", "DBLP": "journals/corr/abs-1811-03962", "CorpusId": 53250107 }, "url": "https://www.semanticscholar.org/paper/42ec3db12a2e4628885451b13035c2e975220a25", "referenceCount": 68, "citationCount": 1349, "influentialCitationCount": 196, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Unified Approach to Interpreting Model Predictions", "abstract": "Understanding why a model makes a certain prediction can be as crucial as the prediction's accuracy in many applications. However, the highest accuracy for large modern datasets is often achieved by complex models that even experts struggle to interpret, such as ensemble or deep learning models, creating a tension between accuracy and interpretability. 
In response, various methods have recently been proposed to help users interpret the predictions of complex models, but it is often unclear how these methods are related and when one method is preferable over another. To address this problem, we present a unified framework for interpreting predictions, SHAP (SHapley Additive exPlanations). SHAP assigns each feature an importance value for a particular prediction. Its novel components include: (1) the identification of a new class of additive feature importance measures, and (2) theoretical results showing there is a unique solution in this class with a set of desirable properties. The new class unifies six existing methods, notable because several recent methods in the class lack the proposed desirable properties. Based on insights from this unification, we present new methods that show improved computational performance and/or better consistency with human intuition than previous approaches.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Scott M. Lundberg", "Su-In Lee" ], "externalIds": { "MAG": "2618851150", "DBLP": "journals/corr/LundbergL17", "ArXiv": "1705.07874", "CorpusId": 21889700 }, "url": "https://www.semanticscholar.org/paper/442e10a3c6640ded9408622005e3c2a8906ce4c2", "referenceCount": 10, "citationCount": 16601, "influentialCitationCount": 1905, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Axiomatic Attribution for Deep Networks", "abstract": "We study the problem of attributing the prediction of a deep network to its input features, a problem previously studied by several other works. We identify two fundamental axioms— Sensitivity and Implementation Invariance that attribution methods ought to satisfy. We show that they are not satisfied by most known attribution methods, which we consider to be a fundamental weakness of those methods. We use the axioms to guide the design of a new attribution method called Integrated Gradients. Our method requires no modification to the original network and is extremely simple to implement; it just needs a few calls to the standard gradient operator. We apply this method to a couple of image models, a couple of text models and a chemistry model, demonstrating its ability to debug networks, to extract rules from a network, and to enable users to engage with models better.", "year": 2017, "venue": "International Conference on Machine Learning", "authors": [ "Mukund Sundararajan", "Ankur Taly", "Qiqi Yan" ], "externalIds": { "DBLP": "journals/corr/SundararajanTY17", "MAG": "2949197630", "ArXiv": "1703.01365", "CorpusId": 16747630 }, "url": "https://www.semanticscholar.org/paper/f302e136c41db5de1d624412f68c9174cf7ae8be", "referenceCount": 35, "citationCount": 5097, "influentialCitationCount": 786, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Understanding deep learning requires rethinking generalization", "abstract": "Despite their massive size, successful deep artificial neural networks can exhibit a remarkably small difference between training and test performance. Conventional wisdom attributes small generalization error either to properties of the model family, or to the regularization techniques used during training. \nThrough extensive systematic experiments, we show how these traditional approaches fail to explain why large neural networks generalize well in practice. 
Specifically, our experiments establish that state-of-the-art convolutional networks for image classification trained with stochastic gradient methods easily fit a random labeling of the training data. This phenomenon is qualitatively unaffected by explicit regularization, and occurs even if we replace the true images by completely unstructured random noise. We corroborate these experimental findings with a theoretical construction showing that simple depth two neural networks already have perfect finite sample expressivity as soon as the number of parameters exceeds the number of data points as it usually does in practice. \nWe interpret our experimental findings by comparison with traditional models.", "year": 2016, "venue": "International Conference on Learning Representations", "authors": [ "Chiyuan Zhang", "Samy Bengio", "Moritz Hardt", "B. Recht", "O. Vinyals" ], "externalIds": { "MAG": "2950220847", "ArXiv": "1611.03530", "DBLP": "journals/corr/ZhangBHRV16", "CorpusId": 6212000 }, "url": "https://www.semanticscholar.org/paper/54ddb00fa691728944fd8becea90a373d21597cf", "referenceCount": 34, "citationCount": 4427, "influentialCitationCount": 370, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Grad-CAM: Visual Explanations from Deep Networks via Gradient-Based Localization", "abstract": null, "year": 2016, "venue": "International Journal of Computer Vision", "authors": [ "Ramprasaath R. Selvaraju", "Abhishek Das", "Ramakrishna Vedantam", "Michael Cogswell", "Devi Parikh", "Dhruv Batra" ], "externalIds": { "MAG": "2962858109", "DBLP": "conf/iccv/SelvarajuCDVPB17", "ArXiv": "1610.02391", "DOI": "10.1007/s11263-019-01228-7", "CorpusId": 15019293 }, "url": "https://www.semanticscholar.org/paper/5582bebed97947a41e3ddd9bd1f284b73f1648c2", "referenceCount": 72, "citationCount": 16622, "influentialCitationCount": 1836, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Visual Genome: Connecting Language and Vision Using Crowdsourced Dense Image Annotations", "abstract": null, "year": 2016, "venue": "International Journal of Computer Vision", "authors": [ "Ranjay Krishna", "Yuke Zhu", "O. Groth", "Justin Johnson", "K. Hata", "Joshua Kravitz", "Stephanie Chen", "Yannis Kalantidis", "Li-Jia Li", "David A. Shamma", "Michael S. Bernstein", "Li Fei-Fei" ], "externalIds": { "ArXiv": "1602.07332", "DBLP": "journals/corr/KrishnaZGJHKCKL16", "MAG": "2277195237", "DOI": "10.1007/s11263-016-0981-7", "CorpusId": 4492210 }, "url": "https://www.semanticscholar.org/paper/afcf4dbd2ef300e5c4b35043d4fbe516807cdf7d", "referenceCount": 130, "citationCount": 5091, "influentialCitationCount": 659, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "“Why Should I Trust You?”: Explaining the Predictions of Any Classifier", "abstract": "Despite widespread adoption, machine learning models remain mostly black boxes. Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction.
We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks). We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.", "year": 2016, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Marco Tulio Ribeiro", "Sameer Singh", "Carlos Guestrin" ], "externalIds": { "ACL": "N16-3020", "MAG": "2951501516", "DBLP": "journals/corr/RibeiroSG16", "ArXiv": "1602.04938", "DOI": "10.1145/2939672.2939778", "CorpusId": 13029170 }, "url": "https://www.semanticscholar.org/paper/c0883f5930a232a9c1ad601c978caede29155979", "referenceCount": 41, "citationCount": 14452, "influentialCitationCount": 1693, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Learning Deep Features for Discriminative Localization", "abstract": "In this work, we revisit the global average pooling layer proposed in [13], and shed light on how it explicitly enables the convolutional neural network (CNN) to have remarkable localization ability despite being trained on imagelevel labels. While this technique was previously proposed as a means for regularizing training, we find that it actually builds a generic localizable deep representation that exposes the implicit attention of CNNs on an image. Despite the apparent simplicity of global average pooling, we are able to achieve 37.1% top-5 error for object localization on ILSVRC 2014 without training on any bounding box annotation. We demonstrate in a variety of experiments that our network is able to localize the discriminative image regions despite just being trained for solving classification task1.", "year": 2015, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Bolei Zhou", "A. Khosla", "Àgata Lapedriza", "A. Oliva", "A. Torralba" ], "externalIds": { "DBLP": "conf/cvpr/ZhouKLOT16", "ArXiv": "1512.04150", "MAG": "2950328304", "DOI": "10.1109/CVPR.2016.319", "CorpusId": 6789015 }, "url": "https://www.semanticscholar.org/paper/31f9eb39d840821979e5df9f34a6e92dd9c879f2", "referenceCount": 37, "citationCount": 8532, "influentialCitationCount": 1404, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A comparative experimental study of image feature detectors and descriptors", "abstract": null, "year": 2015, "venue": "Machine Vision and Applications", "authors": [ "D. Mukherjee", "Q. M. J. 
Wu", "Guanghui Wang" ], "externalIds": { "MAG": "2028394888", "DBLP": "journals/mva/MukherjeeWW15", "DOI": "10.1007/s00138-015-0679-9", "CorpusId": 17001735 }, "url": "https://www.semanticscholar.org/paper/800d4a628c3df0eafaad9fc03ef9d9eaf1f8f314", "referenceCount": 47, "citationCount": 110, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "ImageNet: A large-scale hierarchical image database", "abstract": "The explosion of image data on the Internet has the potential to foster more sophisticated and robust models and algorithms to index, retrieve, organize and interact with images and multimedia data. But exactly how such data can be harnessed and organized remains a critical problem. We introduce here a new database called “ImageNet”, a large-scale ontology of images built upon the backbone of the WordNet structure. ImageNet aims to populate the majority of the 80,000 synsets of WordNet with an average of 500-1000 clean and full resolution images. This will result in tens of millions of annotated images organized by the semantic hierarchy of WordNet. This paper offers a detailed analysis of ImageNet in its current state: 12 subtrees with 5247 synsets and 3.2 million images in total. We show that ImageNet is much larger in scale and diversity and much more accurate than the current image datasets. Constructing such a large-scale database is a challenging task. We describe the data collection scheme with Amazon Mechanical Turk. Lastly, we illustrate the usefulness of ImageNet through three simple applications in object recognition, image classification and automatic object clustering. We hope that the scale, accuracy, diversity and hierarchical structure of ImageNet can offer unparalleled opportunities to researchers in the computer vision community and beyond.", "year": 2009, "venue": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "authors": [ "Jia Deng", "Wei Dong", "R. Socher", "Li-Jia Li", "K. Li", "Li Fei-Fei" ], "externalIds": { "DBLP": "conf/cvpr/DengDSLL009", "MAG": "2108598243", "DOI": "10.1109/CVPR.2009.5206848", "CorpusId": 57246310 }, "url": "https://www.semanticscholar.org/paper/d2c733e34d48784a37d717fe43d9e93277a8c53e", "referenceCount": 27, "citationCount": 56678, "influentialCitationCount": 8947, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Shape recognition with edge-based features", "abstract": "In this paper we describe an approach to recognizing poorly textured objects, that may contain holes and tubular parts, in cluttered scenes under arbitrary viewing conditions. To this end we develop a number of novel components. First, we introduce a new edge-based local feature detector that is invariant to similarity transformations. The features are localized on edges and a neighbourhood is estimated in a scale invariant manner. Second, the neighbourhood descriptor computed for foreground features is not affected by background clutter, even if the feature is on an object boundary. Third, the descriptor generalizes Lowe's SIFT method to edges. An object model is learnt from a single training image. The object is then recognized in new images in a series of steps which apply progressively tighter geometric restrictions. A final contribution of this work is to allow sufficient flexibility in the geometric representation that objects in the same visual class can be recognized. 
Results are demonstrated for various object classes including bikes and rackets.", "year": 2003, "venue": "British Machine Vision Conference", "authors": [ "K. Mikolajczyk", "Andrew Zisserman", "C. Schmid" ], "externalIds": { "MAG": "2056313036", "DBLP": "conf/bmvc/MikolajczykZS03", "DOI": "10.5244/C.17.79", "CorpusId": 10816036 }, "url": "https://www.semanticscholar.org/paper/5e80772f40e8ef924727c6c24168cadc3be0b856", "referenceCount": 26, "citationCount": 245, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Cognition, construction of knowledge, and teaching", "abstract": null, "year": 1989, "venue": "Synthese", "authors": [ "E. Glasersfeld" ], "externalIds": { "MAG": "2055150233", "DOI": "10.1007/BF00869951", "CorpusId": 46967038 }, "url": "https://www.semanticscholar.org/paper/edba34895f64881f3b7d57ce963ed8e8786bebb6", "referenceCount": 26, "citationCount": 1156, "influentialCitationCount": 52, "isOpenAccess": false, "fieldsOfStudy": [ "Philosophy" ] }, { "title": "[The physiology of color perception].", "abstract": null, "year": 1959, "venue": "Klinische Wochenschrift", "authors": [ "G. Brindley" ], "externalIds": { "MAG": "1969726624", "DOI": "10.1007/BF01491352", "CorpusId": 28969773, "PubMed": "13804457" }, "url": "https://www.semanticscholar.org/paper/f3c5371cb7a576fe1b96fe7d43788b78615f657b", "referenceCount": 12, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Biology", "Medicine" ] }, { "title": "Vicuna: An open-source chatbot impressing gpt-4 with 90%* chatgpt quality", "abstract": null, "year": 2023, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "A method for stochastic optimization", "abstract": null, "year": 2015, "venue": "International Conference on Learning Representations (ICLR)", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Interest Point Detector and Feature Descriptor Survey", "abstract": null, "year": 2014, "venue": "", "authors": [ "Scott Krig" ], "externalIds": { "MAG": "1882440334", "DOI": "10.1007/978-1-4302-5930-5_6", "CorpusId": 27395503 }, "url": "https://www.semanticscholar.org/paper/30fb3ccc03edc91b08f1d09198e81e1bab5dd4e6", "referenceCount": 684, "citationCount": 93, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Perceptual organization and visual recognition , volume 5", "abstract": null, "year": 2012, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Color in computer vision: fundamentals and applications", "abstract": null, "year": 2012, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "VISION A Computational Investigation into the Human Representation and Processing of Visual Information", "abstract": null, "year": 2009, "venue": "", "authors": [ "Shimon Ullman", "Tomaso A. 
Poggio" ], "externalIds": { "CorpusId": 84293972 }, "url": "https://www.semanticscholar.org/paper/52df763d69ed5fe609a4a88f07e97a6cfaadf59f", "referenceCount": 0, "citationCount": 2463, "influentialCitationCount": 179, "isOpenAccess": false, "fieldsOfStudy": null } ] }, "Patch Ranking: Efficient CLIP by Learning to Rank Local Patches": { "paper_title": "Patch Ranking: Efficient CLIP by Learning to Rank Local Patches", "arxiv_id": "2409.14607v1", "keyword": "vision language model", "authors": [ "Cheng-En Wu", "Jinhong Lin", "Yu Hen Hu", "Pedro Morgado" ], "references": [ { "title": "DiffRate : Differentiable Compression Rate for Efficient Vision Transformers", "abstract": "Token compression aims to speed up large-scale vision transformers (e.g. ViTs) by pruning (dropping) or merging tokens. It is an important but challenging task. Although recent advanced approaches achieved great success, they need to carefully handcraft a compression rate (i.e. number of tokens to remove), which is tedious and leads to sub-optimal performance. To tackle this problem, we propose Differentiable Compression Rate (DiffRate), a novel token compression method that has several appealing properties prior arts do not have. First, DiffRate enables propagating the loss function’s gradient onto the compression ratio, which is considered as a non-differentiable hyperparameter in previous work. In this case, different layers can automatically learn different compression rates layer-wisely without extra overhead. Second, token pruning and merging can be naturally performed simultaneously in DiffRate, while they were isolated in previous works. Third, extensive experiments demonstrate that DiffRate achieves state-of-the-art performance. For example, by applying the learned layer-wise compression rates to an off-the-shelf ViT-H (MAE) model, we achieve a 40% FLOPs reduction and a 1.5× throughput improvement, with a minor accuracy drop of 0.16% on ImageNet without fine-tuning, even outperforming previous methods with fine-tuning. Codes and models are available at https://github.com/OpenGVLab/DiffRate.", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Mengzhao Chen", "Wenqi Shao", "Peng Xu", "Mingbao Lin", "Kaipeng Zhang", "Fei Chao", "Rongrong Ji", "Y. Qiao", "Ping Luo" ], "externalIds": { "DBLP": "journals/corr/abs-2305-17997", "ArXiv": "2305.17997", "DOI": "10.1109/ICCV51070.2023.01574", "CorpusId": 258959271 }, "url": "https://www.semanticscholar.org/paper/78870d58ba638b32d4ab39a55d4102dca1cb1077", "referenceCount": 47, "citationCount": 22, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Joint Token Pruning and Squeezing Towards More Aggressive Compression of Vision Transformers", "abstract": "Although vision transformers (ViTs) have shown promising results in various computer vision tasks recently, their high computational cost limits their practical applications. Previous approaches that prune redundant tokens have demonstrated a good trade-off between performance and computation costs. Nevertheless, errors caused by pruning strategies can lead to significant information loss. Our quantitative experiments reveal that the impact of pruned tokens on performance should be noticeable. To address this issue, we propose a novel joint Token Pruning & Squeezing module (TPS) for compressing vision transformers with higher efficiency. Firstly, TPS adopts pruning to get the reserved and pruned subsets. 
Secondly, TPS squeezes the information of pruned tokens into partial reserved tokens via the unidirectional nearest-neighbor matching and similarity-based fusing steps. Compared to state-of-the-art methods, our approach outperforms them under all token pruning intensities. Especially while shrinking DeiT-tiny&small computational budgets to 35%, it improves the accuracy by 1%-6% compared with baselines on ImageNet classification. The proposed method can accelerate the throughput of DeiT-small beyond DeiT-tiny, while its accuracy surpasses DeiT-tiny by 4.78%. Experiments on various transformers demonstrate the effectiveness of our method, while analysis experiments prove our higher robustness to the errors of the token pruning policy. Code is available at https://github.com/megvii-research/TPS-CVPR2023.", "year": 2023, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Siyuan Wei", "Tianzhu Ye", "Shen Zhang", "Yao Tang", "Jiajun Liang" ], "externalIds": { "DBLP": "journals/corr/abs-2304-10716", "ArXiv": "2304.10716", "DOI": "10.1109/CVPR52729.2023.00208", "CorpusId": 258291892 }, "url": "https://www.semanticscholar.org/paper/bccf49f7cdf0662ef421b7c22537ef9b0b33e6e1", "referenceCount": 42, "citationCount": 35, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Beyond Attentive Tokens: Incorporating Token Importance and Diversity for Efficient Vision Transformers", "abstract": "Vision transformers have achieved significant improvements on various vision tasks but their quadratic interactions between tokens significantly reduce computational efficiency. Many pruning methods have been proposed to remove redundant tokens for efficient vision transformers recently. However, existing studies mainly focus on the token importance to preserve local attentive tokens but completely ignore the global token diversity. In this paper, we emphasize the cruciality of diverse global semantics and propose an efficient token decoupling and merging method that can jointly consider the token importance and diversity for token pruning. According to the class token attention, we decouple the attentive and inattentive tokens. In addition to preserving the most discriminative local tokens, we merge similar inattentive tokens and match homogeneous attentive tokens to maximize the token diversity. Despite its simplicity, our method obtains a promising trade-off between model complexity and classification accuracy. On DeiT-S, our method reduces the FLOPs by 35% with only a 0.2% accuracy drop. Notably, benefiting from maintaining the token diversity, our method can even improve the accuracy of DeiT-T by 0.1% after reducing its FLOPs by 40%.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Sifan Long", "Z. Zhao", "Jimin Pi", "Sheng-sheng Wang", "Jingdong Wang" ], "externalIds": { "DBLP": "conf/cvpr/LongZP0023", "ArXiv": "2211.11315", "DOI": "10.1109/CVPR52729.2023.00996", "CorpusId": 253734476 }, "url": "https://www.semanticscholar.org/paper/b6184aa4365008520742a166fc7e6bfd22988029", "referenceCount": 44, "citationCount": 21, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Token Merging: Your ViT But Faster", "abstract": "We introduce Token Merging (ToMe), a simple method to increase the throughput of existing ViT models without needing to train. 
ToMe gradually combines similar tokens in a transformer using a general and light-weight matching algorithm that is as fast as pruning while being more accurate. Off-the-shelf, ToMe can 2x the throughput of state-of-the-art ViT-L @ 512 and ViT-H @ 518 models on images and 2.2x the throughput of ViT-L on video with only a 0.2-0.3% accuracy drop in each case. ToMe can also easily be applied during training, improving in practice training speed up to 2x for MAE fine-tuning on video. Training with ToMe further minimizes accuracy drop, leading to 2x the throughput of ViT-B on audio for only a 0.4% mAP drop. Qualitatively, we find that ToMe merges object parts into one token, even over multiple frames of video. Overall, ToMe's accuracy and speed are competitive with state-of-the-art on images, video, and audio.", "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Daniel Bolya", "Cheng-Yang Fu", "Xiaoliang Dai", "Peizhao Zhang", "Christoph Feichtenhofer", "Judy Hoffman" ], "externalIds": { "ArXiv": "2210.09461", "DBLP": "conf/iclr/BolyaFDZFH23", "DOI": "10.48550/arXiv.2210.09461", "CorpusId": 252968113 }, "url": "https://www.semanticscholar.org/paper/1dff6b1b35e2d45d4db57c8b4e4395486c3e365f", "referenceCount": 59, "citationCount": 245, "influentialCitationCount": 61, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MaPLe: Multi-modal Prompt Learning", "abstract": "Pre-trained vision-language (V-L) models such as CLIP have shown excellent generalization ability to downstream tasks. However, they are sensitive to the choice of input text prompts and require careful selection of prompt templates to perform well. Inspired by the Natural Language Processing (NLP) literature, recent CLIP adaptation approaches learn prompts as the textual inputs to fine-tune CLIP for downstream tasks. We note that using prompting to adapt representations in a single branch of CLIP (language or vision) is sub-optimal since it does not allow the flexibility to dynamically adjust both representation spaces on a downstream task. In this work, we propose Multi-modal Prompt Learning (MaPLe) for both vision and language branches to improve alignment between the vision and language representations. Our design promotes strong coupling between the vision-language prompts to ensure mutual synergy and discourages learning independent uni-modal solutions. Further, we learn separate prompts across different early stages to progressively model the stage-wise feature relationships to allow rich context learning. We evaluate the effectiveness of our approach on three representative tasks of generalization to novel classes, new target datasets and unseen domain shifts. Compared with the state-of-the-art method Co-CoOp, MaPLe exhibits favorable performance and achieves an absolute gain of 3.45% on novel classes and 2.72% on overall harmonic-mean, averaged over 11 diverse image recognition datasets. Our code and pre-trained models are available at https://github.com/muzairkhattak/multimodal-prompt-learning.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Muhammad Uzair Khattak", "H. Rasheed", "Muhammad Maaz", "Salman H. Khan", "F. 
Khan" ], "externalIds": { "DBLP": "conf/cvpr/KhattakR0KK23", "ArXiv": "2210.03117", "DOI": "10.1109/CVPR52729.2023.01832", "CorpusId": 252735181 }, "url": "https://www.semanticscholar.org/paper/0d0dbfb1b315a43216020abaf74d289456198219", "referenceCount": 53, "citationCount": 309, "influentialCitationCount": 75, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Visual Prompt Tuning", "abstract": "The current modus operandi in adapting pre-trained models involves updating all the backbone parameters, ie, full fine-tuning. This paper introduces Visual Prompt Tuning (VPT) as an efficient and effective alternative to full fine-tuning for large-scale Transformer models in vision. Taking inspiration from recent advances in efficiently tuning large language models, VPT introduces only a small amount (less than 1% of model parameters) of trainable parameters in the input space while keeping the model backbone frozen. Via extensive experiments on a wide variety of downstream recognition tasks, we show that VPT achieves significant performance gains compared to other parameter efficient tuning protocols. Most importantly, VPT even outperforms full fine-tuning in many cases across model capacities and training data scales, while reducing per-task storage cost.", "year": 2022, "venue": "European Conference on Computer Vision", "authors": [ "Menglin Jia", "Luming Tang", "Bor-Chun Chen", "Claire Cardie", "Serge J. Belongie", "Bharath Hariharan", "S. Lim" ], "externalIds": { "DBLP": "journals/corr/abs-2203-12119", "ArXiv": "2203.12119", "DOI": "10.48550/arXiv.2203.12119", "CorpusId": 247618727 }, "url": "https://www.semanticscholar.org/paper/adb272fbdea3631059cf88ab764bb6c2ce29f965", "referenceCount": 98, "citationCount": 1034, "influentialCitationCount": 237, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Conditional Prompt Learning for Vision-Language Models", "abstract": "With the rise of powerful pre-trained vision-language models like CLIP, it becomes essential to investigate ways to adapt these models to downstream datasets. A recently proposed method named Context Optimization (CoOp) introduces the concept of prompt learning—a recent trend in NLP—to the vision domain for adapting pre-trained vision-language models. Specifically, CoOp turns context words in a prompt into a set of learnable vectors and, with only a few labeled images for learning, can achieve huge improvements over intensively-tuned manual prompts. In our study we identify a critical problem of CoOp: the learned context is not generalizable to wider unseen classes within the same dataset, suggesting that CoOp overfits base classes observed during training. To address the problem, we propose Conditional Context Optimization (CoCoOp), which extends CoOp by further learning a lightweight neural network to generate for each image an input-conditional token (vector). Compared to CoOp's static prompts, our dynamic prompts adapt to each instance and are thus less sensitive to class shift. Extensive experiments show that CoCoOp generalizes much better than CoOp to unseen classes, even showing promising transferability beyond a single dataset; and yields stronger domain generalization performance as well. 
Code is available at https://github.com/KaiyangZhou/CoOp.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Kaiyang Zhou", "Jingkang Yang", "Chen Change Loy", "Ziwei Liu" ], "externalIds": { "DBLP": "journals/corr/abs-2203-05557", "ArXiv": "2203.05557", "DOI": "10.1109/CVPR52688.2022.01631", "CorpusId": 247363011 }, "url": "https://www.semanticscholar.org/paper/b879450f50a6113f44a5baf0bcd5b4331eeb7bbc", "referenceCount": 67, "citationCount": 892, "influentialCitationCount": 183, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SPViT: Enabling Faster Vision Transformers via Latency-Aware Soft Token Pruning", "abstract": null, "year": 2021, "venue": "European Conference on Computer Vision", "authors": [ "Zhenglun Kong", "Peiyan Dong", "Xiaolong Ma", "Xin Meng", "Wei Niu", "Mengshu Sun", "Bin Ren", "Minghai Qin", "H. Tang", "Yanzhi Wang" ], "externalIds": { "DBLP": "conf/eccv/KongDMMNSSYRTQW22", "ArXiv": "2112.13890", "DOI": "10.1007/978-3-031-20083-0_37", "CorpusId": 245537400 }, "url": "https://www.semanticscholar.org/paper/722d71a19e4049b30a03d1028158881560432135", "referenceCount": 110, "citationCount": 90, "influentialCitationCount": 13, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A-ViT: Adaptive Tokens for Efficient Vision Transformer", "abstract": "We introduce A-ViT, a method that adaptively adjusts the inference cost of vision transformer (ViT) for images of different complexity. A-ViT achieves this by automatically reducing the number of tokens in vision transformers that are processed in the network as inference proceeds. We reformulate Adaptive Computation Time (ACT [17]) for this task, extending halting to discard redundant spatial tokens. The appealing architectural properties of vision transformers enables our adaptive token reduction mechanism to speed up inference without modifying the network architecture or inference hardware. We demonstrate that A-ViT requires no extra parameters or sub-network for halting, as we base the learning of adaptive halting on the original network parameters. We further introduce distributional prior regularization that stabilizes training compared to prior ACT approaches. On the image classification task (ImageNet1K), we show that our proposed A-ViT yields high efficacy in filtering informative spatial features and cutting down on the overall compute. The proposed method improves the throughput of DeiT-Tiny by 62% and DeiT-Small by 38% with only 0.3% accuracy drop, outperforming prior art by a large margin.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Hongxu Yin", "Arash Vahdat", "J. Álvarez", "Arun Mallya", "J. Kautz", "Pavlo Molchanov" ], "externalIds": { "ArXiv": "2112.07658", "DBLP": "conf/cvpr/YinVAMKM22", "DOI": "10.1109/CVPR52688.2022.01054", "CorpusId": 245131572 }, "url": "https://www.semanticscholar.org/paper/c2a0c18e810535db52e5ebaf180c64ce70356748", "referenceCount": 56, "citationCount": 230, "influentialCitationCount": 25, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "AdaViT: Adaptive Vision Transformers for Efficient Image Recognition", "abstract": "Built on top of self-attention mechanisms, vision transformers have demonstrated remarkable performance on a variety of tasks recently.
While achieving excellent performance, they still require relatively intensive computational cost that scales up drastically as the numbers of patches, self-attention heads and transformer blocks increase. In this paper, we argue that due to the large variations among images, their need for modeling long-range dependencies between patches differ. To this end, we introduce AdaViT, an adaptive computation framework that learns to derive usage policies on which patches, self-attention heads and transformer blocks to use throughout the backbone on a per-input basis, aiming to improve inference efficiency of vision transformers with a minimal drop of accuracy for image recognition. Optimized jointly with a transformer backbone in an end-to-end manner, a light-weight decision network is attached to the backbone to produce decisions on-the-fly. Extensive experiments on ImageNet demonstrate that our method obtains more than 2 × improvement on efficiency compared to state-of-the-art vision transformers with only 0.8% drop of accuracy, achieving good efficiency/accuracy trade-offs conditioned on different computational budgets. We further conduct quantitative and qualitative analysis on learned usage polices and provide more insights on the redundancy in vision transformers. Code is available at https://github.com/MengLcool/AdaViT.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Lingchen Meng", "Hengduo Li", "Bor-Chun Chen", "Shiyi Lan", "Zuxuan Wu", "Yu-Gang Jiang", "S. Lim" ], "externalIds": { "ArXiv": "2111.15668", "DBLP": "journals/corr/abs-2111-15668", "DOI": "10.1109/CVPR52688.2022.01199", "CorpusId": 244729636 }, "url": "https://www.semanticscholar.org/paper/38212997a6e8c55141574c329bb58d2eadcb0db5", "referenceCount": 61, "citationCount": 159, "influentialCitationCount": 12, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Adaptive Token Sampling for Efficient Vision Transformers", "abstract": null, "year": 2021, "venue": "European Conference on Computer Vision", "authors": [ "Mohsen Fayyaz", "Soroush Abbasi Koohpayegani", "F. Jafari", "Sunando Sengupta", "Hamid Reza Vaezi Joze", "Eric Sommerlade", "H. Pirsiavash", "Juergen Gall" ], "externalIds": { "ArXiv": "2111.15667", "DBLP": "conf/eccv/FayyazKJSJSPG22", "DOI": "10.1007/978-3-031-20083-0_24", "CorpusId": 251067151 }, "url": "https://www.semanticscholar.org/paper/8144ca1f78c045cb001815090bcf8a726e37e0ad", "referenceCount": 73, "citationCount": 101, "influentialCitationCount": 12, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning to Prompt for Vision-Language Models", "abstract": null, "year": 2021, "venue": "International Journal of Computer Vision", "authors": [ "Kaiyang Zhou", "Jingkang Yang", "Chen Change Loy", "Ziwei Liu" ], "externalIds": { "ArXiv": "2109.01134", "DBLP": "journals/ijcv/ZhouYLL22", "DOI": "10.1007/s11263-022-01653-1", "CorpusId": 237386023 }, "url": "https://www.semanticscholar.org/paper/96ea07447d2f9adefe03852a878517a2a6d45b96", "referenceCount": 61, "citationCount": 1486, "influentialCitationCount": 303, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Evo-ViT: Slow-Fast Token Evolution for Dynamic Vision Transformer", "abstract": "Vision transformers (ViTs) have recently received explosive popularity, but the huge computational cost is still a severe issue.
Since the computation complexity of ViT is quadratic with respect to the input sequence length, a mainstream paradigm for computation reduction is to reduce the number of tokens. Existing designs include structured spatial compression that uses a progressive shrinking pyramid to reduce the computations of large feature maps, and unstructured token pruning that dynamically drops redundant tokens. However, the limitation of existing token pruning lies in two folds: 1) the incomplete spatial structure caused by pruning is not compatible with structured spatial compression that is commonly used in modern deep-narrow transformers; 2) it usually requires a time-consuming pre-training procedure. To tackle the limitations and expand the applicable scenario of token pruning, we present Evo-ViT, a self-motivated slow-fast token evolution approach for vision transformers. Specifically, we conduct unstructured instance-wise token selection by taking advantage of the simple and effective global class attention that is native to vision transformers. Then, we propose to update the selected informative tokens and uninformative tokens with different computation paths, namely, slow-fast updating. Since slow-fast updating mechanism maintains the spatial structure and information flow, Evo-ViT can accelerate vanilla transformers of both flat and deep-narrow structures from the very beginning of the training process. Experimental results demonstrate that our method significantly reduces the computational cost of vision transformers while maintaining comparable performance on image classification. For example, our method accelerates DeiT-S by over 60% throughput while only sacrificing 0.4% top-1 accuracy on ImageNet-1K, outperforming current token pruning methods on both accuracy and efficiency.", "year": 2021, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Yifan Xu", "Zhijie Zhang", "Mengdan Zhang", "Kekai Sheng", "Ke Li", "Weiming Dong", "Liqing Zhang", "Changsheng Xu", "Xing Sun" ], "externalIds": { "ArXiv": "2108.01390", "DBLP": "conf/aaai/XuZZSLDZXS22", "DOI": "10.1609/aaai.v36i3.20202", "CorpusId": 236881638 }, "url": "https://www.semanticscholar.org/paper/d045133e6e022684329ff944d67f91888be1bc3b", "referenceCount": 42, "citationCount": 144, "influentialCitationCount": 19, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "IA-RED2: Interpretability-Aware Redundancy Reduction for Vision Transformers", "abstract": "The self-attention-based model, transformer, is recently becoming the leading backbone in the field of computer vision. In spite of the impressive success made by transformers in a variety of vision tasks, it still suffers from heavy computation and intensive memory costs. To address this limitation, this paper presents an Interpretability-Aware REDundancy REDuction framework (IA-RED$^2$). We start by observing a large amount of redundant computation, mainly spent on uncorrelated input patches, and then introduce an interpretable module to dynamically and gracefully drop these redundant patches. This novel framework is then extended to a hierarchical structure, where uncorrelated tokens at different stages are gradually removed, resulting in a considerable shrinkage of computational cost. We include extensive experiments on both image and video tasks, where our method could deliver up to 1.4x speed-up for state-of-the-art models like DeiT and TimeSformer, by only sacrificing less than 0.7% accuracy. 
More importantly, contrary to other acceleration approaches, our method is inherently interpretable with substantial visual evidence, making vision transformer closer to a more human-understandable architecture while being lighter. We demonstrate that the interpretability that naturally emerged in our framework can outperform the raw attention learned by the original visual transformer, as well as those generated by off-the-shelf interpretation methods, with both qualitative and quantitative results. Project Page: http://people.csail.mit.edu/bpan/ia-red/.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Bowen Pan", "Yifan Jiang", "Rameswar Panda", "Zhangyang Wang", "R. Feris", "A. Oliva" ], "externalIds": { "DBLP": "conf/nips/PanPJWFO21", "ArXiv": "2106.12620", "CorpusId": 235623729 }, "url": "https://www.semanticscholar.org/paper/e2f2662f0734e2edc2b4b36a734de111c7f8d54d", "referenceCount": 69, "citationCount": 119, "influentialCitationCount": 21, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Patch Slimming for Efficient Vision Transformers", "abstract": "This paper studies the efficiency problem for visual transformers by excavating redundant calculation in given networks. The recent transformer architecture has demonstrated its effectiveness for achieving excellent performance on a series of computer vision tasks. However, similar to that of convolutional neural networks, the huge computational cost of vision transformers is still a severe issue. Considering that the attention mechanism aggregates different patches layer-by-layer, we present a novel patch slimming approach that discards useless patches in a topdown paradigm. We first identify the effective patches in the last layer and then use them to guide the patch selection process of previous layers. For each layer, the impact of a patch on the final output feature is approximated and patches with less impacts will be removed. Experimental results on benchmark datasets demonstrate that the proposed method can significantly reduce the computational costs of vision transformers without affecting their performances. For example, over 45% FLOPs of the ViT-Ti model can be reduced with only 0.2% top-1 accuracy drop on the ImageNet dataset.", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Yehui Tang", "Kai Han", "Yunhe Wang", "Chang Xu", "Jianyuan Guo", "Chao Xu", "D. Tao" ], "externalIds": { "ArXiv": "2106.02852", "DBLP": "journals/corr/abs-2106-02852", "DOI": "10.1109/CVPR52688.2022.01185", "CorpusId": 235358476 }, "url": "https://www.semanticscholar.org/paper/33fd56e5067a1e8a9713378af3e1c1c08d5ce93b", "referenceCount": 49, "citationCount": 134, "influentialCitationCount": 17, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "DynamicViT: Efficient Vision Transformers with Dynamic Token Sparsification", "abstract": "Attention is sparse in vision transformers. We observe the final prediction in vision transformers is only based on a subset of most informative tokens, which is sufficient for accurate image recognition. Based on this observation, we propose a dynamic token sparsification framework to prune redundant tokens progressively and dynamically based on the input. Specifically, we devise a lightweight prediction module to estimate the importance score of each token given the current features. The module is added to different layers to prune redundant tokens hierarchically. 
To optimize the prediction module in an end-to-end manner, we propose an attention masking strategy to differentiably prune a token by blocking its interactions with other tokens. Benefiting from the nature of self-attention, the unstructured sparse tokens are still hardware friendly, which makes our framework easy to achieve actual speed-up. By hierarchically pruning 66% of the input tokens, our method greatly reduces 31%~37% FLOPs and improves the throughput by over 40% while the drop of accuracy is within 0.5% for various vision transformers. Equipped with the dynamic token sparsification framework, DynamicViT models can achieve very competitive complexity/accuracy trade-offs compared to state-of-the-art CNNs and vision transformers on ImageNet. Code is available at https://github.com/raoyongming/DynamicViT", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Yongming Rao", "Wenliang Zhao", "Benlin Liu", "Jiwen Lu", "Jie Zhou", "Cho-Jui Hsieh" ], "externalIds": { "DBLP": "conf/nips/RaoZLLZH21", "ArXiv": "2106.02034", "CorpusId": 235313562 }, "url": "https://www.semanticscholar.org/paper/dbdcabd0444ad50b68ee09e30f39b66e9068f5d2", "referenceCount": 39, "citationCount": 508, "influentialCitationCount": 101, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MLP-Mixer: An all-MLP Architecture for Vision", "abstract": "Convolutional Neural Networks (CNNs) are the go-to model for computer vision. Recently, attention-based networks, such as the Vision Transformer, have also become popular. In this paper we show that while convolutions and attention are both sufficient for good performance, neither of them are necessary. We present MLP-Mixer, an architecture based exclusively on multi-layer perceptrons (MLPs). MLP-Mixer contains two types of layers: one with MLPs applied independently to image patches (i.e.\"mixing\"the per-location features), and one with MLPs applied across patches (i.e.\"mixing\"spatial information). When trained on large datasets, or with modern regularization schemes, MLP-Mixer attains competitive scores on image classification benchmarks, with pre-training and inference cost comparable to state-of-the-art models. We hope that these results spark further research beyond the realms of well established CNNs and Transformers.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "I. Tolstikhin", "N. Houlsby", "Alexander Kolesnikov", "Lucas Beyer", "Xiaohua Zhai", "Thomas Unterthiner", "Jessica Yung", "Daniel Keysers", "Jakob Uszkoreit", "Mario Lucic", "Alexey Dosovitskiy" ], "externalIds": { "DBLP": "conf/nips/TolstikhinHKBZU21", "ArXiv": "2105.01601", "CorpusId": 233714958 }, "url": "https://www.semanticscholar.org/paper/67571d29190faea9fbd104acd16274f8c4edf254", "referenceCount": 65, "citationCount": 2146, "influentialCitationCount": 321, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Power of Scale for Parameter-Efficient Prompt Tuning", "abstract": "In this work, we explore “prompt tuning,” a simple yet effective mechanism for learning “soft prompts” to condition frozen language models to perform specific downstream tasks. Unlike the discrete text prompts used by GPT-3, soft prompts are learned through backpropagation and can be tuned to incorporate signals from any number of labeled examples. Our end-to-end learned approach outperforms GPT-3’s few-shot learning by a large margin. 
More remarkably, through ablations on model size using T5, we show that prompt tuning becomes more competitive with scale: as models exceed billions of parameters, our method “closes the gap” and matches the strong performance of model tuning (where all model weights are tuned). This finding is especially relevant because large models are costly to share and serve and the ability to reuse one frozen model for multiple downstream tasks can ease this burden. Our method can be seen as a simplification of the recently proposed “prefix tuning” of Li and Liang (2021) and we provide a comparison to this and other similar approaches. Finally, we show that conditioning a frozen model with soft prompts confers benefits in robustness to domain transfer and enables efficient “prompt ensembling.” We release code and model checkpoints to reproduce our experiments.", "year": 2021, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Brian Lester", "Rami Al-Rfou", "Noah Constant" ], "externalIds": { "DBLP": "journals/corr/abs-2104-08691", "ArXiv": "2104.08691", "ACL": "2021.emnlp-main.243", "DOI": "10.18653/v1/2021.emnlp-main.243", "CorpusId": 233296808 }, "url": "https://www.semanticscholar.org/paper/ffdbd7f0b03b85747b001b4734d5ee31b5229aa4", "referenceCount": 61, "citationCount": 3011, "influentialCitationCount": 358, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Transferable Visual Models From Natural Language Supervision", "abstract": "State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at https://github.com/OpenAI/CLIP.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Alec Radford", "Jong Wook Kim", "Chris Hallacy", "A. Ramesh", "Gabriel Goh", "Sandhini Agarwal", "Girish Sastry", "Amanda Askell", "Pamela Mishkin", "Jack Clark", "Gretchen Krueger", "I. 
Sutskever" ], "externalIds": { "ArXiv": "2103.00020", "DBLP": "conf/icml/RadfordKHRGASAM21", "CorpusId": 231591445 }, "url": "https://www.semanticscholar.org/paper/6f870f7f02a8c59c3e23f407f3ef00dd1dcf8fc4", "referenceCount": 220, "citationCount": 18886, "influentialCitationCount": 5013, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale", "abstract": "While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Alexey Dosovitskiy", "Lucas Beyer", "Alexander Kolesnikov", "Dirk Weissenborn", "Xiaohua Zhai", "Thomas Unterthiner", "Mostafa Dehghani", "Matthias Minderer", "G. Heigold", "S. Gelly", "Jakob Uszkoreit", "N. Houlsby" ], "externalIds": { "MAG": "3094502228", "ArXiv": "2010.11929", "DBLP": "conf/iclr/DosovitskiyB0WZ21", "CorpusId": 225039882 }, "url": "https://www.semanticscholar.org/paper/268d347e8a55b5eb82fb5e7d2f800e33c75ab18a", "referenceCount": 65, "citationCount": 28233, "influentialCitationCount": 4121, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Language Models are Few-Shot Learners", "abstract": "Recent work has demonstrated substantial gains on many NLP tasks and benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples. By contrast, humans can generally perform a new language task from only a few examples or from simple instructions - something which current NLP systems still largely struggle to do. Here we show that scaling up language models greatly improves task-agnostic, few-shot performance, sometimes even reaching competitiveness with prior state-of-the-art fine-tuning approaches. Specifically, we train GPT-3, an autoregressive language model with 175 billion parameters, 10x more than any previous non-sparse language model, and test its performance in the few-shot setting. For all tasks, GPT-3 is applied without any gradient updates or fine-tuning, with tasks and few-shot demonstrations specified purely via text interaction with the model. GPT-3 achieves strong performance on many NLP datasets, including translation, question-answering, and cloze tasks, as well as several tasks that require on-the-fly reasoning or domain adaptation, such as unscrambling words, using a novel word in a sentence, or performing 3-digit arithmetic. 
At the same time, we also identify some datasets where GPT-3's few-shot learning still struggles, as well as some datasets where GPT-3 faces methodological issues related to training on large web corpora. Finally, we find that GPT-3 can generate samples of news articles which human evaluators have difficulty distinguishing from articles written by humans. We discuss broader societal impacts of this finding and of GPT-3 in general.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Tom B. Brown", "Benjamin Mann", "Nick Ryder", "Melanie Subbiah", "J. Kaplan", "Prafulla Dhariwal", "Arvind Neelakantan", "Pranav Shyam", "Girish Sastry", "Amanda Askell", "Sandhini Agarwal", "Ariel Herbert-Voss", "Gretchen Krueger", "T. Henighan", "R. Child", "A. Ramesh", "Daniel M. Ziegler", "Jeff Wu", "Clemens Winter", "Christopher Hesse", "Mark Chen", "Eric Sigler", "Ma-teusz Litwin", "Scott Gray", "B. Chess", "Jack Clark", "Christopher Berner", "Sam McCandlish", "Alec Radford", "I. Sutskever", "Dario Amodei" ], "externalIds": { "ArXiv": "2005.14165", "DBLP": "conf/nips/BrownMRSKDNSSAA20", "MAG": "3030163527", "CorpusId": 218971783 }, "url": "https://www.semanticscholar.org/paper/90abbc2cf38462b954ae1b772fac9532e2ccd8b0", "referenceCount": 146, "citationCount": 30854, "influentialCitationCount": 3529, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Food-101 - Mining Discriminative Components with Random Forests", "abstract": null, "year": 2014, "venue": "European Conference on Computer Vision", "authors": [ "Lukas Bossard", "M. Guillaumin", "L. Gool" ], "externalIds": { "MAG": "12634471", "DBLP": "conf/eccv/BossardGG14", "DOI": "10.1007/978-3-319-10599-4_29", "CorpusId": 12726540 }, "url": "https://www.semanticscholar.org/paper/8e3f12804882b60ad5f59aad92755c5edb34860e", "referenceCount": 41, "citationCount": 1908, "influentialCitationCount": 260, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Describing Textures in the Wild", "abstract": "Patterns and textures are key characteristics of many natural objects: a shirt can be striped, the wings of a butterfly can be veined, and the skin of an animal can be scaly. Aiming at supporting this dimension in image understanding, we address the problem of describing textures with semantic attributes. We identify a vocabulary of forty-seven texture terms and use them to describe a large dataset of patterns collected \"in the wild\". The resulting Describable Textures Dataset (DTD) is a basis to seek the best representation for recognizing describable texture attributes in images. We port from object recognition to texture recognition the Improved Fisher Vector (IFV) and Deep Convolutional-network Activation Features (DeCAF), and show that surprisingly, they both outperform specialized texture descriptors not only on our problem, but also in established material recognition datasets. We also show that our describable attributes are excellent texture descriptors, transferring between datasets and tasks, in particular, combined with IFV and DeCAF, they significantly outperform the state-of-the-art by more than 10% on both FMD and KTH-TIPS-2b benchmarks. We also demonstrate that they produce intuitive descriptions of materials and Internet images.", "year": 2013, "venue": "2014 IEEE Conference on Computer Vision and Pattern Recognition", "authors": [ "Mircea Cimpoi", "Subhransu Maji", "Iasonas Kokkinos", "S. Mohamed", "A. 
Vedaldi" ], "externalIds": { "MAG": "2918062363", "ArXiv": "1311.3618", "DBLP": "journals/corr/CimpoiMKMV13", "DOI": "10.1109/CVPR.2014.461", "CorpusId": 4309276 }, "url": "https://www.semanticscholar.org/paper/18c125ce0f64e85577f7d30132cf0e92ec664bf4", "referenceCount": 45, "citationCount": 2176, "influentialCitationCount": 321, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning to rank for recommender systems", "abstract": "Recommender system aim at providing a personalized list of items ranked according to the preferences of the user, as such ranking methods are at the core of many recommendation algorithms. The topic of this tutorial focuses on the cutting-edge algorithmic development in the area of recommender systems. This tutorial will provide an in depth picture of the progress of ranking models in the field, summarizing the strengths and weaknesses of existing methods, and discussing open issues that could be promising for future research in the community. A qualitative and quantitative comparison between different models will be provided while we will also highlight recent developments in the areas of Reinforcement Learning.", "year": 2013, "venue": "ACM Conference on Recommender Systems", "authors": [ "Alexandros Karatzoglou", "L. Baltrunas", "Yue Shi" ], "externalIds": { "MAG": "1969147614", "DBLP": "conf/recsys/KaratzoglouBS13", "DOI": "10.1145/2507157.2508063", "CorpusId": 17176456 }, "url": "https://www.semanticscholar.org/paper/d46a52c2f229efbd42f2c3d9aca301ecfb7ba2a5", "referenceCount": 13, "citationCount": 137, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fine-Grained Visual Classification of Aircraft", "abstract": "This paper introduces FGVC-Aircraft, a new dataset containing 10,000 images of aircraft spanning 100 aircraft models, organised in a three-level hierarchy. At the finer level, differences between models are often subtle but always visually measurable, making visual recognition challenging but possible. A benchmark is obtained by defining corresponding classification tasks and evaluation protocols, and baseline results are presented. The construction of this dataset was made possible by the work of aircraft enthusiasts, a strategy that can extend to the study of number of other object classes. Compared to the domains usually considered in fine-grained visual classification (FGVC), for example animals, aircraft are rigid and hence less deformable. They, however, present other interesting modes of variation, including purpose, size, designation, structure, historical style, and branding.", "year": 2013, "venue": "arXiv.org", "authors": [ "Subhransu Maji", "Esa Rahtu", "Juho Kannala", "Matthew B. Blaschko", "A. Vedaldi" ], "externalIds": { "ArXiv": "1306.5151", "DBLP": "journals/corr/MajiRKBV13", "MAG": "1846799578", "CorpusId": 2118703 }, "url": "https://www.semanticscholar.org/paper/522d65a3db7431015aeaa201a7fc4450a57e40c3", "referenceCount": 5, "citationCount": 1850, "influentialCitationCount": 288, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "UCF101: A Dataset of 101 Human Actions Classes From Videos in The Wild", "abstract": "We introduce UCF101 which is currently the largest dataset of human actions. It consists of 101 action classes, over 13k clips and 27 hours of video data. The database consists of realistic user uploaded videos containing camera motion and cluttered background. 
Additionally, we provide baseline action recognition results on this new dataset using standard bag of words approach with overall performance of 44.5%. To the best of our knowledge, UCF101 is currently the most challenging dataset of actions due to its large number of classes, large number of clips and also unconstrained nature of such clips.", "year": 2012, "venue": "arXiv.org", "authors": [ "K. Soomro", "Amir Zamir", "M. Shah" ], "externalIds": { "MAG": "24089286", "ArXiv": "1212.0402", "DBLP": "journals/corr/abs-1212-0402", "CorpusId": 7197134 }, "url": "https://www.semanticscholar.org/paper/da9e411fcf740569b6b356f330a1d0fc077c8d7c", "referenceCount": 13, "citationCount": 5514, "influentialCitationCount": 1209, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Cats and dogs", "abstract": "We investigate the fine grained object categorization problem of determining the breed of animal from an image. To this end we introduce a new annotated dataset of pets covering 37 different breeds of cats and dogs. The visual problem is very challenging as these animals, particularly cats, are very deformable and there can be quite subtle differences between the breeds. We make a number of contributions: first, we introduce a model to classify a pet breed automatically from an image. The model combines shape, captured by a deformable part model detecting the pet face, and appearance, captured by a bag-of-words model that describes the pet fur. Fitting the model involves automatically segmenting the animal in the image. Second, we compare two classification approaches: a hierarchical one, in which a pet is first assigned to the cat or dog family and then to a breed, and a flat one, in which the breed is obtained directly. We also investigate a number of animal and image orientated spatial layouts. These models are very good: they beat all previously published results on the challenging ASIRRA test (cat vs dog discrimination). When applied to the task of discriminating the 37 different breeds of pets, the models obtain an average accuracy of about 59%, a very encouraging result considering the difficulty of the problem.", "year": 2012, "venue": "2012 IEEE Conference on Computer Vision and Pattern Recognition", "authors": [ "Omkar M. Parkhi", "A. Vedaldi", "Andrew Zisserman", "C. V. Jawahar" ], "externalIds": { "DBLP": "conf/cvpr/ParkhiVZJ12", "MAG": "1977295328", "DOI": "10.1109/CVPR.2012.6248092", "CorpusId": 383200 }, "url": "https://www.semanticscholar.org/paper/84b50ebe85f7a1721800125e7882fce8c45b5c5a", "referenceCount": 43, "citationCount": 1670, "influentialCitationCount": 178, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ImageNet: A large-scale hierarchical image database", "abstract": "The explosion of image data on the Internet has the potential to foster more sophisticated and robust models and algorithms to index, retrieve, organize and interact with images and multimedia data. But exactly how such data can be harnessed and organized remains a critical problem. We introduce here a new database called “ImageNet”, a large-scale ontology of images built upon the backbone of the WordNet structure. ImageNet aims to populate the majority of the 80,000 synsets of WordNet with an average of 500-1000 clean and full resolution images. This will result in tens of millions of annotated images organized by the semantic hierarchy of WordNet. 
This paper offers a detailed analysis of ImageNet in its current state: 12 subtrees with 5247 synsets and 3.2 million images in total. We show that ImageNet is much larger in scale and diversity and much more accurate than the current image datasets. Constructing such a large-scale database is a challenging task. We describe the data collection scheme with Amazon Mechanical Turk. Lastly, we illustrate the usefulness of ImageNet through three simple applications in object recognition, image classification and automatic object clustering. We hope that the scale, accuracy, diversity and hierarchical structure of ImageNet can offer unparalleled opportunities to researchers in the computer vision community and beyond.", "year": 2009, "venue": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "authors": [ "Jia Deng", "Wei Dong", "R. Socher", "Li-Jia Li", "K. Li", "Li Fei-Fei" ], "externalIds": { "DBLP": "conf/cvpr/DengDSLL009", "MAG": "2108598243", "DOI": "10.1109/CVPR.2009.5206848", "CorpusId": 57246310 }, "url": "https://www.semanticscholar.org/paper/d2c733e34d48784a37d717fe43d9e93277a8c53e", "referenceCount": 27, "citationCount": 56678, "influentialCitationCount": 8947, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning to rank for information retrieval", "abstract": null, "year": 2009, "venue": "Annual International ACM SIGIR Conference on Research and Development in Information Retrieval", "authors": [ "Tie-Yan Liu" ], "externalIds": { "MAG": "1973435495", "DBLP": "journals/ftir/Liu09", "DOI": "10.1007/978-3-642-14267-3", "CorpusId": 28826624 }, "url": "https://www.semanticscholar.org/paper/5fc5c5a4e489e781de434567d946e6eb65c44f60", "referenceCount": 260, "citationCount": 3034, "influentialCitationCount": 261, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Automated Flower Classification over a Large Number of Classes", "abstract": "We investigate to what extent combinations of features can improve classification performance on a large dataset of similar classes. To this end we introduce a 103 class flower dataset. We compute four different features for the flowers, each describing different aspects, namely the local shape/texture, the shape of the boundary, the overall spatial distribution of petals, and the colour. We combine the features using a multiple kernel framework with a SVM classifier. The weights for each class are learnt using the method of Varma and Ray, which has achieved state of the art performance on other large dataset, such as Caltech 101/256. Our dataset has a similar challenge in the number of classes, but with the added difficulty of large between class similarity and small within class similarity. Results show that learning the optimum kernel combination of multiple features vastly improves the performance, from 55.1% for the best single feature to 72.8% for the combination of all features.", "year": 2008, "venue": "2008 Sixth Indian Conference on Computer Vision, Graphics & Image Processing", "authors": [ "M. 
Nilsback", "Andrew Zisserman" ], "externalIds": { "DBLP": "conf/icvgip/NilsbackZ08", "MAG": "2533598788", "DOI": "10.1109/ICVGIP.2008.47", "CorpusId": 15193013 }, "url": "https://www.semanticscholar.org/paper/02b28f3b71138a06e40dbd614abf8568420ae183", "referenceCount": 19, "citationCount": 3310, "influentialCitationCount": 391, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Learning Generative Visual Models from Few Training Examples: An Incremental Bayesian Approach Tested on 101 Object Categories", "abstract": null, "year": 2004, "venue": "2004 Conference on Computer Vision and Pattern Recognition Workshop", "authors": [ "Li Fei-Fei", "R. Fergus", "P. Perona" ], "externalIds": { "MAG": "2155904486", "DBLP": "conf/cvpr/LiFP04", "DOI": "10.1016/j.cviu.2005.09.012", "CorpusId": 2156851 }, "url": "https://www.semanticscholar.org/paper/ed9db7b20e019cdb1c7db8b7921221ee2d9f36e2", "referenceCount": 22, "citationCount": 4650, "influentialCitationCount": 445, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "EViT: Expediting Vision Transformers via Token Reorganizations", "abstract": null, "year": 2022, "venue": "International Conference on Learning Representations", "authors": [ "Youwei Liang", "Chongjian Ge", "Zhan Tong", "Yibing Song", "Jue Wang", "P. Xie" ], "externalIds": { "DBLP": "conf/iclr/LiangGTS0X22", "CorpusId": 251647803 }, "url": "https://www.semanticscholar.org/paper/293535c2b0ef674e1ed9a7ba227e37cca35e5e4b", "referenceCount": 0, "citationCount": 74, "influentialCitationCount": 23, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "P-Tuning: Prompt Tuning Can Be Comparable to Fine-tuning Across Scales and Tasks", "abstract": "Prompt tuning, which only tunes continuous prompts with a frozen language model, substantially reduces per-task storage and memory usage at training. However, in the context of NLU, prior work reveals that prompt tuning does not perform well for normal-sized pretrained models. We also find that existing methods of prompt tuning cannot handle hard sequence labeling tasks, indicating a lack of universality. We present a novel empirical finding that properly optimized prompt tuning can be universally effective across a wide range of model scales and NLU tasks. It matches the performance of finetuning while having only 0.1%-3% tuned parameters. Our method P-Tuning v2 is an implementation of Deep Prompt Tuning (CITATION) optimized and adapted for NLU. Given the universality and simplicity of P-Tuning v2, we believe it can serve as an alternative to finetuning and a strong baseline for future research.", "year": 2022, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Xiao Liu", "Kaixuan Ji", "Yicheng Fu", "W. 
Tam", "Zhengxiao Du", "Zhilin Yang", "Jie Tang" ], "externalIds": { "DBLP": "conf/acl/LiuJFTDY022", "ACL": "2022.acl-short.8", "DOI": "10.18653/v1/2022.acl-short.8", "CorpusId": 248780177 }, "url": "https://www.semanticscholar.org/paper/ec936b808e0fab9281c050ad4010cddec92c8cbe", "referenceCount": 38, "citationCount": 523, "influentialCitationCount": 50, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] } ] }, "PromptTA: Prompt-driven Text Adapter for Source-free Domain Generalization": { "paper_title": "PromptTA: Prompt-driven Text Adapter for Source-free Domain Generalization", "arxiv_id": "2409.14163v1", "keyword": "vision language model", "authors": [ "Haoran Zhang", "Shuanghao Bai", "Wanqi Zhou", "Jingwen Fu", "Badong Chen" ], "references": [ { "title": "CapS-Adapter: Caption-based MultiModal Adapter in Zero-Shot Classification", "abstract": "Recent advances in vision-language foundational models, such as CLIP, have demonstrated significant strides in zero-shot classification. However, the extensive parameterization of models like CLIP necessitates a resource-intensive fine-tuning process. In response, TIP-Adapter and SuS-X have introduced training-free methods aimed at bolstering the efficacy of downstream tasks. While these approaches incorporate support sets to maintain data distribution consistency between knowledge cache and test sets, they often fall short in terms of generalization on the test set, particularly when faced with test data exhibiting substantial distributional variations. In this work, we present CapS-Adapter, an innovative method that employs a caption-based support set, effectively harnessing both image and caption features to exceed existing state-of-the-art techniques in training-free scenarios. CapS-Adapter adeptly constructs support sets that closely mirror target distributions, utilizing instance-level distribution features extracted from multimodal large models. By leveraging CLIP's single and cross-modal strengths, CapS-Adapter enhances predictive accuracy through the use of multimodal support sets. Our method achieves outstanding zero-shot classification results across 19 benchmark datasets, improving accuracy by 2.19\\% over the previous leading method. Our contributions are substantiated through extensive validation on multiple benchmark datasets, demonstrating superior performance and robust generalization capabilities. Our code is made publicly available at https://github.com/WLuLi/CapS-Adapter.", "year": 2024, "venue": "arXiv.org", "authors": [ "Qijie Wang", "Guandu Liu", "Bin Wang" ], "externalIds": { "ArXiv": "2405.16591", "DBLP": "journals/corr/abs-2405-16591", "DOI": "10.48550/arXiv.2405.16591", "CorpusId": 270063567 }, "url": "https://www.semanticscholar.org/paper/30b00365e0e2c6994ad4dfc456e0d706aefde27d", "referenceCount": 33, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Soft Prompt Generation for Domain Generalization", "abstract": "Large pre-trained vision language models (VLMs) have shown impressive zero-shot ability on downstream tasks with manually designed prompt. To further adapt VLMs to downstream tasks, soft prompt is proposed to replace manually designed prompt, which undergoes fine-tuning based on specific domain data. Prior prompt learning methods primarily learn a fixed prompt or residuled prompt from training samples. However, the learned prompts lack diversity and ignore information about unseen domains. 
In this paper, we reframe the prompt learning framework from a generative perspective and propose a simple yet efficient method for the Domain Generalization (DG) task, namely Soft Prompt Generation (SPG). Specifically, SPG consists of a two-stage training phase and an inference phase. During the training phase, we introduce soft prompt label for each domain, aiming to incorporate the generative model domain knowledge. During the inference phase, the generator of the generative model is employed to obtain instance-specific soft prompts for the unseen target domain. Extensive experiments on five domain generalization benchmarks of three DG tasks demonstrate that SPG achieves state-of-the-art performance. The code is available at https://github.com/renytek13/Soft-Prompt-Generation-with-CGAN.", "year": 2024, "venue": "arXiv.org", "authors": [ "Shuanghao Bai", "Yuedi Zhang", "Wanqi Zhou", "Zhirong Luan", "Badong Chen" ], "externalIds": { "DBLP": "journals/corr/abs-2404-19286", "ArXiv": "2404.19286", "DOI": "10.48550/arXiv.2404.19286", "CorpusId": 269457455 }, "url": "https://www.semanticscholar.org/paper/c36cb4a41369369d837ea170397f7818d02150dd", "referenceCount": 39, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Meta-Adapter: An Online Few-shot Learner for Vision-Language Model", "abstract": "The contrastive vision-language pre-training, known as CLIP, demonstrates remarkable potential in perceiving open-world visual concepts, enabling effective zero-shot image recognition. Nevertheless, few-shot learning methods based on CLIP typically require offline fine-tuning of the parameters on few-shot samples, resulting in longer inference time and the risk of over-fitting in certain domains. To tackle these challenges, we propose the Meta-Adapter, a lightweight residual-style adapter, to refine the CLIP features guided by the few-shot samples in an online manner. With a few training samples, our method can enable effective few-shot learning capabilities and generalize to unseen data or tasks without additional fine-tuning, achieving competitive performance and high efficiency. Without bells and whistles, our approach outperforms the state-of-the-art online few-shot learning method by an average of 3.6\\% on eight image classification datasets with higher inference speed. Furthermore, our model is simple and flexible, serving as a plug-and-play module directly applicable to downstream tasks. Without further fine-tuning, Meta-Adapter obtains notable performance improvements in open-vocabulary object detection and segmentation tasks.", "year": 2023, "venue": "Neural Information Processing Systems", "authors": [ "Cheng Cheng", "Lin Song", "Ruoyi Xue", "Hang Wang", "Hongbin Sun", "Yixiao Ge", "Ying Shan" ], "externalIds": { "ArXiv": "2311.03774", "DBLP": "conf/nips/ChengSXW0GS23", "DOI": "10.48550/arXiv.2311.03774", "CorpusId": 265043127 }, "url": "https://www.semanticscholar.org/paper/41abe0e65be6505e113d39ff936e2ef031835f0a", "referenceCount": 70, "citationCount": 7, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Sentence Speaks a Thousand Images: Domain Generalization through Distilling CLIP with Language Guidance", "abstract": "Domain generalization studies the problem of training a model with samples from several domains (or distributions) and then testing the model with samples from a new, unseen domain. 
In this paper, we propose a novel approach for domain generalization that leverages recent advances in large vision-language models, specifically a CLIP teacher model, to train a smaller model that generalizes to unseen domains. The key technical contribution is a new type of regularization that requires the student’s learned image representations to be close to the teacher’s learned text representations obtained from encoding the corresponding text descriptions of images. We introduce two designs of the loss function, absolute and relative distance, which provide specific guidance on how the training process of the student model should be regularized. We evaluate our proposed method, dubbed RISE (Regularized Invariance with Semantic Embeddings), on various benchmark datasets, and show that it outperforms several state-of-the-art domain generalization methods. To our knowledge, our work is the first to leverage knowledge distillation using a large vision-language model for domain generalization. By incorporating text-based information, RISE improves the generalization capability of machine learning models.", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Zeyi Huang", "Andy Zhou", "Zijian Lin", "Mu Cai", "Haohan Wang", "Yong Jae Lee" ], "externalIds": { "DBLP": "journals/corr/abs-2309-12530", "ArXiv": "2309.12530", "DOI": "10.1109/ICCV51070.2023.01073", "CorpusId": 262217079 }, "url": "https://www.semanticscholar.org/paper/c96f079177d68033d2f1c6a6d95b20bc0288038a", "referenceCount": 86, "citationCount": 14, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "PromptStyler: Prompt-driven Style Generation for Source-free Domain Generalization", "abstract": "In a joint vision-language space, a text feature (e.g., from \"a photo of a dog\") could effectively represent its relevant image features (e.g., from dog photos). Also, a recent study has demonstrated the cross-modal transferability phenomenon of this joint space. From these observations, we propose PromptStyler which simulates various distribution shifts in the joint space by synthesizing diverse styles via prompts without using any images to deal with source-free domain generalization. The proposed method learns to generate a variety of style features (from \"a S∗ style of a\") via learnable style word vectors for pseudo-words S∗. To ensure that learned styles do not distort content information, we force style-content features (from \"a S∗ style of a [class]\") to be located nearby their corresponding content features (from \"[class]\") in the joint vision-language space. After learning style word vectors, we train a linear classifier using synthesized style-content features. 
PromptStyler achieves the state of the art on PACS, VLCS, OfficeHome and DomainNet, even though it does not require any images for training.", "year": 2023, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Junhyeong Cho", "Gilhyun Nam", "Sungyeon Kim", "Hunmin Yang", "Suha Kwak" ], "externalIds": { "ArXiv": "2307.15199", "DBLP": "conf/iccv/ChoNKYK23", "DOI": "10.1109/ICCV51070.2023.01439", "CorpusId": 260316052 }, "url": "https://www.semanticscholar.org/paper/3580d855dfe151885a3515cd818eb300bb33fb37", "referenceCount": 76, "citationCount": 31, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "MaPLe: Multi-modal Prompt Learning", "abstract": "Pre-trained vision-language (V-L) models such as CLIP have shown excellent generalization ability to downstream tasks. However, they are sensitive to the choice of input text prompts and require careful selection of prompt templates to perform well. Inspired by the Natural Language Processing (NLP) literature, recent CLIP adaptation approaches learn prompts as the textual inputs to fine-tune CLIP for downstream tasks. We note that using prompting to adapt representations in a single branch of CLIP (language or vision) is sub-optimal since it does not allow the flexibility to dynamically adjust both representation spaces on a downstream task. In this work, we propose Multi-modal Prompt Learning (MaPLe) for both vision and language branches to improve alignment between the vision and language representations. Our design promotes strong coupling between the vision-language prompts to ensure mutual synergy and discourages learning independent uni-modal solutions. Further, we learn separate prompts across different early stages to progressively model the stage-wise feature relationships to allow rich context learning. We evaluate the effectiveness of our approach on three representative tasks of generalization to novel classes, new target datasets and unseen domain shifts. Compared with the state-of-the-art method Co-CoOp, MaPLe exhibits favorable performance and achieves an absolute gain of 3.45% on novel classes and 2.72% on overall harmonic-mean, averaged over 11 diverse image recognition datasets. Our code and pre-trained models are available at https://github.com/muzairkhattak/multimodal-prompt-learning.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Muhammad Uzair Khattak", "H. Rasheed", "Muhammad Maaz", "Salman H. Khan", "F. Khan" ], "externalIds": { "DBLP": "conf/cvpr/KhattakR0KK23", "ArXiv": "2210.03117", "DOI": "10.1109/CVPR52729.2023.01832", "CorpusId": 252735181 }, "url": "https://www.semanticscholar.org/paper/0d0dbfb1b315a43216020abaf74d289456198219", "referenceCount": 53, "citationCount": 309, "influentialCitationCount": 75, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Domain-Unified Prompt Representations for Source-Free Domain Generalization", "abstract": "Domain generalization (DG), aiming to make models work on unseen domains, is a surefire way toward general artificial intelligence. Limited by the scale and diversity of current DG datasets, it is difficult for existing methods to scale to diverse domains in open-world scenarios (e.g., science fiction and pixelate style). Therefore, the source-free domain generalization (SFDG) task is necessary and challenging. 
To address this issue, we propose an approach based on large-scale vision-language pretraining models (e.g., CLIP), which exploits the extensive domain information embedded in it. The proposed scheme generates diverse prompts from a domain bank that contains many more diverse domains than existing DG datasets. Furthermore, our method yields domain-unified representations from these prompts, thus being able to cope with samples from open-world domains. Extensive experiments on mainstream DG datasets, namely PACS, VLCS, OfficeHome, and DomainNet, show that the proposed method achieves competitive performance compared to state-of-the-art (SOTA) DG methods that require source domain data for training. Besides, we collect a small datasets consists of two domains to evaluate the open-world domain generalization ability of the proposed method. The source code and the dataset will be made publicly available at https://github.com/muse1998/Source-Free-Domain-Generalization", "year": 2022, "venue": "arXiv.org", "authors": [ "Hongjing Niu", "Hanting Li", "Feng Zhao", "Bin Li" ], "externalIds": { "ArXiv": "2209.14926", "DBLP": "journals/corr/abs-2209-14926", "DOI": "10.48550/arXiv.2209.14926", "CorpusId": 252596109 }, "url": "https://www.semanticscholar.org/paper/f1f54c6ed60fb2187d7372d302bff186249fec3a", "referenceCount": 59, "citationCount": 14, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Tip-Adapter: Training-free Adaption of CLIP for Few-shot Classification", "abstract": "Contrastive Vision-Language Pre-training, known as CLIP, has provided a new paradigm for learning visual representations using large-scale image-text pairs. It shows impressive performance on downstream tasks by zero-shot knowledge transfer. To further enhance CLIP's adaption capability, existing methods proposed to fine-tune additional learnable modules, which significantly improves the few-shot performance but introduces extra training time and computational resources. In this paper, we propose a training-free adaption method for CLIP to conduct few-shot classification, termed as Tip-Adapter, which not only inherits the training-free advantage of zero-shot CLIP but also performs comparably to those training-required approaches. Tip-Adapter constructs the adapter via a key-value cache model from the few-shot training set, and updates the prior knowledge encoded in CLIP by feature retrieval. On top of that, the performance of Tip-Adapter can be further boosted to be state-of-the-art on ImageNet by fine-tuning the cache model for 10$\\times$ fewer epochs than existing methods, which is both effective and efficient. We conduct extensive experiments of few-shot classification on 11 datasets to demonstrate the superiority of our proposed methods. Code is released at https://github.com/gaopengcuhk/Tip-Adapter.", "year": 2022, "venue": "European Conference on Computer Vision", "authors": [ "Renrui Zhang", "Zhang Wei", "Rongyao Fang", "Peng Gao", "Kunchang Li", "Jifeng Dai", "Y. 
Qiao", "Hongsheng Li" ], "externalIds": { "DBLP": "journals/corr/abs-2207-09519", "ArXiv": "2207.09519", "DOI": "10.48550/arXiv.2207.09519", "CorpusId": 250698940 }, "url": "https://www.semanticscholar.org/paper/a32887af7eb1fcfd3b3d892ad41f3516a37f11c1", "referenceCount": 79, "citationCount": 172, "influentialCitationCount": 45, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Domain Generalization by Mutual-Information Regularization with Pre-trained Models", "abstract": "Domain generalization (DG) aims to learn a generalized model to an unseen target domain using only limited source domains. Previous attempts to DG fail to learn domain-invariant representations only from the source domains due to the significant domain shifts between training and test domains. Instead, we re-formulate the DG objective using mutual information with the oracle model, a model generalized to any possible domain. We derive a tractable variational lower bound via approximating the oracle model by a pre-trained model, called Mutual Information Regularization with Oracle (MIRO). Our extensive experiments show that MIRO significantly improves the out-of-distribution performance. Furthermore, our scaling experiments show that the larger the scale of the pre-trained model, the greater the performance improvement of MIRO. Source code is available at https://github.com/kakaobrain/miro.", "year": 2022, "venue": "European Conference on Computer Vision", "authors": [ "Junbum Cha", "Kyungjae Lee", "Sungrae Park", "Sanghyuk Chun" ], "externalIds": { "DBLP": "journals/corr/abs-2203-10789", "ArXiv": "2203.10789", "DOI": "10.48550/arXiv.2203.10789", "CorpusId": 247594612 }, "url": "https://www.semanticscholar.org/paper/b9dc7f44768dcb18c4a3ad8dafa727defbb280ba", "referenceCount": 76, "citationCount": 106, "influentialCitationCount": 27, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Conditional Prompt Learning for Vision-Language Models", "abstract": "With the rise of powerful pre-trained vision-language models like CLIP, it becomes essential to investigate ways to adapt these models to downstream datasets. A recently proposed method named Context Optimization (CoOp) introduces the concept of prompt learning—a recent trend in NLP—to the vision domain for adapting pre-trained vision-language models. Specifically, CoOp turns context words in a prompt into a set of learnable vectors and, with only a few labeled images for learning, can achieve huge improvements over intensively-tuned manual prompts. In our study we identify a critical problem of CoOp: the learned context is not generalizable to wider unseen classes within the same dataset, suggesting that CoOp overfits base classes observed during training. To address the problem, we propose Conditional Context Optimization (CoCoOp), which extends CoOp by further learning a lightweight neural network to generate for each image an input-conditional token (vector). Compared to CoOp's static prompts, our dynamic prompts adapt to each instance and are thus less sensitive to class shift. Extensive experiments show that CoCoOp generalizes much better than CoOp to unseen classes, even showing promising transferability beyond a single dataset; and yields stronger domain generalization performance as well. 
Code is available at https://github.com/KaiyangZhou/CoOp.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Kaiyang Zhou", "Jingkang Yang", "Chen Change Loy", "Ziwei Liu" ], "externalIds": { "DBLP": "journals/corr/abs-2203-05557", "ArXiv": "2203.05557", "DOI": "10.1109/CVPR52688.2022.01631", "CorpusId": 247363011 }, "url": "https://www.semanticscholar.org/paper/b879450f50a6113f44a5baf0bcd5b4331eeb7bbc", "referenceCount": 67, "citationCount": 892, "influentialCitationCount": 183, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Domain-Adjusted Regression or: ERM May Already Learn Features Sufficient for Out-of-Distribution Generalization", "abstract": "A common explanation for the failure of deep networks to generalize out-of-distribution is that they fail to recover the “correct” features. We challenge this notion with a simple experiment which suggests that ERM already learns sufficient features and that the current bottleneck is not feature learning, but robust regression . Our findings also imply that given a small amount of data from the target distribution, retraining only the last linear layer will give excellent performance. We therefore argue that devising simpler methods for learning predictors on existing features is a promising direction for future research. Towards this end, we introduce Domain-Adjusted Regression (DARE), a convex objective for learning a linear predictor that is provably robust under a new model of distribution shift. Rather than learning one function, DARE performs a domain-specific adjustment to unify the domains in a canonical latent space and learns to predict in this space. Under a natural model, we prove that the DARE solution is the minimax-optimal predictor for a constrained set of test distributions. Further, we provide the first finite-environment convergence guarantee to the minimax risk, improving over existing analyses which only yield minimax predictors after an environment threshold. Evaluated on finetuned features, we find that DARE compares favorably to prior methods, consistently achieving equal or better performance.", "year": 2022, "venue": "arXiv.org", "authors": [ "Elan Rosenfeld", "Pradeep Ravikumar", "Andrej Risteski" ], "externalIds": { "DBLP": "journals/corr/abs-2202-06856", "ArXiv": "2202.06856", "CorpusId": 246823246 }, "url": "https://www.semanticscholar.org/paper/241f8a90da2a9ec13ca44be5b602585bde4f92b7", "referenceCount": 53, "citationCount": 68, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Optimal Representations for Covariate Shift", "abstract": "Machine learning systems often experience a distribution shift between training and testing. In this paper, we introduce a simple variational objective whose optima are exactly the set of all representations on which risk minimizers are guaranteed to be robust to any distribution shift that preserves the Bayes predictor, e.g., covariate shifts. Our objective has two components. First, a representation must remain discriminative for the task, i.e., some predictor must be able to simultaneously minimize the source and target risk. Second, the representation's marginal support needs to be the same across source and target. We make this practical by designing self-supervised objectives that only use unlabelled data and augmentations to train robust representations. 
Our objectives give insights into the robustness of CLIP, and further improve CLIP's representations to achieve SOTA results on DomainBed.", "year": 2021, "venue": "International Conference on Learning Representations", "authors": [ "Yangjun Ruan", "Yann Dubois", "Chris J. Maddison" ], "externalIds": { "ArXiv": "2201.00057", "DBLP": "journals/corr/abs-2201-00057", "CorpusId": 245650182 }, "url": "https://www.semanticscholar.org/paper/5382d9bc17aabfd47b7c7d9873d2b64fdde48305", "referenceCount": 73, "citationCount": 60, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "CLIP-Adapter: Better Vision-Language Models with Feature Adapters", "abstract": null, "year": 2021, "venue": "International Journal of Computer Vision", "authors": [ "Peng Gao", "Shijie Geng", "Renrui Zhang", "Teli Ma", "Rongyao Fang", "Yongfeng Zhang", "Hongsheng Li", "Y. Qiao" ], "externalIds": { "DBLP": "journals/corr/abs-2110-04544", "ArXiv": "2110.04544", "DOI": "10.1007/s11263-023-01891-x", "CorpusId": 238583492 }, "url": "https://www.semanticscholar.org/paper/c04067f03fba2df0c14ea51a170f213eb2983708", "referenceCount": 54, "citationCount": 640, "influentialCitationCount": 98, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning to Prompt for Vision-Language Models", "abstract": null, "year": 2021, "venue": "International Journal of Computer Vision", "authors": [ "Kaiyang Zhou", "Jingkang Yang", "Chen Change Loy", "Ziwei Liu" ], "externalIds": { "ArXiv": "2109.01134", "DBLP": "journals/ijcv/ZhouYLL22", "DOI": "10.1007/s11263-022-01653-1", "CorpusId": 237386023 }, "url": "https://www.semanticscholar.org/paper/96ea07447d2f9adefe03852a878517a2a6d45b96", "referenceCount": 61, "citationCount": 1486, "influentialCitationCount": 303, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Reducing Domain Gap by Reducing Style Bias", "abstract": "Convolutional Neural Networks (CNNs) often fail to maintain their performance when they confront new test domains, which is known as the problem of domain shift. Recent studies suggest that one of the main causes of this problem is CNNs’ strong inductive bias towards image styles (i.e. textures) which are sensitive to domain changes, rather than contents (i.e. shapes). Inspired by this, we propose to reduce the intrinsic style bias of CNNs to close the gap between domains. Our Style-Agnostic Networks (SagNets) disentangle style encodings from class categories to prevent style biased predictions and focus more on the contents. Extensive experiments show that our method effectively reduces the style bias and makes the model more robust under domain shift. 
It achieves remarkable performance improvements in a wide range of cross-domain tasks including domain generalization, unsupervised domain adaptation, and semi-supervised domain adaptation on multiple datasets.1", "year": 2021, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Hyeonseob Nam", "Hyunjae Lee", "Jongchan Park", "Wonjun Yoon", "Donggeun Yoo" ], "externalIds": { "DBLP": "conf/cvpr/NamLPYY21", "DOI": "10.1109/CVPR46437.2021.00858", "CorpusId": 235719382 }, "url": "https://www.semanticscholar.org/paper/94001e6bdf94fd61be3fba6ab9e6e77fbd888867", "referenceCount": 51, "citationCount": 294, "influentialCitationCount": 38, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Domain Generalization: A Survey", "abstract": "Generalization to out-of-distribution (OOD) data is a capability natural to humans yet challenging for machines to reproduce. This is because most learning algorithms strongly rely on the i.i.d. assumption on source/target data, which is often violated in practice due to domain shift. Domain generalization (DG) aims to achieve OOD generalization by using only source data for model learning. Over the last ten years, research in DG has made great progress, leading to a broad spectrum of methodologies, e.g., those based on domain alignment, meta-learning, data augmentation, or ensemble learning, to name a few; DG has also been studied in various application areas including computer vision, speech recognition, natural language processing, medical imaging, and reinforcement learning. In this paper, for the first time a comprehensive literature review in DG is provided to summarize the developments over the past decade. Specifically, we first cover the background by formally defining DG and relating it to other relevant fields like domain adaptation and transfer learning. Then, we conduct a thorough review into existing methods and theories. Finally, we conclude this survey with insights and discussions on future research directions.", "year": 2021, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Kaiyang Zhou", "Ziwei Liu", "Y. Qiao", "T. Xiang", "Chen Change Loy" ], "externalIds": { "DBLP": "journals/pami/ZhouLQXL23", "ArXiv": "2103.02503", "DOI": "10.1109/TPAMI.2022.3195549", "CorpusId": 232104764, "PubMed": "35914036" }, "url": "https://www.semanticscholar.org/paper/b249fe4e5e2bada6655ce5d61e7f50da5d471cb4", "referenceCount": 309, "citationCount": 729, "influentialCitationCount": 41, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Learning Transferable Visual Models From Natural Language Supervision", "abstract": "State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. 
We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at https://github.com/OpenAI/CLIP.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Alec Radford", "Jong Wook Kim", "Chris Hallacy", "A. Ramesh", "Gabriel Goh", "Sandhini Agarwal", "Girish Sastry", "Amanda Askell", "Pamela Mishkin", "Jack Clark", "Gretchen Krueger", "I. Sutskever" ], "externalIds": { "ArXiv": "2103.00020", "DBLP": "conf/icml/RadfordKHRGASAM21", "CorpusId": 231591445 }, "url": "https://www.semanticscholar.org/paper/6f870f7f02a8c59c3e23f407f3ef00dd1dcf8fc4", "referenceCount": 220, "citationCount": 18886, "influentialCitationCount": 5013, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "SWAD: Domain Generalization by Seeking Flat Minima", "abstract": "Domain generalization (DG) methods aim to achieve generalizability to an unseen target domain by using only training data from the source domains. Although a variety of DG methods have been proposed, a recent study shows that under a fair evaluation protocol, called DomainBed, the simple empirical risk minimization (ERM) approach works comparable to or even outperforms previous methods. Unfortunately, simply solving ERM on a complex, non-convex loss function can easily lead to sub-optimal generalizability by seeking sharp minima. In this paper, we theoretically show that finding flat minima results in a smaller domain generalization gap. We also propose a simple yet effective method, named Stochastic Weight Averaging Densely (SWAD), to find flat minima. SWAD finds flatter minima and suffers less from overfitting than does the vanilla SWA by a dense and overfit-aware stochastic weight sampling strategy. SWAD shows state-of-the-art performances on five DG benchmarks, namely PACS, VLCS, OfficeHome, TerraIncognita, and DomainNet, with consistent and large margins of +1.6% averagely on out-of-domain accuracy. We also compare SWAD with conventional generalization methods, such as data augmentation and consistency regularization methods, to verify that the remarkable performance improvements are originated from by seeking flat minima, not from better in-domain generalizability. Last but not least, SWAD is readily adaptable to existing DG methods without modification; the combination of SWAD and an existing DG method further improves DG performances. 
Source code is available at https://github.com/khanrc/swad.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Junbum Cha", "Sanghyuk Chun", "Kyungjae Lee", "Han-Cheol Cho", "Seunghyun Park", "Yunsung Lee", "Sungrae Park" ], "externalIds": { "ArXiv": "2102.08604", "DBLP": "conf/nips/ChaCLCPLP21", "CorpusId": 235367622 }, "url": "https://www.semanticscholar.org/paper/4d87a9f6a0bc9c67088193402813da5cba3f06c1", "referenceCount": 73, "citationCount": 340, "influentialCitationCount": 66, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale", "abstract": "While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Alexey Dosovitskiy", "Lucas Beyer", "Alexander Kolesnikov", "Dirk Weissenborn", "Xiaohua Zhai", "Thomas Unterthiner", "Mostafa Dehghani", "Matthias Minderer", "G. Heigold", "S. Gelly", "Jakob Uszkoreit", "N. Houlsby" ], "externalIds": { "MAG": "3094502228", "ArXiv": "2010.11929", "DBLP": "conf/iclr/DosovitskiyB0WZ21", "CorpusId": 225039882 }, "url": "https://www.semanticscholar.org/paper/268d347e8a55b5eb82fb5e7d2f800e33c75ab18a", "referenceCount": 65, "citationCount": 28233, "influentialCitationCount": 4121, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Self-Challenging Improves Cross-Domain Generalization", "abstract": null, "year": 2020, "venue": "European Conference on Computer Vision", "authors": [ "Zeyi Huang", "Haohan Wang", "E. Xing", "Dong Huang" ], "externalIds": { "DBLP": "journals/corr/abs-2007-02454", "ArXiv": "2007.02454", "MAG": "3106845355", "DOI": "10.1007/978-3-030-58536-5_8", "CorpusId": 220363892 }, "url": "https://www.semanticscholar.org/paper/09472ff0d3c3f975ef1fdc02cfb1605d3d4275fa", "referenceCount": 39, "citationCount": 523, "influentialCitationCount": 90, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "In Search of Lost Domain Generalization", "abstract": "The goal of domain generalization algorithms is to predict well on distributions different from those seen during training. While a myriad of domain generalization algorithms exist, inconsistencies in experimental conditions -- datasets, architectures, and model selection criteria -- render fair and realistic comparisons difficult. In this paper, we are interested in understanding how useful domain generalization algorithms are in realistic settings. As a first step, we realize that model selection is non-trivial for domain generalization tasks. 
Contrary to prior work, we argue that domain generalization algorithms without a model selection strategy should be regarded as incomplete. Next, we implement DomainBed, a testbed for domain generalization including seven multi-domain datasets, nine baseline algorithms, and three model selection criteria. We conduct extensive experiments using DomainBed and find that, when carefully implemented, empirical risk minimization shows state-of-the-art performance across all datasets. Looking forward, we hope that the release of DomainBed, along with contributions from fellow researchers, will streamline reproducible and rigorous research in domain generalization.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Ishaan Gulrajani", "David Lopez-Paz" ], "externalIds": { "DBLP": "journals/corr/abs-2007-01434", "MAG": "3039431315", "ArXiv": "2007.01434", "CorpusId": 220347682 }, "url": "https://www.semanticscholar.org/paper/6a5efb990b6558c21d9fdded4884c00ba152cb7c", "referenceCount": 135, "citationCount": 956, "influentialCitationCount": 259, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Towards Universal Representation Learning for Deep Face Recognition", "abstract": "Recognizing wild faces is extremely hard as they appear with all kinds of variations. Traditional methods either train with specifically annotated variation data from target domains, or by introducing unlabeled target variation data to adapt from the training data. Instead, we propose a universal representation learning framework that can deal with larger variation unseen in the given training data without leveraging target domain knowledge. We firstly synthesize training data alongside some semantically meaningful variations, such as low resolution, occlusion and head pose. However, directly feeding the augmented data for training will not converge well as the newly introduced samples are mostly hard examples. We propose to split the feature embedding into multiple sub-embeddings, and associate different confidence values for each sub-embedding to smooth the training procedure. The sub-embeddings are further decorrelated by regularizing variation classification loss and variation adversarial loss on different partitions of them. Experiments show that our method achieves top performance on general face recognition datasets such as LFW and MegaFace, while significantly better on extreme benchmarks such as TinyFace and IJB-S.", "year": 2020, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Yichun Shi", "Xiang Yu", "Kihyuk Sohn", "Manmohan Chandraker", "Anil K. Jain" ], "externalIds": { "MAG": "3034302825", "ArXiv": "2002.11841", "DBLP": "conf/cvpr/Shi0SC020", "DOI": "10.1109/cvpr42600.2020.00685", "CorpusId": 211532315 }, "url": "https://www.semanticscholar.org/paper/8878428c0edb28fadf45cd2d97d1718c3a0ebbce", "referenceCount": 47, "citationCount": 138, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Invariant Risk Minimization", "abstract": "We introduce Invariant Risk Minimization (IRM), a learning paradigm to estimate invariant correlations across multiple training distributions. To achieve this goal, IRM learns a data representation such that the optimal classifier, on top of that data representation, matches for all training distributions. 
Through theory and experiments, we show how the invariances learned by IRM relate to the causal structures governing the data and enable out-of-distribution generalization.", "year": 2019, "venue": "arXiv.org", "authors": [ "Martín Arjovsky", "L. Bottou", "Ishaan Gulrajani", "David Lopez-Paz" ], "externalIds": { "MAG": "2953494151", "DBLP": "journals/corr/abs-1907-02893", "ArXiv": "1907.02893", "CorpusId": 195820364 }, "url": "https://www.semanticscholar.org/paper/753b7a701adc1b6072378bd048cfa8567885d9c7", "referenceCount": 77, "citationCount": 1862, "influentialCitationCount": 469, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Addressing Model Vulnerability to Distributional Shifts Over Image Transformation Sets", "abstract": "We are concerned with the vulnerability of computer vision models to distributional shifts. We formulate a combinatorial optimization problem that allows evaluating the regions in the image space where a given model is more vulnerable, in terms of image transformations applied to the input, and face it with standard search algorithms. We further embed this idea in a training procedure, where we define new data augmentation rules according to the image transformations that the current model is most vulnerable to, over iterations. An empirical evaluation on classification and semantic segmentation problems suggests that the devised algorithm allows to train models that are more robust against content-preserving image manipulations and, in general, against distributional shifts.", "year": 2019, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Riccardo Volpi", "Vittorio Murino" ], "externalIds": { "MAG": "3010293082", "DBLP": "conf/iccv/VolpiM19", "DOI": "10.1109/ICCV.2019.00807", "CorpusId": 263895301 }, "url": "https://www.semanticscholar.org/paper/364e22bab28881db03865c11d3728cdeb1fdb9f3", "referenceCount": 66, "citationCount": 58, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Moment Matching for Multi-Source Domain Adaptation", "abstract": "Conventional unsupervised domain adaptation (UDA) assumes that training data are sampled from a single domain. This neglects the more practical scenario where training data are collected from multiple sources, requiring multi-source domain adaptation. We make three major contributions towards addressing this problem. First, we collect and annotate by far the largest UDA dataset, called DomainNet, which contains six domains and about 0.6 million images distributed among 345 categories, addressing the gap in data availability for multi-source UDA research. Second, we propose a new deep learning approach, Moment Matching for Multi-Source Domain Adaptation (M3SDA), which aims to transfer knowledge learned from multiple labeled source domains to an unlabeled target domain by dynamically aligning moments of their feature distributions. Third, we provide new theoretical insights specifically for moment matching approaches in both single and multiple source domain adaptation. Extensive experiments are conducted to demonstrate the power of our new dataset in benchmarking state-of-the-art multi-source domain adaptation methods, as well as the advantage of our proposed model. 
Dataset and Code are available at http://ai.bu.edu/M3SDA/", "year": 2018, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Xingchao Peng", "Qinxun Bai", "Xide Xia", "Zijun Huang", "Kate Saenko", "Bo Wang" ], "externalIds": { "DBLP": "conf/iccv/PengBXHSW19", "MAG": "2902524664", "ArXiv": "1812.01754", "DOI": "10.1109/ICCV.2019.00149", "CorpusId": 54458071 }, "url": "https://www.semanticscholar.org/paper/3217278e346fefbd34f0727321059c7ea5792612", "referenceCount": 65, "citationCount": 1523, "influentialCitationCount": 414, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Domain Generalization via Conditional Invariant Adversarial Networks", "abstract": null, "year": 2018, "venue": "European Conference on Computer Vision", "authors": [ "Ya Li", "Xinmei Tian", "Mingming Gong", "Yajing Liu", "Tongliang Liu", "Kun Zhang", "D. Tao" ], "externalIds": { "DBLP": "conf/eccv/LiTGLLZT18", "MAG": "2894728917", "DOI": "10.1007/978-3-030-01267-0_38", "CorpusId": 52956008 }, "url": "https://www.semanticscholar.org/paper/a60540a8407fd117fd8e6857d4728e661f53dcc8", "referenceCount": 38, "citationCount": 631, "influentialCitationCount": 90, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deeper, Broader and Artier Domain Generalization", "abstract": "The problem of domain generalization is to learn from multiple training domains, and extract a domain-agnostic model that can then be applied to an unseen domain. Domain generalization (DG) has a clear motivation in contexts where there are target domains with distinct characteristics, yet sparse data for training. For example recognition in sketch images, which are distinctly more abstract and rarer than photos. Nevertheless, DG methods have primarily been evaluated on photo-only benchmarks focusing on alleviating the dataset bias where both problems of domain distinctiveness and data sparsity can be minimal. We argue that these benchmarks are overly straightforward, and show that simple deep learning baselines perform surprisingly well on them. In this paper, we make two main contributions: Firstly, we build upon the favorable domain shift-robust properties of deep learning methods, and develop a low-rank parameterized CNN model for end-to-end DG learning. Secondly, we develop a DG benchmark dataset covering photo, sketch, cartoon and painting domains. This is both more practically relevant, and harder (bigger domain shift) than existing benchmarks. The results show that our method outperforms existing DG alternatives, and our dataset provides a more significant DG challenge to drive future research.", "year": 2017, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Da Li", "Yongxin Yang", "Yi-Zhe Song", "Timothy M. Hospedales" ], "externalIds": { "DBLP": "journals/corr/abs-1710-03077", "MAG": "2763549966", "ArXiv": "1710.03077", "DOI": "10.1109/ICCV.2017.591", "CorpusId": 6037691 }, "url": "https://www.semanticscholar.org/paper/b1e7f07965a53491690bd31fdab626bfac606eae", "referenceCount": 34, "citationCount": 1243, "influentialCitationCount": 324, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Hashing Network for Unsupervised Domain Adaptation", "abstract": "In recent years, deep neural networks have emerged as a dominant machine learning tool for a wide variety of application domains. 
However, training a deep neural network requires a large amount of labeled data, which is an expensive process in terms of time, labor and human expertise. Domain adaptation or transfer learning algorithms address this challenge by leveraging labeled data in a different, but related source domain, to develop a model for the target domain. Further, the explosive growth of digital data has posed a fundamental challenge concerning its storage and retrieval. Due to its storage and retrieval efficiency, recent years have witnessed a wide application of hashing in a variety of computer vision applications. In this paper, we first introduce a new dataset, Office-Home, to evaluate domain adaptation algorithms. The dataset contains images of a variety of everyday objects from multiple domains. We then propose a novel deep learning framework that can exploit labeled source data and unlabeled target data to learn informative hash codes, to accurately classify unseen target data. To the best of our knowledge, this is the first research effort to exploit the feature learning capabilities of deep neural networks to learn representative hash codes to address the domain adaptation problem. Our extensive empirical studies on multiple transfer tasks corroborate the usefulness of the framework in learning efficient hash codes which outperform existing competitive baselines for unsupervised domain adaptation.", "year": 2017, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Hemanth Venkateswara", "José Eusébio", "Shayok Chakraborty", "S. Panchanathan" ], "externalIds": { "MAG": "2951111165", "ArXiv": "1706.07522", "DBLP": "conf/cvpr/VenkateswaraECP17", "DOI": "10.1109/CVPR.2017.572", "CorpusId": 2928248 }, "url": "https://www.semanticscholar.org/paper/b8ebda42e272d3617375118542d4675a0c0e501d", "referenceCount": 51, "citationCount": 1804, "influentialCitationCount": 311, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Residual Learning for Image Recognition", "abstract": "Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers - 8× deeper than VGG nets [40] but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions1, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation.", "year": 2015, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Kaiming He", "X. 
Zhang", "Shaoqing Ren", "Jian Sun" ], "externalIds": { "DBLP": "conf/cvpr/HeZRS16", "MAG": "2949650786", "ArXiv": "1512.03385", "DOI": "10.1109/cvpr.2016.90", "CorpusId": 206594692 }, "url": "https://www.semanticscholar.org/paper/2c03df8b48bf3fa39054345bafabfeff15bfd11d", "referenceCount": 53, "citationCount": 172713, "influentialCitationCount": 28229, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Unbiased Metric Learning: On the Utilization of Multiple Datasets and Web Images for Softening Bias", "abstract": "Many standard computer vision datasets exhibit biases due to a variety of sources including illumination condition, imaging system, and preference of dataset collectors. Biases like these can have downstream effects in the use of vision datasets in the construction of generalizable techniques, especially for the goal of the creation of a classification system capable of generalizing to unseen and novel datasets. In this work we propose Unbiased Metric Learning (UML), a metric learning approach, to achieve this goal. UML operates in the following two steps: (1) By varying hyper parameters, it learns a set of less biased candidate distance metrics on training examples from multiple biased datasets. The key idea is to learn a neighborhood for each example, which consists of not only examples of the same category from the same dataset, but those from other datasets. The learning framework is based on structural SVM. (2) We do model validation on a set of weakly-labeled web images retrieved by issuing class labels as keywords to search engine. The metric with best validation performance is selected. Although the web images sometimes have noisy labels, they often tend to be less biased, which makes them suitable for the validation set in our task. Cross-dataset image classification experiments are carried out. Results show significant performance improvement on four well-known computer vision datasets.", "year": 2013, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Chen Fang", "Ye Xu", "D. Rockmore" ], "externalIds": { "MAG": "2167366427", "DBLP": "conf/iccv/FangXR13", "DOI": "10.1109/ICCV.2013.208", "CorpusId": 722896 }, "url": "https://www.semanticscholar.org/paper/10478ed2892f24c49ca0be1588c1c0e29841abb1", "referenceCount": 26, "citationCount": 345, "influentialCitationCount": 84, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "ImageNet: A large-scale hierarchical image database", "abstract": "The explosion of image data on the Internet has the potential to foster more sophisticated and robust models and algorithms to index, retrieve, organize and interact with images and multimedia data. But exactly how such data can be harnessed and organized remains a critical problem. We introduce here a new database called “ImageNet”, a large-scale ontology of images built upon the backbone of the WordNet structure. ImageNet aims to populate the majority of the 80,000 synsets of WordNet with an average of 500-1000 clean and full resolution images. This will result in tens of millions of annotated images organized by the semantic hierarchy of WordNet. This paper offers a detailed analysis of ImageNet in its current state: 12 subtrees with 5247 synsets and 3.2 million images in total. We show that ImageNet is much larger in scale and diversity and much more accurate than the current image datasets. Constructing such a large-scale database is a challenging task. 
We describe the data collection scheme with Amazon Mechanical Turk. Lastly, we illustrate the usefulness of ImageNet through three simple applications in object recognition, image classification and automatic object clustering. We hope that the scale, accuracy, diversity and hierarchical structure of ImageNet can offer unparalleled opportunities to researchers in the computer vision community and beyond.", "year": 2009, "venue": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "authors": [ "Jia Deng", "Wei Dong", "R. Socher", "Li-Jia Li", "K. Li", "Li Fei-Fei" ], "externalIds": { "DBLP": "conf/cvpr/DengDSLL009", "MAG": "2108598243", "DOI": "10.1109/CVPR.2009.5206848", "CorpusId": 57246310 }, "url": "https://www.semanticscholar.org/paper/d2c733e34d48784a37d717fe43d9e93277a8c53e", "referenceCount": 27, "citationCount": 56678, "influentialCitationCount": 8947, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Test-Time Classifier Adjustment Module for Model-Agnostic Domain Generalization", "abstract": "This paper presents a new algorithm for domain generalization (DG), test-time template adjuster (T3A) , aiming to robustify a model to unknown distribution shift. Unlike existing methods that focus on training phase , our method focuses test phase , i.e., correcting its prediction by itself during test time. Specifically, T3A adjusts a trained linear classifier (the last layer of deep neural networks) with the following procedure: (1) compute a pseudo-prototype representation for each class using online unlabeled data augmented by the base classifier trained in the source domains, (2) and then classify each sample based on its distance to the pseudo-prototypes. T3A is back-propagation-free and modifies only the linear layer; therefore, the increase in computational cost during inference is negligible and avoids the catastrophic failure might caused by stochastic optimization. Despite its simplicity, T3A can leverage knowledge about the target domain by using off-the-shelf test-time data and improve performance. We tested our method on four domain generalization benchmarks, namely PACS, VLCS, OfficeHome, and TerraIncognita, along with various backbone networks including ResNet18, ResNet50, Big Transfer (BiT), Vision Transformers (ViT), and MLP-Mixer. The results show T3A stably improves performance on unseen domains across choices of backbone networks, and outperforms existing domain generalization methods.", "year": 2021, "venue": "Neural Information Processing Systems", "authors": [ "Yusuke Iwasawa", "Yutaka Matsuo" ], "externalIds": { "DBLP": "conf/nips/IwasawaM21", "CorpusId": 244958432 }, "url": "https://www.semanticscholar.org/paper/f3522f72b44ba67a04133a718a26e1c6c59fe9c1", "referenceCount": 73, "citationCount": 188, "influentialCitationCount": 31, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Visualizing Data using t-SNE", "abstract": "We present a new technique called “t-SNE” that visualizes high-dimensional data by giving each datapoint a location in a two or three-dimensional map. The technique is a variation of Stochastic Neighbor Embedding (Hinton and Roweis, 2002) that is much easier to optimize, and produces significantly better visualizations by reducing the tendency to crowd points together in the center of the map. t-SNE is better than existing techniques at creating a single map that reveals structure at many different scales. 
This is particularly important for high-dimensional data that lie on several different, but related, low-dimensional manifolds, such as images of objects from multiple classes seen from multiple viewpoints. For visualizing the structure of very large datasets, we show how t-SNE can use random walks on neighborhood graphs to allow the implicit structure of all of the data to influence the way in which a subset of the data is displayed. We illustrate the performance of t-SNE on a wide variety of datasets and compare it with many other non-parametric visualization techniques, including Sammon mapping, Isomap, and Locally Linear Embedding. The visualizations produced by t-SNE are significantly better than those produced by the other techniques on almost all of the datasets.", "year": 2008, "venue": "", "authors": [ "L. Maaten", "Geoffrey E. Hinton" ], "externalIds": { "MAG": "2187089797", "CorpusId": 5855042 }, "url": "https://www.semanticscholar.org/paper/1c46943103bd7b7a2c7be86859995a4144d1938b", "referenceCount": 41, "citationCount": 37233, "influentialCitationCount": 1191, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] } ] }, "Explaining Explaining": { "paper_title": "Explaining Explaining", "arxiv_id": "2409.18052v2", "keyword": "explainable ai", "authors": [ "Sergei Nirenburg", "Marjorie McShane", "Kenneth W. Goodman", "Sanjay Oruganti" ], "references": [ { "title": "HARMONIC: A Framework for Explanatory Cognitive Robots", "abstract": "We present HARMONIC, a framework for implementing cognitive robots that transforms general-purpose robots into trusted teammates capable of complex decision-making, natural communication and human-level explanation. The framework supports interoperability between a strategic (cognitive) layer for high-level decision-making and a tactical (robot) layer for low-level control and execution. We describe the core features of the framework and our initial implementation, in which HARMONIC was deployed on a simulated UGV and drone involved in a multi-robot search and retrieval task.", "year": 2024, "venue": "", "authors": [ "Sanjay Oruganti", "S. Nirenburg", "Marjorie J. McShane", "Jesse English", "Michael K. Roberts", "Christian Arndt" ], "externalIds": { "ArXiv": "2409.18037", "CorpusId": 272910586 }, "url": "https://www.semanticscholar.org/paper/ae3e1be4cfbb1129eeff40bb68ac9d74a0cc0cbc", "referenceCount": 20, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "HARMONIC: Cognitive and Control Collaboration in Human-Robotic Teams", "abstract": "This paper presents a novel approach to multi-robot planning and collaboration. We demonstrate a cognitive strategy for robots in human-robot teams that incorporates metacognition, natural language communication, and explainability. The system is embodied using the HARMONIC architecture that flexibly integrates cognitive and control capabilities across the team. We evaluate our approach through simulation experiments involving a joint search task by a team of heterogeneous robots (a UGV and a drone) and a human. We detail the system's handling of complex, real-world scenarios, effective action coordination between robots with different capabilities, and natural human-robot communication. 
This work demonstrates that the robots' ability to reason about plans, goals, and attitudes, and to provide explanations for actions and decisions are essential prerequisites for realistic human-robot teaming.", "year": 2024, "venue": "", "authors": [ "Sanjay Oruganti", "S. Nirenburg", "Marjorie J. McShane", "Jesse English", "Michael K. Roberts", "Christian Arndt" ], "externalIds": { "ArXiv": "2409.18047", "CorpusId": 272910816 }, "url": "https://www.semanticscholar.org/paper/61f4480a07423dca153cd603e3986761dd235a61", "referenceCount": 0, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Explainable Artificial Intelligence (XAI)", "abstract": "Explainable Artificial Intelligence (XAI) has emerged as a critical facet in the realm of machine learning and artificial intelligence, responding to the increasing complexity of models, particularly deep neural networks, and the subsequent need for transparent decision making processes. This research paper delves into the essence of XAI, unraveling its significance across diverse domains such as healthcare, finance, and criminal justice. As a countermeasure to the opacity of intricate models, the paper explores various XAI methods and techniques, including LIME and SHAP, weighing their interpretability against computational efficiency and accuracy. Through an examination of real-world applications, the research elucidates how XAI not only enhances decision-making processes but also influences user trust and acceptance in AI systems. However, the paper also scrutinizes the delicate balance between interpretability and performance, shedding light on instances where the pursuit of accuracy may compromise explain-ability. Additionally, it navigates through the current challenges and limitations in XAI, the regulatory landscape surrounding AI explain-ability, and offers insights into future trends and directions, fostering a comprehensive understanding of XAI's present state and future potential.", "year": 2023, "venue": "International Journal of Food and Nutritional Science", "authors": [ "Ranu Sewada", "Ashwani Jangid", "Piyush Kumar", "Neha Mishra" ], "externalIds": { "DOI": "10.48047/ijfans/v12/i1/271", "CorpusId": 260436532 }, "url": "https://www.semanticscholar.org/paper/e1d2f2a717aa03280126f87c8e5fad695f52bf7c", "referenceCount": 33, "citationCount": 623, "influentialCitationCount": 32, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "An Empirical Survey on Explainable AI Technologies: Recent Trends, Use-Cases, and Categories from Technical and Application Perspectives", "abstract": "In a wide range of industries and academic fields, artificial intelligence is becoming increasingly prevalent. AI models are taking on more crucial decision-making tasks as they grow in popularity and performance. Although AI models, particularly machine learning models, are successful in research, they have numerous limitations and drawbacks in practice. Furthermore, due to the lack of transparency behind their behavior, users need more understanding of how these models make specific decisions, especially in complex state-of-the-art machine learning algorithms. Complex machine learning systems utilize less transparent algorithms, thereby exacerbating the problem. This survey analyzes the significance and evolution of explainable AI (XAI) research across various domains and applications. 
Throughout this study, a rich repository of explainability classifications and summaries has been developed, along with their applications and practical use cases. We believe this study will make it easier for researchers to understand all explainability methods and access their applications simultaneously.", "year": 2023, "venue": "Electronics", "authors": [ "Mohammad Nagahisarchoghaei", "Nasheen Nur", "Logan Cummins", "Nashtarin Nur", "Mirhossein Mousavi Karimi", "Shreya Nandanwar", "S. Bhattacharyya", "Shahram Rahimi" ], "externalIds": { "DOI": "10.3390/electronics12051092", "CorpusId": 257174555 }, "url": "https://www.semanticscholar.org/paper/3b881aa4eb844fb1f64eced6dbb6c1bed54584c4", "referenceCount": 0, "citationCount": 21, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "A survey on XAI and natural language explanations", "abstract": null, "year": 2023, "venue": "Information Processing & Management", "authors": [ "E. Cambria", "Lorenzo Malandri", "Fabio Mercorio", "Mario Mezzanzanica", "Navid Nobani" ], "externalIds": { "DBLP": "journals/ipm/CambriaMMMN23", "DOI": "10.1016/j.ipm.2022.103111", "CorpusId": 253106353 }, "url": "https://www.semanticscholar.org/paper/a2c68dcfb842c327044dd15bcf4b62b1d22c57c3", "referenceCount": 144, "citationCount": 62, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Theoretical Framework for AI Models Explainability with Application in Biomedicine", "abstract": "EXplainable Artificial Intelligence (XAI) is a vibrant research topic in the artificial intelligence community. It is raising growing interest across methods and domains, especially those involving high stake decision-making, such as the biomedical sector. Much has been written about the subject, yet XAI still lacks shared terminology and a framework capable of providing structural soundness to explanations. In our work, we address these issues by proposing a novel definition of explanation that synthesizes what can be found in the literature. We recognize that explanations are not atomic but the combination of evidence stemming from the model and its input-output mapping, and the human interpretation of this evidence. Furthermore, we fit explanations into the properties of faithfulness (i.e., the explanation is an accurate description of the model’s inner workings and decision-making process) and plausibility (i.e., how much the explanation seems convincing to the user). Our theoretical framework simplifies how these properties are operationalized, and it provides new insights into common explanation methods that we analyze as case studies. We also discuss the impact that our framework could have in biomedicine, a very sensitive application domain where XAI can have a central role in generating trust.", "year": 2022, "venue": "IEEE Symposium on Computational Intelligence in Bioinformatics and Computational Biology", "authors": [ "Matteo Rizzo", "Alberto Veneri", "A. Albarelli", "C. Lucchese", "C. 
Conati" ], "externalIds": { "DBLP": "conf/cibcb/RizzoVALNC23", "ArXiv": "2212.14447", "DOI": "10.1109/CIBCB56990.2023.10264877", "CorpusId": 255341085 }, "url": "https://www.semanticscholar.org/paper/daac1f7834dfdbff12dbe3303a7d5e9365f67e96", "referenceCount": 42, "citationCount": 3, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A psychological theory of explainability", "abstract": "The goal of explainable Artificial Intelligence (XAI) is to generate human-interpretable explanations, but there are no computationally precise theories of how humans interpret AI generated explanations. The lack of theory means that validation of XAI must be done empirically, on a case-by-case basis, which prevents systematic theory-building in XAI. We propose a psychological theory of how humans draw conclusions from saliency maps, the most common form of XAI explanation, which for the first time allows for precise prediction of explainee inference conditioned on explanation. Our theory posits that absent explanation humans expect the AI to make similar decisions to themselves, and that they interpret an explanation by comparison to the explanations they themselves would give. Comparison is formalized via Shepard's universal law of generalization in a similarity space, a classic theory from cognitive science. A pre-registered user study on AI image classifications with saliency map explanations demonstrate that our theory quantitatively matches participants' predictions of the AI.", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "Scott Cheng-Hsin Yang", "Tomas Folke", "Patrick Shafto" ], "externalIds": { "DBLP": "conf/icml/YangFS22", "ArXiv": "2205.08452", "DOI": "10.48550/arXiv.2205.08452", "CorpusId": 248834131 }, "url": "https://www.semanticscholar.org/paper/80c62eb7d86b5ed290213fee42f5139d3dc12962", "referenceCount": 43, "citationCount": 14, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Human-Centered Explainable AI (HCXAI): Beyond Opening the Black-Box of AI", "abstract": "Explainability of AI systems is crucial to hold them accountable because they are increasingly becoming consequential in our lives by powering high-stakes decisions in domains like healthcare and law. When it comes to Explainable AI (XAI), understanding who interacts with the black-box of AI is just as important as “opening” it, if not more. Yet the discourse of XAI has been predominantly centered around the black-box, suffering from deficiencies in meeting user needs and exacerbating issues of algorithmic opacity. To address these issues, researchers have called for human-centered approaches to XAI. In this second CHI workshop on Human-centered XAI (HCXAI), we build on the success of the first installment from CHI 2021 to expand the conversation around XAI. We chart the domain and shape the HCXAI discourse with reflective discussions from diverse stakeholders. The goal of the second installment is to go beyond the black box and examine how human-centered perspectives in XAI can be operationalized at the conceptual, methodological, and technical levels. 
Encouraging holistic (historical, sociological, and technical) approaches, we put an emphasis on “operationalizing”, aiming to produce actionable frameworks, transferable evaluation methods, concrete design guidelines, and articulate a coordinated research agenda for XAI.", "year": 2022, "venue": "CHI Extended Abstracts", "authors": [ "Upol Ehsan", "Philipp Wintersberger", "Q. Liao", "E. A. Watkins", "Carina Manger", "Hal Daumé III", "A. Riener", "Mark O. Riedl" ], "externalIds": { "DBLP": "conf/chi/EhsanWLWMDRR22", "DOI": "10.1145/3491101.3503727", "CorpusId": 248419244 }, "url": "https://www.semanticscholar.org/paper/2a68fb05d91bd0eb3c3a93c77a1297b8dc6d8cc8", "referenceCount": 31, "citationCount": 61, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Human-Centered Explainable AI (XAI): From Algorithms to User Experiences", "abstract": "In recent years, the field of explainable AI (XAI) has produced a vast collection of algorithms, providing a useful toolbox for researchers and practitioners to build XAI applications. With the rich application opportunities, explainability is believed to have moved beyond a demand by data scientists or researchers to comprehend the models they develop, to an essential requirement for people to trust and adopt AI deployed in numerous domains. However, explainability is an inherently human-centric property and the field is starting to embrace human-centered approaches. Human-computer interaction (HCI) research and user experience (UX) design in this area are becoming increasingly important. In this chapter, we begin with a high-level overview of the technical landscape of XAI algorithms, then selectively survey our own and other recent HCI works that take human-centered approaches to design, evaluate, and provide conceptual and methodological tools for XAI. We ask the question \"what are human-centered approaches doing for XAI\" and highlight three roles that they play in shaping XAI technologies by helping navigate, assess and expand the XAI toolbox: to drive technical choices by users' explainability needs, to uncover pitfalls of existing XAI methods and inform new methods, and to provide conceptual frameworks for human-compatible XAI.", "year": 2021, "venue": "arXiv.org", "authors": [ "Q. Liao", "Kush R. Varshney" ], "externalIds": { "DBLP": "journals/corr/abs-2110-10790", "ArXiv": "2110.10790", "CorpusId": 239050385 }, "url": "https://www.semanticscholar.org/paper/5e1746995debd1f17c24af01514c727598cc5613", "referenceCount": 110, "citationCount": 154, "influentialCitationCount": 18, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Beware explanations from AI in health care", "abstract": "The benefits of explainable artificial intelligence are not what they appear. Artificial intelligence and machine learning (AI/ML) algorithms are increasingly developed in health care for diagnosis and treatment of a variety of medical conditions (1). However, despite the technical prowess of such systems, their adoption has been challenging, and whether and how much they will actually improve health care remains to be seen. A central reason for this is that the effectiveness of AI/ML-based medical devices depends largely on the behavioral characteristics of its users, who, for example, are often vulnerable to well-documented biases or algorithmic aversion (2). 
Many stakeholders increasingly identify the so-called black-box nature of predictive algorithms as the core source of users' skepticism, lack of trust, and slow uptake (3, 4). As a result, lawmakers have been moving in the direction of requiring the availability of explanations for black-box algorithmic decisions (5). Indeed, a near-consensus is emerging in favor of explainable AI/ML among academics, governments, and civil society groups. Many are drawn to this approach to harness the accuracy benefits of noninterpretable AI/ML such as deep learning or neural nets while also supporting transparency, trust, and adoption. We argue that this consensus, at least as applied to health care, both overstates the benefits and undercounts the drawbacks of requiring black-box algorithms to be explainable.", "year": 2021, "venue": "Science", "authors": [ "Boris Babic", "S. Gerke", "T. Evgeniou", "I. Cohen" ], "externalIds": { "DOI": "10.1126/science.abg1834", "CorpusId": 235914988, "PubMed": "34437144" }, "url": "https://www.semanticscholar.org/paper/f106ef1bad05ed38011cbd711d7c397080023b86", "referenceCount": 0, "citationCount": 126, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Introduction to the Special Issue on Explainable Robotic Systems", "abstract": "Robotic systems are likely to become increasingly ubiquitous but at the same time also increasingly complex. With this will come the need for them to be transparent and trustworthy for a broad range of users: people have to understand enough about a robot’s inner workings to assess when such systems can be trusted. The call for autonomous intelligent systems (AIS) to be transparent has recently become loud and clear (e.g., [19]) and currently is a pressing funding and research agenda. Some forms of transparency, such as traceability and verification, are particularly important for software and hardware engineers [2, 5]; other forms, such as explainability or intelligibility, are particularly important for ordinary people [3]. As artificial agents, and especially socially interactive robots, enter human society, the demands for such systems to be transparent and explainable grow rapidly. When people interact with a robotic system, they construct mental models to understand and predict its actions. However, people’s mental models of robots stem at least to some degree from their interactions with living beings. Thus, people easily run the risk of establishing incorrect or inadequate models of robotic systems, which may result in self-deception or even harm [23]. Moreover, a long-term study [18] showed that initially established (incorrect) mental models of an intelligent information system remained robust over time, even when details of the system’s implementation were explained and initial beliefs were challenged with contradictory evidence. This can easily result in people either under-trusting or over-trusting robotic systems. Incorrect mental models of AIS can have significant consequences for trust in such systems and, as a result, for acceptance of and collaboration with these systems [20]. Several studies indicate that people distrust a robotic system when they are unable to understand its actions. When a robot fails to communicate its intentions, people perceive the robot not only as creepy or unsettling [22] but also as erratic and untrustworthy even when it follows a clear decision-making process [9]. 
Indeed, when a robot is not transparent about its intentions (i.e., not providing any explanations", "year": 2021, "venue": "ACM Trans. Hum. Robot Interact.", "authors": [ "M. D. Graaf", "A. Dragan", "B. Malle", "T. Ziemke" ], "externalIds": { "DBLP": "journals/thri/GraafDMZ21", "DOI": "10.1145/3461597", "CorpusId": 236475547 }, "url": "https://www.semanticscholar.org/paper/3f3d468ecb0bee93c6773f9ffa808066be87c1a9", "referenceCount": 23, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A comprehensive taxonomy for explainable artificial intelligence: a systematic survey of surveys on methods and concepts", "abstract": null, "year": 2021, "venue": "Data mining and knowledge discovery", "authors": [ "Gesina Schwalbe", "Bettina Finzel" ], "externalIds": { "ArXiv": "2105.07190", "DBLP": "journals/datamine/SchwalbeF24", "DOI": "10.1007/s10618-022-00867-8", "CorpusId": 245124075 }, "url": "https://www.semanticscholar.org/paper/d0119e30a9eb5d261c09180a33e92c6f08581d45", "referenceCount": 172, "citationCount": 123, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Benchmarking and survey of explanation methods for black box models", "abstract": null, "year": 2021, "venue": "Data mining and knowledge discovery", "authors": [ "F. Bodria", "F. Giannotti", "Riccardo Guidotti", "Francesca Naretto", "D. Pedreschi", "S. Rinzivillo" ], "externalIds": { "DBLP": "journals/datamine/BodriaGGNPR23", "ArXiv": "2102.13076", "DOI": "10.1007/s10618-023-00933-9", "CorpusId": 232046272 }, "url": "https://www.semanticscholar.org/paper/93a55b04045ff7fff78de473f5ff52cbcfb9a948", "referenceCount": 195, "citationCount": 157, "influentialCitationCount": 12, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Explainable Artificial Intelligence (XAI): Concepts, Taxonomies, Opportunities and Challenges toward Responsible AI", "abstract": null, "year": 2019, "venue": "Information Fusion", "authors": [ "Alejandro Barredo Arrieta", "Natalia Díaz Rodríguez", "J. Ser", "Adrien Bennetot", "S. Tabik", "A. Barbado", "S. García", "S. Gil-Lopez", "D. Molina", "Richard Benjamins", "Raja Chatila", "Francisco Herrera" ], "externalIds": { "ArXiv": "1910.10045", "MAG": "2997428643", "DBLP": "journals/inffus/ArrietaRSBTBGGM20", "DOI": "10.1016/j.inffus.2019.12.012", "CorpusId": 204824113 }, "url": "https://www.semanticscholar.org/paper/530a059cb48477ad1e3d4f8f4b153274c8997332", "referenceCount": 430, "citationCount": 4971, "influentialCitationCount": 288, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Explainability in human–agent systems", "abstract": null, "year": 2019, "venue": "Autonomous Agents and Multi-Agent Systems", "authors": [ "A. Rosenfeld", "A. Richardson" ], "externalIds": { "MAG": "2938225809", "DBLP": "journals/corr/abs-1904-08123", "ArXiv": "1904.08123", "DOI": "10.1007/s10458-019-09408-y", "CorpusId": 118687946 }, "url": "https://www.semanticscholar.org/paper/e0f7763c0da21ea3180165fa09be97cf5c62d40e", "referenceCount": 157, "citationCount": 187, "influentialCitationCount": 11, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Explanation in Human-AI Systems: A Literature Meta-Review, Synopsis of Key Ideas and Publications, and Bibliography for Explainable AI", "abstract": "This is an integrative review that address the question, \"What makes for a good explanation?\" with reference to AI systems. 
Pertinent literatures are vast. Thus, this review is necessarily selective. That said, most of the key concepts and issues are expressed in this Report. The Report encapsulates the history of computer science efforts to create systems that explain and instruct (intelligent tutoring systems and expert systems). The Report expresses the explainability issues and challenges in modern AI, and presents capsule views of the leading psychological theories of explanation. Certain articles stand out by virtue of their particular relevance to XAI, and their methods, results, and key points are highlighted. It is recommended that AI/XAI researchers be encouraged to include in their research reports fuller details on their empirical or experimental methods, in the fashion of experimental psychology research reports: details on Participants, Instructions, Procedures, Tasks, Dependent Variables (operational definitions of the measures and metrics), Independent Variables (conditions), and Control Conditions.", "year": 2019, "venue": "arXiv.org", "authors": [ "Shane T. Mueller", "R. Hoffman", "W. Clancey", "Abigail Emrey", "Gary Klein" ], "externalIds": { "ArXiv": "1902.01876", "MAG": "2913693384", "DBLP": "journals/corr/abs-1902-01876", "CorpusId": 59606335 }, "url": "https://www.semanticscholar.org/paper/5c3a72f47ed8d58c0554210828af1ce4bbf2dbcd", "referenceCount": 642, "citationCount": 263, "influentialCitationCount": 26, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Will machine learning end the viability of radiology as a thriving medical specialty?", "abstract": "There have been tremendous advances in artificial intelligence (AI) and machine learning (ML) within the past decade, especially in the application of deep learning to various challenges. These include advanced competitive games (such as Chess and Go), self-driving cars, speech recognition, and intelligent personal assistants. Rapid advances in computer vision for recognition of objects in pictures have led some individuals, including computer science experts and health care system experts in machine learning, to make predictions that ML algorithms will soon lead to the replacement of the radiologist. However, there are complex technological, regulatory, and medicolegal obstacles facing the implementation of machine learning in radiology that will definitely preclude replacement of the radiologist by these algorithms within the next two decades and beyond. While not a comprehensive review of machine learning, this article is intended to highlight specific features of machine learning which face significant technological and health care systems challenges. Rather than replacing radiologists, machine learning will provide quantitative tools that will increase the value of diagnostic imaging as a biomarker, increase image quality with decreased acquisition times, and improve workflow, communication, and patient safety. In the foreseeable future, we predict that today's generation of radiologists will be replaced not by ML algorithms, but by a new breed of data science-savvy radiologists who have embraced and harnessed the incredible potential that machine learning has to advance our ability to care for our patients. In this way, radiology will remain a viable medical specialty for years to come.", "year": 2019, "venue": "British Journal of Radiology", "authors": [ "Stephen Chan", "E. 
Siegel" ], "externalIds": { "MAG": "2896202491", "DOI": "10.1259/bjr.20180416", "CorpusId": 53527848, "PubMed": "30325645" }, "url": "https://www.semanticscholar.org/paper/66e70d7d6af0c51b9b907d9c4a8de0dfca70b1a7", "referenceCount": 81, "citationCount": 60, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Engineering" ] }, { "title": "Cognitive Systems: Toward Human-Level Functionality", "abstract": "in, when we choose an approach to solving a problem we face, we willy-nilly make a few high-level methodological choices. One such choice is between developing systems that aim to supplant humans — cognitive prostheses — and systems that aim to enhance human performance — cognitive orthotics. The distinction between the two is clear on the example of machine translation (MT). Although prosthetic systems aim to supplant humans by independently matching human performance on a task, most prosthetic systems still have to rely on people to yield a high-quality final result. Thus, results of Google Translate must be edited by a person to yield a high-quality translation. The practice of postediting the results of machine translation has been employed for over half a century. It is clear that today’s fully automatic MT systems yield much better raw translations than systems of yore, thus making the job of a Articles", "year": 2017, "venue": "The AI Magazine", "authors": [ "S. Nirenburg" ], "externalIds": { "MAG": "2780076779", "DBLP": "journals/aim/Nirenburg17", "DOI": "10.1609/aimag.v38i4.2760", "CorpusId": 26885034 }, "url": "https://www.semanticscholar.org/paper/b09fae0547ac88fd07b149b704f1817793398c77", "referenceCount": 15, "citationCount": 9, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Thinking fast and slow.", "abstract": null, "year": 2014, "venue": "Australian Veterinary Journal", "authors": [ "N. McGlynn" ], "externalIds": { "MAG": "2753615671", "CorpusId": 36031679, "PubMed": "25577814" }, "url": "https://www.semanticscholar.org/paper/2f2961362355e45fa014ca0bb8ce4495aedf8824", "referenceCount": 0, "citationCount": 8490, "influentialCitationCount": 847, "isOpenAccess": false, "fieldsOfStudy": [ "Biology", "Medicine" ] }, { "title": "The Minds of Machines", "abstract": null, "year": 2011, "venue": "", "authors": [ "Namita Arora" ], "externalIds": { "MAG": "2136832565", "CorpusId": 170213541 }, "url": "https://www.semanticscholar.org/paper/04c86bf9561f2843b128bac26deaadfe35e9d63d", "referenceCount": 0, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Philosophy" ] }, { "title": "Causal–explanatory pluralism: How intentions, functions, and mechanisms influence causal ascriptions", "abstract": null, "year": 2010, "venue": "Cognitive Psychology", "authors": [ "T. Lombrozo" ], "externalIds": { "MAG": "1984735186", "DOI": "10.1016/j.cogpsych.2010.05.002", "CorpusId": 17903355, "PubMed": "20801434" }, "url": "https://www.semanticscholar.org/paper/aaf576f21f8e547c72ded7cd78bd4940d20b86c4", "referenceCount": 94, "citationCount": 234, "influentialCitationCount": 22, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology", "Medicine" ] }, { "title": "Steps toward Artificial Intelligence", "abstract": "The problems of heuristic programming-of making computers solve really difficult problems-are divided into five main areas: Search, Pattern-Recognition, Learning, Planning, and Induction. A computer can do, in a sense, only what it is told to do. 
But even when we do not know how to solve a certain problem, we may program a machine (computer) to Search through some large space of solution attempts. Unfortunately, this usually leads to an enormously inefficient process. With Pattern-Recognition techniques, efficiency can often be improved, by restricting the application of the machine's methods to appropriate problems. Pattern-Recognition, together with Learning, can be used to exploit generalizations based on accumulated experience, further reducing search. By analyzing the situation, using Planning methods, we may obtain a fundamental improvement by replacing the given search with a much smaller, more appropriate exploration. To manage broad classes of problems, machines will need to construct models of their environments, using some scheme for Induction. Wherever appropriate, the discussion is supported by extensive citation of the literature and by descriptions of a few of the most successful heuristic (problem-solving) programs constructed to date.", "year": 1961, "venue": "Proceedings of the IRE", "authors": [ "M. Minsky" ], "externalIds": { "MAG": "2045031658", "DOI": "10.1109/JRPROC.1961.287775", "CorpusId": 14250548 }, "url": "https://www.semanticscholar.org/paper/b8ff8c7ab23eb70d4179c15a8a6b0efa1a493b8b", "referenceCount": 95, "citationCount": 1466, "influentialCitationCount": 58, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Aspects of Scientific Explanation.", "abstract": null, "year": 1966, "venue": "", "authors": [ "M. Resnik", "C. Hempel" ], "externalIds": { "MAG": "2052879098", "DOI": "10.2307/2106169", "CorpusId": 144679912 }, "url": "https://www.semanticscholar.org/paper/72952b44c187c12cfc5378e93cd4c0df644fecc5", "referenceCount": 0, "citationCount": 2494, "influentialCitationCount": 83, "isOpenAccess": false, "fieldsOfStudy": [ "Philosophy" ] }, { "title": "The Nature of Explanation", "abstract": null, "year": 1944, "venue": "Nature", "authors": [ "W. H. F. 
Barnes" ], "externalIds": { "MAG": "2088823862", "DOI": "10.1038/153605A0", "CorpusId": 4084461 }, "url": "https://www.semanticscholar.org/paper/ee66f87c06337fb430a90897112de06fb61f6a9f", "referenceCount": 0, "citationCount": 1346, "influentialCitationCount": 113, "isOpenAccess": true, "fieldsOfStudy": [ "Philosophy" ] }, { "title": "Agents in the Long Game of AI", "abstract": null, "year": 2024, "venue": "", "authors": [], "externalIds": { "DOI": "10.7551/mitpress/14940.001.0001", "CorpusId": 268050679 }, "url": "https://www.semanticscholar.org/paper/b7def33dcc808eb44df1449b575e2d871a3aca7d", "referenceCount": 0, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Deep learning is hitting a wall", "abstract": null, "year": 2022, "venue": "Nautilus", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "AI in Healthcare: Insights from two decades of FDA approvals", "abstract": null, "year": 2021, "venue": "Health Advances blog", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "What is mindreading Interdisciplinary Review of Cognitive Science", "abstract": null, "year": 2020, "venue": "wires.onlinelibrary", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Revealing the Conceptual Substrate of Biomedical Cognitive Models to the Wider Community", "abstract": "The patient authoring interface for each disease in the Maryland Virtual Patient simulation environment reveals the conceptual substrate of the disease model. Revealing the disease model to the community both explains how the interactive simulations work and invites collaboration from the wider community.", "year": 2008, "venue": "Medicine Meets Virtual Reality", "authors": [ "Marjorie J. McShane", "B. Jarrell", "G. Fantry", "S. Nirenburg", "S. Beale", "Ben Johnson" ], "externalIds": { "MAG": "2413846346", "DBLP": "conf/mmvr/McShaneJFNBJ08", "CorpusId": 11004546, "PubMed": "18391305" }, "url": "https://www.semanticscholar.org/paper/9fd4c1cb6716f421aa76e128f1fd408adf526bfe", "referenceCount": 2, "citationCount": 26, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine", "Psychology" ] }, { "title": "Linguistics for the Age of AI", "abstract": null, "year": null, "venue": "", "authors": [ "Marjorie J. McShane", "S. Nirenburg" ], "externalIds": { "DOI": "10.7551/mitpress/13618.001.0001", "CorpusId": 243257518 }, "url": "https://www.semanticscholar.org/paper/8c983dbea7a40a8ef735735a105d96f758cdda8b", "referenceCount": 0, "citationCount": 21, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "2023. Compendium of Neurosymbolic Artificial Intelligence", "abstract": null, "year": null, "venue": "Frontiers in Artificial Intelligence and Applications, vol. 
369", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "TSFeatLIME: An Online User Study in Enhancing Explainability in Univariate Time Series Forecasting": { "paper_title": "TSFeatLIME: An Online User Study in Enhancing Explainability in Univariate Time Series Forecasting", "arxiv_id": "2409.15950v1", "keyword": "explainable ai", "authors": [ "Hongnan Ma", "Kevin McAreavey", "Weiru Liu" ], "references": [ { "title": "How Human-Centered Explainable AI Interface Are Designed and Evaluated: A Systematic Survey", "abstract": "Despite its technological breakthroughs, eXplainable Artificial Intelligence (XAI) research has limited success in producing the {\\em effective explanations} needed by users. In order to improve XAI systems' usability, practical interpretability, and efficacy for real users, the emerging area of {\\em Explainable Interfaces} (EIs) focuses on the user interface and user experience design aspects of XAI. This paper presents a systematic survey of 53 publications to identify current trends in human-XAI interaction and promising directions for EI design and development. This is among the first systematic survey of EI research.", "year": 2024, "venue": "arXiv.org", "authors": [ "Thu Nguyen", "Alessandro Canossa", "Jichen Zhu" ], "externalIds": { "ArXiv": "2403.14496", "DBLP": "journals/corr/abs-2403-14496", "DOI": "10.48550/arXiv.2403.14496", "CorpusId": 268553839 }, "url": "https://www.semanticscholar.org/paper/82ed378207a39ea63c3c9dfb658f146c3cf14f53", "referenceCount": 108, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Interpretable Sequence Models for the Sales Forecasting Task: A Review", "abstract": "Sequence modelling has shown tremendous potential in solving real-world sequence prediction tasks like speech recognition, time series forecasting, and context identification. However, most of these sequence models are trained on univariate datasets and cannot leverage the information available in a multivariate setting. Moreover, the prediction/decision made by these models is not interpretable; consequently, the end users are unaware of the different steps involved in reaching that prediction/decision and cannot determine if the model aligns with the business and ethical values. This work investigates the performance of different sequence learners trained in a multivariate setting for the sales forecasting task. Specifically, different sequence models, including vanilla LSTM, stacked LSTM, bidirectional LSTM, and convolution neural networkbased-LSTM, have been trained on the Walmart dataset, and a comparative analysis of their performance using mean squared error (MSE) and weighted mean absolute error (WMAE) metric is reported. For training the learners in a multivariate setting, relevant features have been identified using exploratory data analytics. Furthermore, these sequence models are made interpretable using the Local Interpretable Model Agnostic Explanation (LIME) model to explain away the key variables involved in the prediction task. Empirical results obtained on the Walmart sales dataset established that the performance of the stacked LSTM model is superior to other learners. 
Additionally, the stacked model being the most generalizable, is complemented by the LIME module to explain away its predictions using the relevant features.", "year": 2023, "venue": "International Conference Intelligent Computing and Control Systems", "authors": [ "Rishi K Narang", "Upendra Pratap Singh" ], "externalIds": { "DOI": "10.1109/ICICCS56967.2023.10142614", "CorpusId": 259123538 }, "url": "https://www.semanticscholar.org/paper/f388380fed0e68f286ebf7abcd4d90f4c5376be7", "referenceCount": 48, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Measures for explainable AI: Explanation goodness, user satisfaction, mental models, curiosity, trust, and human-AI performance", "abstract": "If a user is presented an AI system that portends to explain how it works, how do we know whether the explanation works and the user has achieved a pragmatic understanding of the AI? This question entails some key concepts of measurement such as explanation goodness and trust. We present methods for enabling developers and researchers to: (1) Assess the a priori goodness of explanations, (2) Assess users' satisfaction with explanations, (3) Reveal user's mental model of an AI system, (4) Assess user's curiosity or need for explanations, (5) Assess whether the user's trust and reliance on the AI are appropriate, and finally, (6) Assess how the human-XAI work system performs. The methods we present derive from our integration of extensive research literatures and our own psychometric evaluations. We point to the previous research that led to the measurement scales which we aggregated and tailored specifically for the XAI context. Scales are presented in sufficient detail to enable their use by XAI researchers. For Mental Model assessment and Work System Performance, XAI researchers have choices. We point to a number of methods, expressed in terms of methods' strengths and weaknesses, and pertinent measurement issues.", "year": 2023, "venue": "Frontiers of Computer Science", "authors": [ "R. Hoffman", "Shane T. Mueller", "Gary Klein", "Jordan Litman" ], "externalIds": { "DBLP": "journals/fcomp/HoffmanMKL23", "DOI": "10.3389/fcomp.2023.1096257", "CorpusId": 256599630 }, "url": "https://www.semanticscholar.org/paper/3038e62388ba4961595ec0062948b31eef251e5d", "referenceCount": 165, "citationCount": 46, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Explainable AI for Non-Experts: Energy Tariff Forecasting", "abstract": "Non-expert users are increasingly affected by the decisions of systems that rely on machine learning (ML), yet it is often difficult for these users to understand the predictions of ML models. In this paper, we propose a web-based platform to evaluate explainable AI (XAI) for non-experts in the context of time series forecasting, focusing on energy price predictions as an exemplary use case. The XAI methods we consider include local feature importance and counterfactual explanations. The platform relies on gamification to encourage user engagement. 
Our research objective is to evaluate the effectiveness of these different approaches from the perspective of non-expert understanding of machine learning models.", "year": 2022, "venue": "International Conference on Automation and Computing", "authors": [ "Hongnan Ma", "Kevin McAreavey", "Ryan McConville", "Weiru Liu" ], "externalIds": { "DBLP": "conf/iconac/MaMML22", "DOI": "10.1109/ICAC55051.2022.9911105", "CorpusId": 252841949 }, "url": "https://www.semanticscholar.org/paper/cebe7fed9a2408cfc6dc4fc7d44ed815de7495f4", "referenceCount": 29, "citationCount": 4, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Time-series forecasting of seasonal items sales using machine learning - A comparative analysis", "abstract": null, "year": 2022, "venue": "Int. J. Inf. Manag. Data Insights", "authors": [ "Yasaman Ensafi", "S. H. Amin", "Guoqing Zhang", "Bharat Shah" ], "externalIds": { "DBLP": "journals/ijinfomandi/EnsafiAZS22", "DOI": "10.1016/j.jjimei.2022.100058", "CorpusId": 246162273 }, "url": "https://www.semanticscholar.org/paper/d96ab255585b0fa845cff9fe58ef9f2d4af7b118", "referenceCount": 56, "citationCount": 109, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "TS-MULE: Local Interpretable Model-Agnostic Explanations for Time Series Forecast Models", "abstract": null, "year": 2021, "venue": "PKDD/ECML Workshops", "authors": [ "U. Schlegel", "D. Lam", "D. Keim", "Daniel Seebacher" ], "externalIds": { "DBLP": "conf/pkdd/SchlegelLKS21", "ArXiv": "2109.08438", "DOI": "10.1007/978-3-030-93736-2_1", "CorpusId": 237562811 }, "url": "https://www.semanticscholar.org/paper/9fb0b26dc0aca8d1a1d32167dbc97e5acc6d8099", "referenceCount": 23, "citationCount": 18, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Towards a Rigorous Evaluation of Explainability for Multivariate Time Series", "abstract": "Machine learning-based systems are rapidly gaining popularity and in-line with that there has been a huge research surge in the field of explainability to ensure that machine learning models are reliable, fair, and can be held liable for their decision-making process. Explainable Artificial Intelligence (XAI) methods are typically deployed to debug black-box machine learning models but in comparison to tabular, text, and image data, explainability in time series is still relatively unexplored. The aim of this study was to achieve and evaluate model agnostic explainability in a time series forecasting problem. This work focused on proving a solution for a digital consultancy company aiming to find a data-driven approach in order to understand the effect of their sales related activities on the sales deals closed. The solution involved framing the problem as a time series forecasting problem to predict the sales deals and the explainability was achieved using two novel model agnostic explainability techniques, Local explainable model-agnostic explanations (LIME) and Shapley additive explanations (SHAP) which were evaluated using human evaluation of explainability. The results clearly indicate that the explanations produced by LIME and SHAP greatly helped lay humans in understanding the predictions made by the machine learning model. The presented work can easily be extended to any time", "year": 2021, "venue": "arXiv.org", "authors": [ "Rohit Saluja", "A. Malhi", "Samanta Knapic", "Kary Främling", "C. 
Cavdar" ], "externalIds": { "ArXiv": "2104.04075", "DBLP": "journals/corr/abs-2104-04075", "CorpusId": 233204336 }, "url": "https://www.semanticscholar.org/paper/96ed01cfcc9b76b7b48f9ed9580a1fa74f7bcf1c", "referenceCount": 31, "citationCount": 13, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Explainable Artificial Intelligence (XAI) on TimeSeries Data: A Survey", "abstract": "Most of state of the art methods applied on time series consist of deep learning methods that are too complex to be interpreted. This lack of interpretability is a major drawback, as several applications in the real world are critical tasks, such as the medical field or the autonomous driving field. The explainability of models applied on time series has not gather much attention compared to the computer vision or the natural language processing fields. In this paper, we present an overview of existing explainable AI (XAI) methods applied on time series and illustrate the type of explanations they produce. We also provide a reflection on the impact of these explanation methods to provide confidence and trust in the AI systems.", "year": 2021, "venue": "arXiv.org", "authors": [ "Thomas Rojat", "Raphael Puget", "David Filliat", "J. Ser", "R. Gelin", "Natalia D'iaz-Rodr'iguez" ], "externalIds": { "ArXiv": "2104.00950", "DBLP": "journals/corr/abs-2104-00950", "CorpusId": 233004703 }, "url": "https://www.semanticscholar.org/paper/2ff13c3f6678cd82dfc955bff5c3ec6a09688d70", "referenceCount": 103, "citationCount": 96, "influentialCitationCount": 8, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "AI in Healthcare: Time-Series Forecasting Using Statistical, Neural, and Ensemble Architectures", "abstract": "Both statistical and neural methods have been proposed in the literature to predict healthcare expenditures. However, less attention has been given to comparing predictions from both these methods as well as ensemble approaches in the healthcare domain. The primary objective of this paper was to evaluate different statistical, neural, and ensemble techniques in their ability to predict patients' weekly average expenditures on certain pain medications. Two statistical models, persistence (baseline) and autoregressive integrated moving average (ARIMA), a multilayer perceptron (MLP) model, a long short-term memory (LSTM) model, and an ensemble model combining predictions of the ARIMA, MLP, and LSTM models were calibrated to predict the expenditures on two different pain medications. In the MLP and LSTM models, we compared the influence of shuffling of training data and dropout of certain nodes in MLPs and nodes and recurrent connections in LSTMs in layers during training. Results revealed that the ensemble model outperformed the persistence, ARIMA, MLP, and LSTM models across both pain medications. In general, not shuffling the training data and adding the dropout helped the MLP models and shuffling the training data and not adding the dropout helped the LSTM models across both medications. We highlight the implications of using statistical, neural, and ensemble methods for time-series forecasting of outcomes in the healthcare domain.", "year": 2020, "venue": "Frontiers in Big Data", "authors": [ "S. Kaushik", "Abhinav Choudhury", "Pankaj Kumar Sheron", "Nataraj Dasgupta", "Sayee Natarajan", "Larry A. 
Pickett", "Varun Dutt" ], "externalIds": { "MAG": "3016053201", "DBLP": "journals/fdata/KaushikCSDNPD20", "PubMedCentral": "7931939", "DOI": "10.3389/fdata.2020.00004", "CorpusId": 212748311, "PubMed": "33693379" }, "url": "https://www.semanticscholar.org/paper/73f7bd9efb6f41a9fcf1491b6182455375239c5e", "referenceCount": 66, "citationCount": 113, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Agnostic Local Explanation for Time Series Classification", "abstract": "Recent advances in Machine Learning (such as Deep Learning) have brought tremendous gains in classification accuracy. However, these approaches build complex non-linear models, making the resulting predictions difficult to interpret for humans. The field of model interpretability has therefore recently emerged, aiming to address this issue by designing methods to explain a posteriori the predictions of complex learners. Interpretability frameworks such as LIME and SHAP have been proposed for tabular, image and text data. Nowadays, with the advent of the Internet of Things and of pervasive monitoring, time-series have become ubiquitous and their classification is a crucial task in many application domains. Like in other data domains, state-of-the-art time-series classifiers rely on complex models and typically do not provide intuitive and easily interpretable outputs, yet no interpretability framework had so far been proposed for this type of data. In this paper, we propose the first agnostic Local Explainer For TIme Series classificaTion (LEFTIST). LEFTIST provides explanations for predictions made by any time series classifier. Our thorough experiments on synthetic and real-world datasets show that the explanations provided by LEFTIST are at once faithful to the classification model and understandable by human users.", "year": 2019, "venue": "IEEE International Conference on Tools with Artificial Intelligence", "authors": [ "Maël Guillemé", "Véronique Masson", "L. Rozé", "A. Termier" ], "externalIds": { "MAG": "3005245032", "DBLP": "conf/ictai/GuillemeMRT19", "DOI": "10.1109/ICTAI.2019.00067", "CorpusId": 211211669 }, "url": "https://www.semanticscholar.org/paper/91362bec57d2b303e210471b020153dc154cec5b", "referenceCount": 20, "citationCount": 37, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "One Explanation Does Not Fit All: A Toolkit and Taxonomy of AI Explainability Techniques", "abstract": "As artificial intelligence and machine learning algorithms make further inroads into society, calls are increasing from multiple stakeholders for these algorithms to explain their outputs. At the same time, these stakeholders, whether they be affected citizens, government regulators, domain experts, or system developers, present different requirements for explanations. Toward addressing these needs, we introduce AI Explainability 360 (this http URL), an open-source software toolkit featuring eight diverse and state-of-the-art explainability methods and two evaluation metrics. Equally important, we provide a taxonomy to help entities requiring explanations to navigate the space of explanation methods, not only those in the toolkit but also in the broader literature on explainability. For data scientists and other users of the toolkit, we have implemented an extensible software architecture that organizes methods according to their place in the AI modeling pipeline. 
We also discuss enhancements to bring research innovations closer to consumers of explanations, ranging from simplified, more accessible versions of algorithms, to tutorials and an interactive web demo to introduce AI explainability to different audiences and application domains. Together, our toolkit and taxonomy can help identify gaps where more explainability methods are needed and provide a platform to incorporate them as they are developed.", "year": 2019, "venue": "arXiv.org", "authors": [ "V. Arya", "R. Bellamy", "Pin-Yu Chen", "Amit Dhurandhar", "M. Hind", "Samuel C. Hoffman", "Stephanie Houde", "Q. Liao", "Ronny Luss", "A. Mojsilovic", "Sami Mourad", "Pablo Pedemonte", "R. Raghavendra", "John T. Richards", "P. Sattigeri", "Karthikeyan Shanmugam", "Moninder Singh", "Kush R. Varshney", "Dennis Wei", "Yunfeng Zhang" ], "externalIds": { "MAG": "2972059645", "DBLP": "journals/corr/abs-1909-03012", "ArXiv": "1909.03012", "CorpusId": 202237813 }, "url": "https://www.semanticscholar.org/paper/c57a28759b6e1ddd7be03fa41e82b01130c8f8b2", "referenceCount": 61, "citationCount": 345, "influentialCitationCount": 25, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Towards A Rigorous Science of Interpretable Machine Learning", "abstract": "As machine learning systems become ubiquitous, there has been a surge of interest in interpretable machine learning: systems that provide explanation for their outputs. These explanations are often used to qualitatively assess other criteria such as safety or non-discrimination. However, despite the interest in interpretability, there is very little consensus on what interpretable machine learning is and how it should be measured. In this position paper, we first define interpretability and describe when interpretability is needed (and when it is not). Next, we suggest a taxonomy for rigorous evaluation and expose open questions towards a more rigorous science of interpretable machine learning.", "year": 2017, "venue": "", "authors": [ "F. Doshi-Velez", "Been Kim" ], "externalIds": { "MAG": "2594475271", "ArXiv": "1702.08608", "CorpusId": 11319376 }, "url": "https://www.semanticscholar.org/paper/5c39e37022661f81f79e481240ed9b175dec6513", "referenceCount": 57, "citationCount": 3217, "influentialCitationCount": 257, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "“Why Should I Trust You?”: Explaining the Predictions of Any Classifier", "abstract": "Despite widespread adoption, machine learning models remain mostly black boxes. Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks). 
We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.", "year": 2016, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Marco Tulio Ribeiro", "Sameer Singh", "Carlos Guestrin" ], "externalIds": { "ACL": "N16-3020", "MAG": "2951501516", "DBLP": "journals/corr/RibeiroSG16", "ArXiv": "1602.04938", "DOI": "10.1145/2939672.2939778", "CorpusId": 13029170 }, "url": "https://www.semanticscholar.org/paper/c0883f5930a232a9c1ad601c978caede29155979", "referenceCount": 41, "citationCount": 14452, "influentialCitationCount": 1693, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Bootstrap Methods for Time Series", "abstract": "The bootstrap is a method for estimating the distribution of an estimator or test statistic by resampling one's data or a model estimated from the data. The methods that are available for implementing the bootstrap and the accuracy of bootstrap estimates depend on whether the data are an independent random sample or a time series. This paper is concerned with the application of the bootstrap to time‐series data when one does not have a finite‐dimensional parametric model that reduces the data generation process to independent random sampling. We review the methods that have been proposed for implementing the bootstrap in this situation and discuss the accuracy of these methods relative to that of first‐order asymptotic approximations. We argue that methods for implementing the bootstrap with time‐series data are not as well understood as methods for data that are independent random samples. Although promising bootstrap methods for time series are available, there is a considerable need for further research in the application of the bootstrap to time series. We describe some of the important unsolved problems.", "year": 2003, "venue": "", "authors": [ "W. Härdle", "J. Horowitz", "Jens-Peter Kreiss" ], "externalIds": { "MAG": "2148329315", "DOI": "10.1111/j.1751-5823.2003.tb00485.x", "CorpusId": 9107012 }, "url": "https://www.semanticscholar.org/paper/85668636b85fadb9e6672ac762dd48b99be5e222", "referenceCount": 122, "citationCount": 363, "influentialCitationCount": 16, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Explainable AI for Time Series Classification: A Review, Taxonomy and Research Directions", "abstract": "Time series data is increasingly used in a wide range of fields, and it is often relied on in crucial applications and high-stakes decision-making. For instance, sensors generate time series data to recognize different types of anomalies through automatic decision-making systems. Typically, these systems are realized with machine learning models that achieve top-tier performance on time series classification tasks. Unfortunately, the logic behind their prediction is opaque and hard to understand from a human standpoint. Recently, we observed a consistent increase in the development of explanation methods for time series classification justifying the need to structure and review the field. 
In this work, we (a) present the first extensive literature review on Explainable AI (XAI) for time series classification, (b) categorize the research field through a taxonomy subdividing the methods into time points-based, subsequences-based and instance-based, and (c) identify open research directions regarding the type of explanations and the evaluation of explanations and interpretability.", "year": 2022, "venue": "IEEE Access", "authors": [ "Andreas Theissler", "Francesco Spinnato", "U. Schlegel", "Riccardo Guidotti" ], "externalIds": { "DBLP": "journals/access/TheisslerSSG22", "DOI": "10.1109/ACCESS.2022.3207765", "CorpusId": 252395030 }, "url": "https://www.semanticscholar.org/paper/8b673ab5c32865549ae3bd23d494b5c12f462f9f", "referenceCount": 196, "citationCount": 56, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Bootstrap Methods: Another Look at the Jackknife", "abstract": null, "year": 2008, "venue": "", "authors": [ "D. Hinkley" ], "externalIds": { "MAG": "2481699070", "DOI": "10.1007/978-0-387-75692-9_9", "CorpusId": 124426327 }, "url": "https://www.semanticscholar.org/paper/ea825c57ffebc7065f5279165e6fc261ac317691", "referenceCount": 0, "citationCount": 8556, "influentialCitationCount": 741, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "“Counterfactual shapley values for explaining reinforcement learning,”", "abstract": null, "year": null, "venue": "arXiv", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Explainable AI needs formal notions of explanation correctness": { "paper_title": "Explainable AI needs formal notions of explanation correctness", "arxiv_id": "2409.14590v2", "keyword": "explainable ai", "authors": [ "Stefan Haufe", "Rick Wilming", "Benedict Clark", "Rustam Zhumagambetov", "Danny Panknin", "Ahcène Boubekki" ], "references": [ { "title": "XAI is in trouble", "abstract": "Researchers focusing on how artificial intelligence (AI) methods explain their decisions often discuss controversies and limitations. Some even assert that most publications offer little to no valuable contributions. In this article, we substantiate the claim that explainable AI (XAI) is in trouble by describing and illustrating four problems: the disagreements on the scope of XAI, the lack of definitional cohesion, precision, and adoption, the issues with motivations for XAI research, and limited and inconsistent evaluations. As we delve into their potential underlying sources, our analysis finds these problems seem to originate from AI researchers succumbing to the pitfalls of interdisciplinarity or from insufficient scientific rigor. Analyzing these potential factors, we discuss the literature at times coming across unexplored research questions. Hoping to alleviate existing problems, we make recommendations on precautions against the challenges of interdisciplinarity and propose directions in support of scientific rigor.", "year": 2024, "venue": "The AI Magazine", "authors": [ "Rosina O. Weber", "Adam J. 
Johs", "Prateek Goel", "João Marques Silva" ], "externalIds": { "DOI": "10.1002/aaai.12184", "CorpusId": 271551592 }, "url": "https://www.semanticscholar.org/paper/76877fc4ff78e6ddbb910f66b604598561a251a0", "referenceCount": 105, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "GECOBench: A Gender-Controlled Text Dataset and Benchmark for Quantifying Biases in Explanations", "abstract": "Large pre-trained language models have become popular for many applications and form an important backbone of many downstream tasks in natural language processing (NLP). Applying 'explainable artificial intelligence' (XAI) techniques to enrich such models' outputs is considered crucial for assuring their quality and shedding light on their inner workings. However, large language models are trained on a plethora of data containing a variety of biases, such as gender biases, affecting model weights and, potentially, behavior. Currently, it is unclear to what extent such biases also impact model explanations in possibly unfavorable ways. We create a gender-controlled text dataset, GECO, in which otherwise identical sentences appear in male and female forms. This gives rise to ground-truth 'world explanations' for gender classification tasks, enabling the objective evaluation of the correctness of XAI methods. We also provide GECOBench, a rigorous quantitative evaluation framework benchmarking popular XAI methods, applying them to pre-trained language models fine-tuned to different degrees. This allows us to investigate how pre-training induces undesirable bias in model explanations and to what extent fine-tuning can mitigate such explanation bias. We show a clear dependency between explanation performance and the number of fine-tuned layers, where XAI methods are observed to particularly benefit from fine-tuning or complete retraining of embedding layers. Remarkably, this relationship holds for models achieving similar classification performance on the same task. With that, we highlight the utility of the proposed gender-controlled dataset and novel benchmarking approach for research and development of novel XAI methods. All code including dataset generation, model training, evaluation and visualization is available at: https://github.com/braindatalab/gecobench", "year": 2024, "venue": "arXiv.org", "authors": [ "Rick Wilming", "Artur Dox", "Hjalmar Schulz", "Marta Oliveira", "Benedict Clark", "Stefan Haufe" ], "externalIds": { "DBLP": "journals/corr/abs-2406-11547", "ArXiv": "2406.11547", "DOI": "10.48550/arXiv.2406.11547", "CorpusId": 270559945 }, "url": "https://www.semanticscholar.org/paper/88a3e9af374aae76590bcce8fccd343a1cb29cd2", "referenceCount": 58, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Benchmarking the influence of pre-training on explanation performance in MR image classification", "abstract": "Convolutional Neural Networks (CNNs) are frequently and successfully used in medical prediction tasks. They are often used in combination with transfer learning, leading to improved performance when training data for the task are scarce. The resulting models are highly complex and typically do not provide any insight into their predictive mechanisms, motivating the field of “explainable” artificial intelligence (XAI). 
However, previous studies have rarely quantitatively evaluated the “explanation performance” of XAI methods against ground-truth data, and transfer learning and its influence on objective measures of explanation performance has not been investigated. Here, we propose a benchmark dataset that allows for quantifying explanation performance in a realistic magnetic resonance imaging (MRI) classification task. We employ this benchmark to understand the influence of transfer learning on the quality of explanations. Experimental results show that popular XAI methods applied to the same underlying model differ vastly in performance, even when considering only correctly classified examples. We further observe that explanation performance strongly depends on the task used for pre-training and the number of CNN layers pre-trained. These results hold after correcting for a substantial correlation between explanation and classification performance.", "year": 2024, "venue": "Frontiers Artif. Intell.", "authors": [ "Marta Oliveira", "Rick Wilming", "Benedict Clark", "Céline Budding", "Fabian Eitel", "Kerstin Ritter", "Stefan Haufe" ], "externalIds": { "PubMedCentral": "10925627", "DBLP": "journals/frai/OliveiraWCBERH24", "DOI": "10.3389/frai.2024.1330919", "CorpusId": 268181322, "PubMed": "38469161" }, "url": "https://www.semanticscholar.org/paper/62bacd00591cc571d6fd9d9a8804244306a1d546", "referenceCount": 46, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "XAI-TRIS: non-linear image benchmarks to quantify false positive post-hoc attribution of feature importance", "abstract": null, "year": 2023, "venue": "Machine-mediated learning", "authors": [ "Benedict Clark", "Rick Wilming", "S. Haufe" ], "externalIds": { "PubMedCentral": "11306297", "ArXiv": "2306.12816", "DBLP": "journals/ml/ClarkWH24", "DOI": "10.1007/s10994-024-06574-3", "CorpusId": 259224729, "PubMed": "39132312" }, "url": "https://www.semanticscholar.org/paper/7d7a17da63a107424ddeec91612341100d8de42e", "referenceCount": 38, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Theoretical Behavior of XAI Methods in the Presence of Suppressor Variables", "abstract": "In recent years, the community of 'explainable artificial intelligence' (XAI) has created a vast body of methods to bridge a perceived gap between model 'complexity' and 'interpretability'. However, a concrete problem to be solved by XAI methods has not yet been formally stated. As a result, XAI methods are lacking theoretical and empirical evidence for the 'correctness' of their explanations, limiting their potential use for quality-control and transparency purposes. At the same time, Haufe et al. (2014) showed, using simple toy examples, that even standard interpretations of linear models can be highly misleading. Specifically, high importance may be attributed to so-called suppressor variables lacking any statistical relation to the prediction target. This behavior has been confirmed empirically for a large array of XAI methods in Wilming et al. (2022). Here, we go one step further by deriving analytical expressions for the behavior of a variety of popular XAI methods on a simple two-dimensional binary classification problem involving Gaussian class-conditional distributions. 
We show that the majority of the studied approaches will attribute non-zero importance to a non-class-related suppressor feature in the presence of correlated noise. This poses important limitations on the interpretations and conclusions that the outputs of these XAI methods can afford.", "year": 2023, "venue": "International Conference on Machine Learning", "authors": [ "Rick Wilming", "Leo Kieslich", "Benedict Clark", "S. Haufe" ], "externalIds": { "ArXiv": "2306.01464", "DBLP": "conf/icml/WilmingKCH23", "DOI": "10.48550/arXiv.2306.01464", "CorpusId": 259064037 }, "url": "https://www.semanticscholar.org/paper/05708173e750e4b4cd7aa14249cd94f3e74c8e43", "referenceCount": 53, "citationCount": 7, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Explaining classifiers with measures of statistical association", "abstract": null, "year": 2023, "venue": "Computational Statistics & Data Analysis", "authors": [ "E. Borgonovo", "Valentina Ghidini", "Roman Hahn", "E. Plischke" ], "externalIds": { "DBLP": "journals/csda/BorgonovoGHP23", "DOI": "10.1016/j.csda.2023.107701", "CorpusId": 256531317 }, "url": "https://www.semanticscholar.org/paper/fb4453e4eef61b8e252cd77b6e7e6b24660399d2", "referenceCount": 41, "citationCount": 5, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Impossibility theorems for feature attribution", "abstract": "Significance Machine learning models can learn complex patterns from data, but it is often difficult to understand why they make particular predictions. To tackle this problem, practitioners typically turn to feature attribution methods, which seek to attribute the model’s behavior f(x) around an example x to particular features, or dimensions of x, that are most important for the prediction. In recent years, a new class of feature attribution methods—namely, complete and linear methods—has become popular. Our work shows that, unfortunately, such methods can be misleading: Complete and linear methods are provably less reliable than simpler methods at answering basic feature attribution questions. We provide impossibility results that highlight their failure cases and discuss how we might instead obtain reliable feature attributions.", "year": 2022, "venue": "Proceedings of the National Academy of Sciences of the United States of America", "authors": [ "Blair Bilodeau", "Natasha Jaques", "Pang Wei Koh", "Been Kim" ], "externalIds": { "DBLP": "journals/corr/abs-2212-11870", "ArXiv": "2212.11870", "PubMedCentral": "10786278", "DOI": "10.1073/pnas.2304406120", "CorpusId": 254974246, "PubMed": "38181057" }, "url": "https://www.semanticscholar.org/paper/8102cad29ec8e808c7395ac6ee668da495f07206", "referenceCount": 55, "citationCount": 36, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Lessons Learned from Assessing Trustworthy AI in Practice", "abstract": null, "year": 2022, "venue": "Digital Society", "authors": [ "R. Zicari", "J. Amann", "Frédérick Bruneault", "M. Coffee", "Boris Düdder", "Eleanore Hickman", "Alessio Gallucci", "T. Gilbert", "Thilo Hagendorff", "Irmhild van Halem", "E. Hildt", "G. Kararigas", "P. Kringen", "V. Madai", "Emilie Wiinblad Mathez", "Jesmin Jahan Tithi", "Dennis Vetter", "Magnus Westerlund", "Reneé C. 
Wurth" ], "externalIds": { "DBLP": "journals/corr/abs-2206-09887", "ArXiv": "2206.09887", "DOI": "10.1007/s44206-023-00063-1", "CorpusId": 249890365 }, "url": "https://www.semanticscholar.org/paper/295990165af1dfa4ed0dc338d9e4f0189fc75b75", "referenceCount": 94, "citationCount": 9, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Quantus: An Explainable AI Toolkit for Responsible Evaluation of Neural Network Explanations", "abstract": "The evaluation of explanation methods is a research topic that has not yet been explored deeply, however, since explainability is supposed to strengthen trust in artificial intelligence, it is necessary to systematically review and compare explanation methods in order to confirm their correctness. Until now, no tool with focus on XAI evaluation exists that exhaustively and speedily allows researchers to evaluate the performance of explanations of neural network predictions. To increase transparency and reproducibility in the field, we therefore built Quantus -- a comprehensive, evaluation toolkit in Python that includes a growing, well-organised collection of evaluation metrics and tutorials for evaluating explainable methods. The toolkit has been thoroughly tested and is available under an open-source license on PyPi (or on https://github.com/understandable-machine-intelligence-lab/Quantus/).", "year": 2022, "venue": "Journal of machine learning research", "authors": [ "Anna Hedström", "Leander Weber", "Dilyara Bareeva", "Franz Motzkus", "W. Samek", "Sebastian Lapuschkin", "Marina M.-C. Höhne" ], "externalIds": { "DBLP": "journals/jmlr/HedstromWKBMSLH23", "ArXiv": "2202.06861", "CorpusId": 246823004 }, "url": "https://www.semanticscholar.org/paper/30e776268268e84becd2863b0632247da61238b9", "referenceCount": 58, "citationCount": 130, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Consistent and Efficient Evaluation Strategy for Attribution Methods", "abstract": "With a variety of local feature attribution methods being proposed in recent years, follow-up work suggested several evaluation strategies. To assess the attribution quality across different attribution techniques, the most popular among these evaluation strategies in the image domain use pixel perturbations. However, recent advances discovered that different evaluation strategies produce conflicting rankings of attribution methods and can be prohibitively expensive to compute. In this work, we present an information-theoretic analysis of evaluation strategies based on pixel perturbations. Our findings reveal that the results are strongly affected by information leakage through the shape of the removed pixels as opposed to their actual values. Using our theoretical insights, we propose a novel evaluation framework termed Remove and Debias (ROAD) which offers two contributions: First, it mitigates the impact of the confounders, which entails higher consistency among evaluation strategies. Second, ROAD does not require the computationally expensive retraining step and saves up to 99% in computational costs compared to the state-of-the-art. We release our source code at https://github.com/tleemann/road_evaluation.", "year": 2022, "venue": "International Conference on Machine Learning", "authors": [ "Yao Rong", "Tobias Leemann", "V. 
Borisov", "Gjergji Kasneci", "Enkelejda Kasneci" ], "externalIds": { "ArXiv": "2202.00449", "DBLP": "conf/icml/RongLBKK22", "CorpusId": 249642068 }, "url": "https://www.semanticscholar.org/paper/04e4183fd18093ab34e8c82bc4403b75901e7cec", "referenceCount": 53, "citationCount": 68, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Evaluating saliency methods on artificial data with different background types", "abstract": "Over the last years, many 'explainable artificial intelligence' (xAI) approaches have been developed, but these have not always been objectively evaluated. To evaluate the quality of heatmaps generated by various saliency methods, we developed a framework to generate artificial data with synthetic lesions and a known ground truth map. Using this framework, we evaluated two data sets with different backgrounds, Perlin noise and 2D brain MRI slices, and found that the heatmaps vary strongly between saliency methods and backgrounds. We strongly encourage further evaluation of saliency maps and xAI methods using this framework before applying these in clinical or other safety-critical settings.", "year": 2021, "venue": "arXiv.org", "authors": [ "C'eline Budding", "Fabian Eitel", "K. Ritter", "S. Haufe" ], "externalIds": { "DBLP": "journals/corr/abs-2112-04882", "ArXiv": "2112.04882", "CorpusId": 245006322 }, "url": "https://www.semanticscholar.org/paper/0cf26a0ca4aca207764c86817092e6ede1c40689", "referenceCount": 31, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Scrutinizing XAI using linear ground-truth data with suppressor variables", "abstract": null, "year": 2021, "venue": "Machine-mediated learning", "authors": [ "Rick Wilming", "C'eline Budding", "K. Müller", "S. Haufe" ], "externalIds": { "DBLP": "journals/ml/WilmingBMH22", "ArXiv": "2111.07473", "PubMedCentral": "9123083", "DOI": "10.1007/s10994-022-06167-y", "CorpusId": 244117050, "PubMed": "35611184" }, "url": "https://www.semanticscholar.org/paper/f1178162b2b5c46feca962eb422b8bd48dd7073d", "referenceCount": 56, "citationCount": 22, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science", "Medicine" ] }, { "title": "Machine Learning for Health: Algorithm Auditing & Quality Control", "abstract": null, "year": 2021, "venue": "Journal of medical systems", "authors": [ "Luis Oala", "A. Murchison", "Pradeep Balachandran", "Shruti Choudhary", "J. Fehr", "Alixandro Werneck Leite", "P. Goldschmidt", "Christian Johner", "Elora D. M. Schörverth", "Rose Nakasi", "Martin Meyer", "F. Cabitza", "P. Baird", "Carolin Prabhu", "Eva Weicken", "Xiaoxuan Liu", "M. Wenzel", "Steffen Vogler", "D. Akogo", "Shada Alsalamah", "Emre Kazim", "A. Koshiyama", "Sven Piechottka", "Sheena Macpherson", "I. Shadforth", "Regina Geierhofer", "Christian Matek", "J. Krois", "Bruno Sanguinetti", "M. Arentz", "Pavol Bielik", "Saul Calderon-Ramirez", "Auss Abbood", "Nicolas Langer", "S. Haufe", "Ferath Kherif", "Sameer Pujari", "W. 
Samek", "Thomas Wiegand" ], "externalIds": { "DBLP": "journals/jms/OalaMBCFLGJSNMC21", "PubMedCentral": "8562935", "DOI": "10.1007/s10916-021-01783-y", "CorpusId": 240443732, "PubMed": "34729675" }, "url": "https://www.semanticscholar.org/paper/9986fab347bcec1bc01e73f5c7955c1ae2c75164", "referenceCount": 91, "citationCount": 34, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "The false hope of current approaches to explainable artificial intelligence in health care.", "abstract": null, "year": 2021, "venue": "The Lancet Digital Health", "authors": [ "M. Ghassemi", "Luke Oakden-Rayner", "Andrew Beam" ], "externalIds": { "DOI": "10.1016/s2589-7500(21)00208-9", "CorpusId": 239963176, "PubMed": "34711379" }, "url": "https://www.semanticscholar.org/paper/634ed64dd2d4c53381fbcc53f4d0fa339711d799", "referenceCount": 48, "citationCount": 518, "influentialCitationCount": 17, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Beware explanations from AI in health care", "abstract": "The benefits of explainable artificial intelligence are not what they appear Artificial intelligence and machine learning (AI/ML) algorithms are increasingly developed in health care for diagnosis and treatment of a variety of medical conditions (1). However, despite the technical prowess of such systems, their adoption has been challenging, and whether and how much they will actually improve health care remains to be seen. A central reason for this is that the effectiveness of AI/ML-based medical devices depends largely on the behavioral characteristics of its users, who, for example, are often vulnerable to well-documented biases or algorithmic aversion (2). Many stakeholders increasingly identify the so-called black-box nature of predictive algorithms as the core source of users' skepticism, lack of trust, and slow uptake (3, 4). As a result, lawmakers have been moving in the direction of requiring the availability of explanations for black-box algorithmic decisions (5). Indeed, a near-consensus is emerging in favor of explainable AI/ML among academics, governments, and civil society groups. Many are drawn to this approach to harness the accuracy benefits of noninterpretable AI/ML such as deep learning or neural nets while also supporting transparency, trust, and adoption. We argue that this consensus, at least as applied to health care, both overstates the benefits and undercounts the drawbacks of requiring black-box algorithms to be explainable.", "year": 2021, "venue": "Science", "authors": [ "Boris Babic", "S. Gerke", "T. Evgeniou", "I. Cohen" ], "externalIds": { "DOI": "10.1126/science.abg1834", "CorpusId": 235914988, "PubMed": "34437144" }, "url": "https://www.semanticscholar.org/paper/f106ef1bad05ed38011cbd711d7c397080023b86", "referenceCount": 0, "citationCount": 126, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Quality Metrics for Transparent Machine Learning With and Without Humans In the Loop Are Not Correlated", "abstract": "The field explainable artificial intelligence (XAI) has brought about an arsenal of methods to render Machine Learning (ML) predictions more interpretable. But how useful explanations provided by transparent ML methods are for humans remains difficult to assess. Here we investigate the quality of interpretable computer vision algorithms using techniques from psychophysics. 
In crowdsourced annotation tasks we study the impact of different interpretability approaches on annotation accuracy and task time. We compare these quality metrics with classical XAI, automated quality metrics. Our results demonstrate that psychophysical experiments allow for robust quality assessment of transparency in machine learning. Interestingly the quality metrics computed without humans in the loop did not provide a consistent ranking of interpretability methods nor were they representative for how useful an explanation was for humans. These findings highlight the potential of methods from classical psychophysics for modern machine learning applications. We hope that our results provide convincing arguments for evaluating interpretability in its natural habitat, human-ML interaction, if the goal is to obtain an authentic assessment of interpretability.", "year": 2021, "venue": "arXiv.org", "authors": [ "F. Biessmann", "D. Refiano" ], "externalIds": { "ArXiv": "2107.02033", "DBLP": "journals/corr/abs-2107-02033", "CorpusId": 235731663 }, "url": "https://www.semanticscholar.org/paper/01580cb1af3a7e086b3a969a810eba62237a9ea7", "referenceCount": 20, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Do Feature Attribution Methods Correctly Attribute Features?", "abstract": "Feature attribution methods are popular in interpretable machine learning. These methods compute the attribution of each input feature to represent its importance, but there is no consensus on the definition of \"attribution\", leading to many competing methods with little systematic evaluation, complicated in particular by the lack of ground truth attribution. To address this, we propose a dataset modification procedure to induce such ground truth. Using this procedure, we evaluate three common methods: saliency maps, rationales, and attentions. We identify several deficiencies and add new perspectives to the growing body of evidence questioning the correctness and reliability of these methods applied on datasets in the wild. We further discuss possible avenues for remedy and recommend new attribution methods to be tested against ground truth before deployment. The code and appendix are available at https://yilunzhou.github.io/feature-attribution-evaluation/.", "year": 2021, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Yilun Zhou", "S. Booth", "Marco Tulio Ribeiro", "J. Shah" ], "externalIds": { "DBLP": "conf/aaai/ZhouBRS22", "ArXiv": "2104.14403", "MAG": "3157950068", "DOI": "10.1609/aaai.v36i9.21196", "CorpusId": 233443847 }, "url": "https://www.semanticscholar.org/paper/426734685283b4a0c08b34cd9e996e2e30e7f7ee", "referenceCount": 48, "citationCount": 116, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Automated biomarker candidate discovery in imaging mass spectrometry data through spatially localized Shapley additive explanations", "abstract": "The search for molecular species that are differentially expressed between biological states is an important step towards discovering promising biomarker candidates. In imaging mass spectrometry (IMS), performing this search manually is often impractical due to the large size and high-dimensionality of IMS datasets. 
Instead, we propose an interpretable machine learning workflow that automatically identifies biomarker candidates by their mass-to-charge ratios, and that quantitatively estimates their relevance to recognizing a given biological class using Shapley additive explanations (SHAP). The task of biomarker candidate discovery is translated into a feature ranking problem: given a classification model that assigns pixels to different biological classes on the basis of their mass spectra, the molecular species that the model uses as features are ranked in descending order of relative predictive importance such that the top-ranking features have a higher likelihood of being useful biomarkers. Besides providing the user with an experiment-wide measure of a molecular species’ biomarker potential, our workflow delivers spatially localized explanations of the classification model’s decision-making process in the form of a novel representation called SHAP maps. SHAP maps deliver insight into the spatial specificity of biomarker candidates by highlighting in which regions of the tissue sample each feature provides discriminative information and in which regions it does not. SHAP maps also enable one to determine whether the relationship between a biomarker candidate and a biological state of interest is correlative or anticorrelative. Our automated approach to estimating a molecular species’ potential for characterizing a user-provided biological class, combined with the untargeted and multiplexed nature of IMS, allows for the rapid screening of thousands of molecular species and the obtention of a broader biomarker candidate shortlist than would be possible through targeted manual assessment. Our biomarker candidate discovery workflow is demonstrated on mouse-pup and rat kidney case studies. Highlights Our workflow automates the discovery of biomarker candidates in imaging mass spectrometry data by using state-of-the-art machine learning methodology to produce a shortlist of molecular species that are differentially expressed with regards to a user-provided biological class. A model interpretability method called Shapley additive explanations (SHAP), with observational Shapley values, enables us to quantify the local and global predictive importance of molecular species with respect to recognizing a user-provided biological class. By providing spatially localized explanations for a classification model’s decision-making process, SHAP maps deliver insight into the spatial specificity of biomarker candidates and enable one to determine whether (and where) the relationship between a biomarker candidate and the class of interest is correlative or anticorrelative.", "year": 2020, "venue": "bioRxiv", "authors": [ "L. Tideman", "Lukasz G. Migas", "K. Djambazova", "N. Patterson", "R. Caprioli", "J. Spraggins", "Raf Van de Plas" ], "externalIds": { "PubMedCentral": "10124144", "MAG": "3159226767", "DOI": "10.1101/2020.12.23.424201", "CorpusId": 229935530, "PubMed": "34482894" }, "url": "https://www.semanticscholar.org/paper/2faeb46061fafd5034c28f100b910c1d2eee240f", "referenceCount": 76, "citationCount": 28, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Chemistry", "Biology", "Medicine" ] }, { "title": "Counterfactual Explanations for Multivariate Time Series", "abstract": "Multivariate time series are used in many science and engineering domains, including health-care, astronomy, and high-performance computing. 
A recent trend is to use machine learning (ML) to process this complex data and these ML-based frameworks are starting to play a critical role for a variety of applications. However, barriers such as user distrust or difficulty of debugging need to be overcome to enable widespread adoption of such frameworks in production systems. To address this challenge, we propose a novel explainability technique, CoMTE, that provides counterfactual explanations for supervised machine learning frameworks on multivariate time series data. Using various machine learning frameworks and data sets, we compare CoMTE with several state-of-the-art explainability methods and show that we outperform existing methods in comprehensibility and robustness. We also show how CoMTE can be used to debug machine learning frameworks and gain a better understanding of the underlying multivariate time series data.", "year": 2020, "venue": "2021 International Conference on Applied Artificial Intelligence (ICAPAI)", "authors": [ "E. Ates", "Burak Aksar", "V. Leung", "A. Coskun" ], "externalIds": { "ArXiv": "2008.10781", "DBLP": "journals/corr/abs-2008-10781", "MAG": "3081435493", "DOI": "10.1109/ICAPAI49758.2021.9462056", "CorpusId": 235739487 }, "url": "https://www.semanticscholar.org/paper/8965d19db2055249c3c00af24e287f47f51f669c", "referenceCount": 67, "citationCount": 50, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Drug discovery with explainable artificial intelligence", "abstract": null, "year": 2020, "venue": "Nature Machine Intelligence", "authors": [ "José Jiménez-Luna", "F. Grisoni", "G. Schneider" ], "externalIds": { "MAG": "3093687066", "DBLP": "journals/natmi/Jimenez-LunaGS20", "ArXiv": "2007.00523", "DOI": "10.1038/s42256-020-00236-4", "CorpusId": 220280875 }, "url": "https://www.semanticscholar.org/paper/d3a857a4bccf9a23b2d1e523c89456de728403b3", "referenceCount": 228, "citationCount": 529, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Algorithmic recourse under imperfect causal knowledge: a probabilistic approach", "abstract": "Recent work has discussed the limitations of counterfactual explanations to recommend actions for algorithmic recourse, and argued for the need of taking causal relationships between features into consideration. Unfortunately, in practice, the true underlying structural causal model is generally unknown. In this work, we first show that it is impossible to guarantee recourse without access to the true structural equations. To address this limitation, we propose two probabilistic approaches to select optimal actions that achieve recourse with high probability given limited causal knowledge (e.g., only the causal graph). The first captures uncertainty over structural equations under additive Gaussian noise, and uses Bayesian model averaging to estimate the counterfactual distribution. The second removes any assumptions on the structural equations by instead computing the average effect of recourse actions on individuals similar to the person who seeks recourse, leading to a novel subpopulation-based interventional notion of recourse. 
We then derive a gradient-based procedure for selecting optimal recourse actions, and empirically show that the proposed approaches lead to more reliable recommendations under imperfect causal knowledge than non-probabilistic baselines.", "year": 2020, "venue": "Neural Information Processing Systems", "authors": [ "Amir-Hossein Karimi", "Julius von Kügelgen", "B. Scholkopf", "Isabel Valera" ], "externalIds": { "MAG": "3034616174", "DBLP": "conf/nips/KarimiKSV20", "ArXiv": "2006.06831", "CorpusId": 219635985 }, "url": "https://www.semanticscholar.org/paper/06de04ade91f35446c21fa26c5621c8c88458edd", "referenceCount": 68, "citationCount": 154, "influentialCitationCount": 22, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Towards Faithfully Interpretable NLP Systems: How Should We Define and Evaluate Faithfulness?", "abstract": "With the growing popularity of deep-learning based NLP models, comes a need for interpretable systems. But what is interpretability, and what constitutes a high-quality interpretation? In this opinion piece we reflect on the current state of interpretability evaluation research. We call for more clearly differentiating between different desired criteria an interpretation should satisfy, and focus on the faithfulness criteria. We survey the literature with respect to faithfulness evaluation, and arrange the current approaches around three assumptions, providing an explicit form to how faithfulness is “defined” by the community. We provide concrete guidelines on how evaluation of interpretation methods should and should not be conducted. Finally, we claim that the current binary definition for faithfulness sets a potentially unrealistic bar for being considered faithful. We call for discarding the binary notion of faithfulness in favor of a more graded one, which we believe will be of greater practical utility.", "year": 2020, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Alon Jacovi", "Yoav Goldberg" ], "externalIds": { "MAG": "3015575765", "DBLP": "journals/corr/abs-2004-03685", "ACL": "2020.acl-main.386", "ArXiv": "2004.03685", "DOI": "10.18653/v1/2020.acl-main.386", "CorpusId": 215416110 }, "url": "https://www.semanticscholar.org/paper/579476d19566efc842929ea6bdd18ab760c8cfa2", "referenceCount": 50, "citationCount": 477, "influentialCitationCount": 51, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "CLEVR-XAI: A benchmark dataset for the ground truth evaluation of neural network explanations", "abstract": null, "year": 2020, "venue": "Information Fusion", "authors": [ "L. Arras", "Ahmed Osman", "W. Samek" ], "externalIds": { "ArXiv": "2003.07258", "DBLP": "journals/inffus/ArrasOS22", "DOI": "10.1016/j.inffus.2021.11.008", "CorpusId": 244125057 }, "url": "https://www.semanticscholar.org/paper/e0714f730d557d68e552284b3c9bd6567f116ca5", "referenceCount": 74, "citationCount": 112, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering" ] }, { "title": "Algorithmic Recourse: from Counterfactual Explanations to Interventions", "abstract": "As machine learning is increasingly used to inform consequential decision-making (e.g., pre-trial bail and loan approval), it becomes important to explain how the system arrived at its decision, and also suggest actions to achieve a favorable decision. 
Counterfactual explanations -\"how the world would have (had) to be different for a desirable outcome to occur\"- aim to satisfy these criteria. Existing works have primarily focused on designing algorithms to obtain counterfactual explanations for a wide range of settings. However, it has largely been overlooked that ultimately, one of the main objectives is to allow people to act rather than just understand. In layman's terms, counterfactual explanations inform an individual where they need to get to, but not how to get there. In this work, we rely on causal reasoning to caution against the use of counterfactual explanations as a recommendable set of actions for recourse. Instead, we propose a shift of paradigm from recourse via nearest counterfactual explanations to recourse through minimal interventions, shifting the focus from explanations to interventions.", "year": 2020, "venue": "Conference on Fairness, Accountability and Transparency", "authors": [ "Amir-Hossein Karimi", "B. Scholkopf", "Isabel Valera" ], "externalIds": { "DBLP": "conf/fat/KarimiSV21", "ArXiv": "2002.06278", "MAG": "3005632426", "DOI": "10.1145/3442188.3445899", "CorpusId": 211133257 }, "url": "https://www.semanticscholar.org/paper/0367827a9162f981ee02c4b3130f58085fba93f1", "referenceCount": 57, "citationCount": 303, "influentialCitationCount": 35, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "One Explanation Does Not Fit All", "abstract": null, "year": 2020, "venue": "KI - Künstliche Intelligenz", "authors": [ "Kacper Sokol", "Peter A. Flach" ], "externalIds": { "MAG": "3001808881", "DBLP": "journals/corr/abs-2001-09734", "ArXiv": "2001.09734", "DOI": "10.1007/s13218-020-00637-y", "CorpusId": 210920025 }, "url": "https://www.semanticscholar.org/paper/1efe9c3efd044e0d3d5ee7e7cab970273d2b6cfa", "referenceCount": 46, "citationCount": 144, "influentialCitationCount": 8, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "When Explanations Lie: Why Many Modified BP Attributions Fail", "abstract": "Attribution methods aim to explain a neural network's prediction by highlighting the most relevant image areas. A popular approach is to backpropagate (BP) a custom relevance score using modified rules, rather than the gradient. We analyze an extensive set of modified BP methods: Deep Taylor Decomposition, Layer-wise Relevance Propagation (LRP), Excitation BP, PatternAttribution, DeepLIFT, Deconv, RectGrad, and Guided BP. We find empirically that the explanations of all mentioned methods, except for DeepLIFT, are independent of the parameters of later layers. We provide theoretical insights for this surprising behavior and also analyze why DeepLIFT does not suffer from this limitation. Empirically, we measure how information of later layers is ignored by using our new metric, cosine similarity convergence (CSC). The paper provides a framework to assess the faithfulness of new and existing modified BP methods theoretically and empirically. 
For code see: this https URL", "year": 2019, "venue": "International Conference on Machine Learning", "authors": [ "Leon Sixt", "Maximilian Granz", "Tim Landgraf" ], "externalIds": { "MAG": "3006874766", "DBLP": "conf/icml/SixtGL20", "ArXiv": "1912.09818", "CorpusId": 212888344 }, "url": "https://www.semanticscholar.org/paper/8ee1adbd7aba181e19fb95c2130675551c350ad4", "referenceCount": 62, "citationCount": 116, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics", "Psychology" ] }, { "title": "Explainable AI: from black box to glass box", "abstract": null, "year": 2019, "venue": "Journal of the Academy of Marketing Science", "authors": [ "Arun Rai" ], "externalIds": { "MAG": "2994898777", "DOI": "10.1007/s11747-019-00710-5", "CorpusId": 210119147 }, "url": "https://www.semanticscholar.org/paper/2cc3338709ea9c14ff422025ae4a8ad09f9598ba", "referenceCount": 6, "citationCount": 574, "influentialCitationCount": 22, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Explainability fact sheets: a framework for systematic assessment of explainable approaches", "abstract": "Explanations in Machine Learning come in many forms, but a consensus regarding their desired properties is yet to emerge. In this paper we introduce a taxonomy and a set of descriptors that can be used to characterise and systematically assess explainable systems along five key dimensions: functional, operational, usability, safety and validation. In order to design a comprehensive and representative taxonomy and associated descriptors we surveyed the eXplainable Artificial Intelligence literature, extracting the criteria and desiderata that other authors have proposed or implicitly used in their research. The survey includes papers introducing new explainability algorithms to see what criteria are used to guide their development and how these algorithms are evaluated, as well as papers proposing such criteria from both computer science and social science perspectives. This novel framework allows to systematically compare and contrast explainability approaches, not just to better understand their capabilities but also to identify discrepancies between their theoretical qualities and properties of their implementations. We developed an operationalisation of the framework in the form of Explainability Fact Sheets, which enable researchers and practitioners alike to quickly grasp capabilities and limitations of a particular explainable method. When used as a Work Sheet, our taxonomy can guide the development of new explainability approaches by aiding in their critical evaluation along the five proposed dimensions.", "year": 2019, "venue": "FAT*", "authors": [ "Kacper Sokol", "Peter A. Flach" ], "externalIds": { "DBLP": "conf/fat/SokolF20", "MAG": "2995167739", "ArXiv": "1912.05100", "DOI": "10.1145/3351095.3372870", "CorpusId": 209202734 }, "url": "https://www.semanticscholar.org/paper/c021955e06618511f58df4e41882d7c1ac96a459", "referenceCount": 67, "citationCount": 269, "influentialCitationCount": 23, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Interpretable Machine Learning", "abstract": "Interpretable machine learning has become a popular research direction as deep neural networks (DNNs) have become more powerful and their applications more mainstream, yet DNNs remain difficult to understand. Testing with Concept Activation Vectors, TCAV, (Kim et al. 
2017) is an approach to interpreting DNNs in a human-friendly way and has recently received significant attention in the machine learning community. The TCAV algorithm achieves a degree of global interpretability for DNNs through human-defined concepts as explanations. This project introduces Robust TCAV, which builds on TCAV and experimentally determines best practices for this method. The objectives for Robust TCAV are 1) Making TCAV more consistent by reducing variance in the TCAV score distribution and 2) Increasing CAV and TCAV score resistance to perturbations. A difference of means method for CAV generation was determined to be the best practice to achieve both objectives. Many areas of the TCAV process are explored including CAV visualization in low dimensions, negative class selection, and activation perturbation in the direction of a CAV. Finally, a thresholding technique is considered to remove noise in TCAV scores. This project is a step in the direction of making TCAV, an already impactful algorithm in interpretability, more reliable and useful for practitioners.", "year": 2019, "venue": "Hands-On Machine Learning with R", "authors": [ "Bradley C. Boehmke", "Brandon M. Greenwell" ], "externalIds": { "MAG": "2999362542", "DOI": "10.1201/9780367816377-16", "CorpusId": 209379623 }, "url": "https://www.semanticscholar.org/paper/b0c34618ffd1154f35863e2ce7250ac6b6f2c424", "referenceCount": 165, "citationCount": 2300, "influentialCitationCount": 242, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Input-Cell Attention Reduces Vanishing Saliency of Recurrent Neural Networks", "abstract": "Recent efforts to improve the interpretability of deep neural networks use saliency to characterize the importance of input features to predictions made by models. Work on interpretability using saliency-based methods on Recurrent Neural Networks (RNNs) has mostly targeted language tasks, and their applicability to time series data is less understood. In this work we analyze saliency-based methods for RNNs, both classical and gated cell architectures. We show that RNN saliency vanishes over time, biasing detection of salient features only to later time steps and are, therefore, incapable of reliably detecting important features at arbitrary time intervals. To address this vanishing saliency problem, we propose a novel RNN cell structure (input-cell attention), which can extend any RNN cell architecture. At each time step, instead of only looking at the current input vector, input-cell attention uses a fixed-size matrix embedding, each row of the matrix attending to different inputs from current or previous time steps. Using synthetic data, we show that the saliency map produced by the input-cell attention RNN is able to faithfully detect important features regardless of their occurrence in time. We also apply the input-cell attention RNN on a neuroscience task analyzing functional Magnetic Resonance Imaging (fMRI) data for human subjects performing a variety of tasks. In this case, we use saliency to characterize brain regions (input features) for which activity is important to distinguish between tasks. 
We show that standard RNN architectures are only capable of detecting important brain regions in the last few time steps of the fMRI data, while the input-cell attention model is able to detect important brain region activity across time without latter time step biases.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Aya Abdelsalam Ismail", "Mohamed K. Gunady", "L. Pessoa", "H. C. Bravo", "S. Feizi" ], "externalIds": { "DBLP": "conf/nips/IsmailGPBF19", "ArXiv": "1910.12370", "MAG": "2970840267", "CorpusId": 204904776 }, "url": "https://www.semanticscholar.org/paper/5a7229010483ab60839a65f5cb3c391eeea711dc", "referenceCount": 35, "citationCount": 46, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Towards Explainable Artificial Intelligence", "abstract": null, "year": 2019, "venue": "Explainable AI", "authors": [ "W. Samek", "K. Müller" ], "externalIds": { "DBLP": "journals/corr/abs-1909-12072", "ArXiv": "1909.12072", "MAG": "3104997039", "DOI": "10.1007/978-3-030-28954-6_1", "CorpusId": 202579608 }, "url": "https://www.semanticscholar.org/paper/21f214165bda8813c95cfa3e471e24aaf8d5776b", "referenceCount": 100, "citationCount": 382, "influentialCitationCount": 16, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Benchmarking Attribution Methods with Relative Feature Importance", "abstract": "Interpretability is an important area of research for safe deployment of machine learning systems. One particular type of interpretability method attributes model decisions to input features. Despite active development, quantitative evaluation of feature attribution methods remains difficult due to the lack of ground truth: we do not know which input features are in fact important to a model. In this work, we propose a framework for Benchmarking Attribution Methods (BAM) with a priori knowledge of relative feature importance. BAM includes 1) a carefully crafted dataset and models trained with known relative feature importance and 2) three complementary metrics to quantitatively evaluate attribution methods by comparing feature attributions between pairs of models and pairs of inputs. Our evaluation on several widely-used attribution methods suggests that certain methods are more likely to produce false positive explanations---features that are incorrectly attributed as more important to model prediction. We open source our dataset, models, and metrics.", "year": 2019, "venue": "", "authors": [ "Mengjiao Yang", "Been Kim" ], "externalIds": { "MAG": "2987200109", "ArXiv": "1907.09701", "CorpusId": 207780661 }, "url": "https://www.semanticscholar.org/paper/96d1b48417113b41a67557c50e3e5405c3c86a55", "referenceCount": 32, "citationCount": 130, "influentialCitationCount": 9, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Explanations can be manipulated and geometry is to blame", "abstract": "Explanation methods aim to make neural networks more trustworthy and interpretable. In this paper, we demonstrate a property of explanation methods which is disconcerting for both of these purposes. Namely, we show that explanations can be manipulated arbitrarily by applying visually hardly perceptible perturbations to the input that keep the network's output approximately constant. We establish theoretically that this phenomenon can be related to certain geometrical properties of neural networks. 
This allows us to derive an upper bound on the susceptibility of explanations to manipulations. Based on this result, we propose effective mechanisms to enhance the robustness of explanations.", "year": 2019, "venue": "Neural Information Processing Systems", "authors": [ "Ann-Kathrin Dombrowski", "M. Alber", "Christopher J. Anders", "M. Ackermann", "K. Müller", "P. Kessel" ], "externalIds": { "DBLP": "journals/corr/abs-1906-07983", "MAG": "2970242004", "ArXiv": "1906.07983", "CorpusId": 195069127 }, "url": "https://www.semanticscholar.org/paper/a0f0a94927c0013fa924ee43c8ddbace1d71e3fb", "referenceCount": 36, "citationCount": 304, "influentialCitationCount": 38, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Explaining individual predictions when features are dependent: More accurate approximations to Shapley values", "abstract": null, "year": 2019, "venue": "Artificial Intelligence", "authors": [ "K. Aas", "Martin Jullum", "Anders Løland" ], "externalIds": { "MAG": "3146613606", "ArXiv": "1903.10464", "DBLP": "journals/corr/abs-1903-10464", "DOI": "10.1016/J.ARTINT.2021.103502", "CorpusId": 85497080 }, "url": "https://www.semanticscholar.org/paper/01356797005cc02c414e358dcc2b417b9b81cceb", "referenceCount": 60, "citationCount": 476, "influentialCitationCount": 59, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Unmasking Clever Hans predictors and assessing what machines really learn", "abstract": null, "year": 2019, "venue": "Nature Communications", "authors": [ "Sebastian Lapuschkin", "S. Wäldchen", "Alexander Binder", "G. Montavon", "W. Samek", "K. Müller" ], "externalIds": { "MAG": "2921802966", "DBLP": "journals/corr/abs-1902-10178", "ArXiv": "1902.10178", "PubMedCentral": "6411769", "DOI": "10.1038/s41467-019-08987-4", "CorpusId": 67856367, "PubMed": "30858366" }, "url": "https://www.semanticscholar.org/paper/4f51a64793d3b2a60e9e5846c31dae023cf5c69a", "referenceCount": 150, "citationCount": 900, "influentialCitationCount": 36, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics", "Medicine" ] }, { "title": "Definitions, methods, and applications in interpretable machine learning", "abstract": "Significance The recent surge in interpretability research has led to confusion on numerous fronts. In particular, it is unclear what it means to be interpretable and how to select, evaluate, or even discuss methods for producing interpretations of machine-learning models. We aim to clarify these concerns by defining interpretable machine learning and constructing a unifying framework for existing methods which highlights the underappreciated role played by human audiences. Within this framework, methods are organized into 2 classes: model based and post hoc. To provide guidance in selecting and evaluating interpretation methods, we introduce 3 desiderata: predictive accuracy, descriptive accuracy, and relevancy. Using our framework, we review existing work, grounded in real-world studies which exemplify our desiderata, and suggest directions for future work. Machine-learning models have demonstrated great success in learning complex patterns that enable them to make predictions about unobserved data. In addition to using models for prediction, the ability to interpret what a model has learned is receiving an increasing amount of attention. However, this increased focus has led to considerable confusion about the notion of interpretability. 
In particular, it is unclear how the wide array of proposed interpretation methods are related and what common concepts can be used to evaluate them. We aim to address these concerns by defining interpretability in the context of machine learning and introducing the predictive, descriptive, relevant (PDR) framework for discussing interpretations. The PDR framework provides 3 overarching desiderata for evaluation: predictive accuracy, descriptive accuracy, and relevancy, with relevancy judged relative to a human audience. Moreover, to help manage the deluge of interpretation methods, we introduce a categorization of existing techniques into model-based and post hoc categories, with subgroups including sparsity, modularity, and simulatability. To demonstrate how practitioners can use the PDR framework to evaluate and understand interpretations, we provide numerous real-world examples. These examples highlight the often underappreciated role played by human audiences in discussions of interpretability. Finally, based on our framework, we discuss limitations of existing methods and directions for future work. We hope that this work will provide a common vocabulary that will make it easier for both practitioners and researchers to discuss and choose from the full range of interpretation methods.", "year": 2019, "venue": "Proceedings of the National Academy of Sciences of the United States of America", "authors": [ "W. James Murdoch", "Chandan Singh", "Karl Kumbier", "R. Abbasi-Asl", "Bin Yu" ], "externalIds": { "MAG": "2910705748", "DBLP": "journals/corr/abs-1901-04592", "ArXiv": "1901.04592", "DOI": "10.1073/pnas.1900654116", "CorpusId": 204755862, "PubMed": "31619572" }, "url": "https://www.semanticscholar.org/paper/b9518627db25f05930e931f56497602363a75491", "referenceCount": 113, "citationCount": 1224, "influentialCitationCount": 37, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science", "Mathematics" ] }, { "title": "Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead", "abstract": null, "year": 2018, "venue": "Nature Machine Intelligence", "authors": [ "C. Rudin" ], "externalIds": { "MAG": "2974440810", "DBLP": "journals/natmi/Rudin19", "DOI": "10.1038/s42256-019-0048-x", "CorpusId": 182656421, "PubMed": "35603010" }, "url": "https://www.semanticscholar.org/paper/bc00ff34ec7772080c7039b17f7069a2f7df0889", "referenceCount": 86, "citationCount": 4952, "influentialCitationCount": 298, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Sanity Checks for Saliency Maps", "abstract": "Saliency methods have emerged as a popular tool to highlight features in an input deemed relevant for the prediction of a learned model. Several saliency methods have been proposed, often guided by visual appeal on image data. In this work, we propose an actionable methodology to evaluate what kinds of explanations a given method can and cannot provide. We find that reliance, solely, on visual assessment can be misleading. Through extensive experiments we show that some existing saliency methods are independent both of the model and of the data generating process. Consequently, methods that fail the proposed tests are inadequate for tasks that are sensitive to either data or model, such as, finding outliers in the data, explaining the relationship between inputs and outputs that the model learned, and debugging the model. 
We interpret our findings through an analogy with edge detection in images, a technique that requires neither training data nor model. Theory in the case of a linear model and a single-layer convolutional neural network supports our experimental findings.", "year": 2018, "venue": "Neural Information Processing Systems", "authors": [ "Julius Adebayo", "J. Gilmer", "M. Muelly", "I. Goodfellow", "Moritz Hardt", "Been Kim" ], "externalIds": { "MAG": "2891612330", "DBLP": "journals/corr/abs-1810-03292", "ArXiv": "1810.03292", "CorpusId": 52938797 }, "url": "https://www.semanticscholar.org/paper/8dc8f3e0127adc6985d4695e9b69d04717b2fde8", "referenceCount": 43, "citationCount": 1735, "influentialCitationCount": 173, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Actionable Recourse in Linear Classification", "abstract": "Classification models are often used to make decisions that affect humans: whether to approve a loan application, extend a job offer, or provide insurance. In such applications, individuals should have the ability to change the decision of the model. When a person is denied a loan by a credit scoring model, for example, they should be able to change the input variables of the model in a way that will guarantee approval. Otherwise, this person will be denied the loan so long as the model is deployed, and -- more importantly --will lack agency over a decision that affects their livelihood. In this paper, we propose to evaluate a linear classification model in terms of recourse, which we define as the ability of a person to change the decision of the model through actionable input variables (e.g., income vs. age or marital status). We present an integer programming toolkit to: (i) measure the feasibility and difficulty of recourse in a target population; and (ii) generate a list of actionable changes for a person to obtain a desired outcome. We discuss how our tools can inform different stakeholders by using them to audit recourse for credit scoring models built with real-world datasets. Our results illustrate how recourse can be significantly affected by common modeling practices, and motivate the need to evaluate recourse in algorithmic decision-making.", "year": 2018, "venue": "FAT", "authors": [ "Berk Ustun", "Alexander Spangher", "Yang Liu" ], "externalIds": { "DBLP": "conf/fat/UstunSL19", "ArXiv": "1809.06514", "MAG": "2891340972", "DOI": "10.1145/3287560.3287566", "CorpusId": 51734899 }, "url": "https://www.semanticscholar.org/paper/86841a74f0fd99ba369f635715ecae3007f22611", "referenceCount": 66, "citationCount": 489, "influentialCitationCount": 84, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Benchmark for Interpretability Methods in Deep Neural Networks", "abstract": "We propose an empirical measure of the approximate accuracy of feature importance estimates in deep neural networks. Our results across several large-scale image classification datasets show that many popular interpretability methods produce estimates of feature importance that are not better than a random designation of feature importance. Only certain ensemble based approaches---VarGrad and SmoothGrad-Squared---outperform such a random assignment of importance. The manner of ensembling remains critical, we show that some approaches do no better than the underlying method but carry a far higher computational burden.", "year": 2018, "venue": "Neural Information Processing Systems", "authors": [ "Sara Hooker", "D. 
Erhan", "Pieter-Jan Kindermans", "Been Kim" ], "externalIds": { "MAG": "2970447476", "DBLP": "conf/nips/HookerEKK19", "ArXiv": "1806.10758", "CorpusId": 202782699 }, "url": "https://www.semanticscholar.org/paper/5f614777d25efd14b7426e99cb2544f2d6be133e", "referenceCount": 44, "citationCount": 597, "influentialCitationCount": 81, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Detecting non-causal artifacts in multivariate linear regression models", "abstract": "We consider linear models where $d$ potential causes $X_1,...,X_d$ are correlated with one target quantity $Y$ and propose a method to infer whether the association is causal or whether it is an artifact caused by overfitting or hidden common causes. We employ the idea that in the former case the vector of regression coefficients has 'generic' orientation relative to the covariance matrix $\\Sigma_{XX}$ of $X$. Using an ICA based model for confounding, we show that both confounding and overfitting yield regression vectors that concentrate mainly in the space of low eigenvalues of $\\Sigma_{XX}$.", "year": 2018, "venue": "International Conference on Machine Learning", "authors": [ "D. Janzing", "B. Scholkopf" ], "externalIds": { "DBLP": "conf/icml/JanzingS18", "MAG": "2952165734", "ArXiv": "1803.00810", "CorpusId": 3697979 }, "url": "https://www.semanticscholar.org/paper/ca0c590c9b3f1e95d92f776ff56a22177da6c1ac", "referenceCount": 21, "citationCount": 33, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "A Survey of Methods for Explaining Black Box Models", "abstract": "In recent years, many accurate decision support systems have been constructed as black boxes, that is as systems that hide their internal logic to the user. This lack of explanation constitutes both a practical and an ethical issue. The literature reports many approaches aimed at overcoming this crucial weakness, sometimes at the cost of sacrificing accuracy for interpretability. The applications in which black box decision systems can be used are various, and each approach is typically developed to provide a solution for a specific problem and, as a consequence, it explicitly or implicitly delineates its own definition of interpretability and explanation. The aim of this article is to provide a classification of the main problems addressed in the literature with respect to the notion of explanation and the type of black box system. Given a problem definition, a black box type, and a desired explanation, this survey should help the researcher to find the proposals more useful for his own work. The proposed classification of approaches to open black box models should also be useful for putting the many research open questions in perspective.", "year": 2018, "venue": "ACM Computing Surveys", "authors": [ "Riccardo Guidotti", "A. Monreale", "F. Turini", "D. Pedreschi", "F. 
Giannotti" ], "externalIds": { "MAG": "2951278035", "DBLP": "journals/csur/GuidottiMRTGP19", "ArXiv": "1802.01933", "DOI": "10.1145/3236009", "CorpusId": 3342225 }, "url": "https://www.semanticscholar.org/paper/f7325d232c7ac7d2daaf6605377058db5b5b83cc", "referenceCount": 160, "citationCount": 3438, "influentialCitationCount": 188, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Visual Explanation by Interpretation: Improving Visual Feedback Capabilities of Deep Neural Networks", "abstract": "Learning-based representations have become the defacto means to address computer vision tasks. Despite their massive adoption, the amount of work aiming at understanding the internal representations learned by these models is rather limited. Existing methods aimed at model interpretation either require exhaustive manual inspection of visualizations, or link internal network activations with external \"possibly useful\" annotated concepts. We propose an intermediate scheme in which, given a pretrained model, we automatically identify internal features relevant for the set of classes considered by the model, without requiring additional annotations. We interpret the model through average visualizations of these features. Then, at test time, we explain the network prediction by accompanying the predicted class label with supporting heatmap visualizations derived from the identified relevant features. In addition, we propose a method to address the artifacts introduced by strided operations in deconvnet-based visualizations. Our evaluation on the MNIST, ILSVRC 12 and Fashion 144k datasets quantitatively shows that the proposed method is able to identify relevant internal features for the classes of interest while improving the quality of the produced visualizations.", "year": 2017, "venue": "International Conference on Learning Representations", "authors": [ "José Oramas", "Kaili Wang", "T. Tuytelaars" ], "externalIds": { "DBLP": "conf/iclr/MWT19", "ArXiv": "1712.06302", "MAG": "2949466881", "CorpusId": 36060542 }, "url": "https://www.semanticscholar.org/paper/8d8bc608da14bc0ce32c3a5d1fdfbe037993626d", "referenceCount": 43, "citationCount": 59, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Counterfactual Explanations Without Opening the Black Box: Automated Decisions and the GDPR", "abstract": "There has been much discussion of the right to explanation in the EU General Data Protection Regulation, and its existence, merits, and disadvantages. Implementing a right to explanation that opens the black box of algorithmic decision-making faces major legal and technical barriers. Explaining the functionality of complex algorithmic decision-making systems and their rationale in specific cases is a technically challenging problem. Some explanations may offer little meaningful information to data subjects, raising questions around their value. Explanations of automated decisions need not hinge on the general public understanding how algorithmic systems function. Even though such interpretability is of great importance and should be pursued, explanations can, in principle, be offered without opening the black box. Looking at explanations as a means to help a data subject act rather than merely understand, one could gauge the scope and content of explanations according to the specific goal or action they are intended to support. 
From the perspective of individuals affected by automated decision-making, we propose three aims for explanations: (1) to inform and help the individual understand why a particular decision was reached, (2) to provide grounds to contest the decision if the outcome is undesired, and (3) to understand what would need to change in order to receive a desired result in the future, based on the current decision-making model. We assess how each of these goals finds support in the GDPR. We suggest data controllers should offer a particular type of explanation, unconditional counterfactual explanations, to support these three aims. These counterfactual explanations describe the smallest change to the world that can be made to obtain a desirable outcome, or to arrive at the closest possible world, without needing to explain the internal logic of the system.", "year": 2017, "venue": "arXiv.org", "authors": [ "Sandra Wachter", "B. Mittelstadt", "Chris Russell" ], "externalIds": { "MAG": "2765204106", "ArXiv": "1711.00399", "DBLP": "journals/corr/abs-1711-00399", "DOI": "10.2139/ssrn.3063289", "CorpusId": 3995299 }, "url": "https://www.semanticscholar.org/paper/4f309712e705210df5695240a5d5fb53ea1f8641", "referenceCount": 21, "citationCount": 2019, "influentialCitationCount": 279, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Unified Approach to Interpreting Model Predictions", "abstract": "Understanding why a model makes a certain prediction can be as crucial as the prediction's accuracy in many applications. However, the highest accuracy for large modern datasets is often achieved by complex models that even experts struggle to interpret, such as ensemble or deep learning models, creating a tension between accuracy and interpretability. In response, various methods have recently been proposed to help users interpret the predictions of complex models, but it is often unclear how these methods are related and when one method is preferable over another. To address this problem, we present a unified framework for interpreting predictions, SHAP (SHapley Additive exPlanations). SHAP assigns each feature an importance value for a particular prediction. Its novel components include: (1) the identification of a new class of additive feature importance measures, and (2) theoretical results showing there is a unique solution in this class with a set of desirable properties. The new class unifies six existing methods, notable because several recent methods in the class lack the proposed desirable properties. Based on insights from this unification, we present new methods that show improved computational performance and/or better consistency with human intuition than previous approaches.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Scott M. Lundberg", "Su-In Lee" ], "externalIds": { "MAG": "2618851150", "DBLP": "journals/corr/LundbergL17", "ArXiv": "1705.07874", "CorpusId": 21889700 }, "url": "https://www.semanticscholar.org/paper/442e10a3c6640ded9408622005e3c2a8906ce4c2", "referenceCount": 10, "citationCount": 16601, "influentialCitationCount": 1905, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Learning how to explain neural networks: PatternNet and PatternAttribution", "abstract": "DeConvNet, Guided BackProp, LRP, were invented to better understand deep neural networks. We show that these methods do not produce the theoretically correct explanation for a linear model. 
Yet they are used on multi-layer networks with millions of parameters. This is a cause for concern since linear models are simple neural networks. We argue that explanation methods for neural nets should work reliably in the limit of simplicity, the linear models. Based on our analysis of linear models we propose a generalization that yields two explanation techniques (PatternNet and PatternAttribution) that are theoretically sound for linear models and produce improved explanations for deep networks.", "year": 2017, "venue": "International Conference on Learning Representations", "authors": [ "Pieter-Jan Kindermans", "Kristof T. Schütt", "M. Alber", "K. Müller", "D. Erhan", "Been Kim", "Sven Dähne" ], "externalIds": { "MAG": "2773497437", "DBLP": "conf/iclr/KindermansSAMEK18", "ArXiv": "1705.05598", "CorpusId": 32654687 }, "url": "https://www.semanticscholar.org/paper/ca2ed3e2e0514feb6f613b9875f3d98ade1b1dc1", "referenceCount": 28, "citationCount": 327, "influentialCitationCount": 25, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Axiomatic Attribution for Deep Networks", "abstract": "We study the problem of attributing the prediction of a deep network to its input features, a problem previously studied by several other works. We identify two fundamental axioms— Sensitivity and Implementation Invariance that attribution methods ought to satisfy. We show that they are not satisfied by most known attribution methods, which we consider to be a fundamental weakness of those methods. We use the axioms to guide the design of a new attribution method called Integrated Gradients. Our method requires no modification to the original network and is extremely simple to implement; it just needs a few calls to the standard gradient operator. We apply this method to a couple of image models, a couple of text models and a chemistry model, demonstrating its ability to debug networks, to extract rules from a network, and to enable users to engage with models better.", "year": 2017, "venue": "International Conference on Machine Learning", "authors": [ "Mukund Sundararajan", "Ankur Taly", "Qiqi Yan" ], "externalIds": { "DBLP": "journals/corr/SundararajanTY17", "MAG": "2949197630", "ArXiv": "1703.01365", "CorpusId": 16747630 }, "url": "https://www.semanticscholar.org/paper/f302e136c41db5de1d624412f68c9174cf7ae8be", "referenceCount": 35, "citationCount": 5097, "influentialCitationCount": 786, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Visualizing the effects of predictor variables in black box supervised learning models", "abstract": "In many supervised learning applications, understanding and visualizing the effects of the predictor variables on the predicted response is of paramount importance. A shortcoming of black box supervised learning models (e.g. complex trees, neural networks, boosted trees, random forests, nearest neighbours, local kernel‐weighted methods and support vector regression) in this regard is their lack of interpretability or transparency. Partial dependence plots, which are the most popular approach for visualizing the effects of the predictors with black box supervised learning models, can produce erroneous results if the predictors are strongly correlated, because they require extrapolation of the response at predictor values that are far outside the multivariate envelope of the training data. 
As an alternative to partial dependence plots, we present a new visualization approach that we term accumulated local effects plots, which do not require this unreliable extrapolation with correlated predictors. Moreover, accumulated local effects plots are far less computationally expensive than partial dependence plots. We also provide an R package ALEPlot as supplementary material to implement our proposed method.", "year": 2016, "venue": "Journal of the Royal Statistical Society: Series B (Statistical Methodology)", "authors": [ "D. Apley", "Jingyu Zhu" ], "externalIds": { "MAG": "2564414379", "ArXiv": "1612.08468", "DOI": "10.1111/rssb.12377", "CorpusId": 88522102 }, "url": "https://www.semanticscholar.org/paper/59b83c823a20cf1ce7a47805857d3c41c94357fe", "referenceCount": 24, "citationCount": 860, "influentialCitationCount": 85, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "“Why Should I Trust You?”: Explaining the Predictions of Any Classifier", "abstract": "Despite widespread adoption, machine learning models remain mostly black boxes. Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks). We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.", "year": 2016, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Marco Tulio Ribeiro", "Sameer Singh", "Carlos Guestrin" ], "externalIds": { "ACL": "N16-3020", "MAG": "2951501516", "DBLP": "journals/corr/RibeiroSG16", "ArXiv": "1602.04938", "DOI": "10.1145/2939672.2939778", "CorpusId": 13029170 }, "url": "https://www.semanticscholar.org/paper/c0883f5930a232a9c1ad601c978caede29155979", "referenceCount": 41, "citationCount": 14452, "influentialCitationCount": 1693, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Explaining nonlinear classification decisions with deep Taylor decomposition", "abstract": null, "year": 2015, "venue": "Pattern Recognition", "authors": [ "G. Montavon", "Sebastian Lapuschkin", "Alexander Binder", "W. 
Samek", "Klaus-Robert Müller" ], "externalIds": { "MAG": "2195388612", "DBLP": "journals/corr/MontavonBBSM15", "ArXiv": "1512.02479", "DOI": "10.1016/j.patcog.2016.11.008", "CorpusId": 266022338 }, "url": "https://www.semanticscholar.org/paper/056713e422a0753c5eb1733d73e9f8185e2015d4", "referenceCount": 56, "citationCount": 627, "influentialCitationCount": 48, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Evaluating the Visualization of What a Deep Neural Network Has Learned", "abstract": "Deep neural networks (DNNs) have demonstrated impressive performance in complex machine learning tasks such as image classification or speech recognition. However, due to their multilayer nonlinear structure, they are not transparent, i.e., it is hard to grasp what makes them arrive at a particular classification or recognition decision, given a new unseen data sample. Recently, several approaches have been proposed enabling one to understand and interpret the reasoning embodied in a DNN for a single test image. These methods quantify the “importance” of individual pixels with respect to the classification decision and allow a visualization in terms of a heatmap in pixel/input space. While the usefulness of heatmaps can be judged subjectively by a human, an objective quality measure is missing. In this paper, we present a general methodology based on region perturbation for evaluating ordered collections of pixels such as heatmaps. We compare heatmaps computed by three different methods on the SUN397, ILSVRC2012, and MIT Places data sets. Our main result is that the recently proposed layer-wise relevance propagation algorithm qualitatively and quantitatively provides a better explanation of what made a DNN arrive at a particular classification decision than the sensitivity-based approach or the deconvolution method. We provide theoretical arguments to explain this result and discuss its practical implications. Finally, we investigate the use of heatmaps for unsupervised assessment of the neural network performance.", "year": 2015, "venue": "IEEE Transactions on Neural Networks and Learning Systems", "authors": [ "W. Samek", "Alexander Binder", "G. Montavon", "Sebastian Lapuschkin", "K. Müller" ], "externalIds": { "MAG": "2240067561", "ArXiv": "1509.06321", "DBLP": "journals/corr/SamekBMBM15", "DOI": "10.1109/TNNLS.2016.2599820", "CorpusId": 7689122, "PubMed": "27576267" }, "url": "https://www.semanticscholar.org/paper/6df11b0bb0244d4d36e8955436067cc5d19734fa", "referenceCount": 44, "citationCount": 1086, "influentialCitationCount": 87, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "On Pixel-Wise Explanations for Non-Linear Classifier Decisions by Layer-Wise Relevance Propagation", "abstract": "Understanding and interpreting classification decisions of automated image classification systems is of high value in many applications, as it allows to verify the reasoning of the system and provides additional information to the human expert. Although machine learning methods are solving very successfully a plethora of tasks, they have in most cases the disadvantage of acting as a black box, not providing any information about what made them arrive at a particular decision. This work proposes a general solution to the problem of understanding classification decisions by pixel-wise decomposition of nonlinear classifiers. 
We introduce a methodology that allows to visualize the contributions of single pixels to predictions for kernel-based classifiers over Bag of Words features and for multilayered neural networks. These pixel contributions can be visualized as heatmaps and are provided to a human expert who can intuitively not only verify the validity of the classification decision, but also focus further analysis on regions of potential interest. We evaluate our method for classifiers trained on PASCAL VOC 2009 images, synthetic image data containing geometric shapes, the MNIST handwritten digits data set and for the pre-trained ImageNet model available as part of the Caffe open source package.", "year": 2015, "venue": "PLoS ONE", "authors": [ "Sebastian Bach", "Alexander Binder", "G. Montavon", "F. Klauschen", "K. Müller", "W. Samek" ], "externalIds": { "MAG": "1787224781", "PubMedCentral": "4498753", "DOI": "10.1371/journal.pone.0130140", "CorpusId": 9327892, "PubMed": "26161953" }, "url": "https://www.semanticscholar.org/paper/17a273bbd4448083b01b5a9389b3c37f5425aac0", "referenceCount": 68, "citationCount": 3790, "influentialCitationCount": 445, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Causal interpretation rules for encoding and decoding models in neuroimaging", "abstract": null, "year": 2015, "venue": "NeuroImage", "authors": [ "S. Weichwald", "Timm Meyer", "Ozan Özdenizci", "B. Scholkopf", "T. Ball", "M. Grosse-Wentrup" ], "externalIds": { "MAG": "2091828766", "ArXiv": "1511.04780", "DBLP": "journals/neuroimage/WeichwaldMOSBG15", "DOI": "10.1016/j.neuroimage.2015.01.036", "CorpusId": 6003605, "PubMed": "25623501" }, "url": "https://www.semanticscholar.org/paper/67529f0a3c2031282ec93440b76580de840789fa", "referenceCount": 50, "citationCount": 104, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics", "Biology", "Medicine" ] }, { "title": "On the interpretation of weight vectors of linear models in multivariate neuroimaging", "abstract": null, "year": 2014, "venue": "NeuroImage", "authors": [ "S. Haufe", "F. Meinecke", "Kai Görgen", "Sven Dähne", "J. Haynes", "B. Blankertz", "F. Biessmann" ], "externalIds": { "DBLP": "journals/neuroimage/HaufeMGDHBB14", "MAG": "2011402106", "DOI": "10.1016/j.neuroimage.2013.10.067", "CorpusId": 4512713, "PubMed": "24239590" }, "url": "https://www.semanticscholar.org/paper/3a6317c307688dc9cd82f34fda074b271d8b32ab", "referenceCount": 71, "citationCount": 1121, "influentialCitationCount": 85, "isOpenAccess": true, "fieldsOfStudy": [ "Psychology", "Medicine", "Computer Science" ] }, { "title": "To Explain or To Predict?", "abstract": "Statistical modeling is a powerful tool for developing and testing theories by way of causal explanation, prediction, and description. In many disciplines there is near-exclusive use of statistical modeling for causal ex- planation and the assumption that models with high explanatory power are inherently of high predictive power. Conflation between explanation and pre- diction is common, yet the distinction must be understood for progressing scientific knowledge. While this distinction has been recognized in the phi- losophy of science, the statistical literature lacks a thorough discussion of the many differences that arise in the process of modeling for an explanatory ver- sus a predictive goal. 
The purpose of this article is to clarify the distinction between explanatory and predictive modeling, to discuss its sources, and to reveal the practical implications of the distinction to each step in the model- ing process.", "year": 2010, "venue": "", "authors": [ "G. Shmueli" ], "externalIds": { "MAG": "2951936974", "ArXiv": "1101.0891", "DOI": "10.1214/10-STS330", "CorpusId": 15900983 }, "url": "https://www.semanticscholar.org/paper/9cded4694082bc1234204aca6507b47a5a039c1c", "referenceCount": 170, "citationCount": 1789, "influentialCitationCount": 91, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "How to Explain Individual Classification Decisions", "abstract": "After building a classifier with modern tools of machine learning we typically have a black box at hand that is able to predict well for unseen data. Thus, we get an answer to the question what is the most likely label of a given unseen data point. However, most methods will provide no answer why the model predicted a particular label for a single instance and what features were most influential for that particular instance. The only method that is currently able to provide such explanations are decision trees. This paper proposes a procedure which (based on a set of assumptions) allows to explain the decisions of any classification method.", "year": 2009, "venue": "Journal of machine learning research", "authors": [ "D. Baehrens", "T. Schroeter", "S. Harmeling", "M. Kawanabe", "K. Hansen", "K. Müller" ], "externalIds": { "ArXiv": "0912.1128", "DBLP": "journals/jmlr/BaehrensSHKHM10", "MAG": "2150165932", "DOI": "10.5555/1756006.1859912", "CorpusId": 14664111 }, "url": "https://www.semanticscholar.org/paper/4e6238c8613b5b81f81552939bce33296aedfbfe", "referenceCount": 51, "citationCount": 1037, "influentialCitationCount": 61, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "The Feature Importance Ranking Measure", "abstract": null, "year": 2009, "venue": "ECML/PKDD", "authors": [ "A. Zien", "Nicole Krämer", "S. Sonnenburg", "G. Rätsch" ], "externalIds": { "MAG": "2952883129", "DBLP": "conf/pkdd/ZienKSR09", "ArXiv": "0906.4258", "DOI": "10.1007/978-3-642-04174-7_45", "CorpusId": 15174037 }, "url": "https://www.semanticscholar.org/paper/1a8bad0d2d142f348337949162d1943d3dd802c7", "referenceCount": 17, "citationCount": 121, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Stability selection", "abstract": "Summary.  Estimation of structure, such as in variable selection, graphical modelling or cluster analysis, is notoriously difficult, especially for high dimensional data. We introduce stability selection. It is based on subsampling in combination with (high dimensional) selection algorithms. As such, the method is extremely general and has a very wide range of applicability. Stability selection provides finite sample control for some error rates of false discoveries and hence a transparent principle to choose a proper amount of regularization for structure estimation. Variable selection and structure estimation improve markedly for a range of selection methods if stability selection is applied. We prove for the randomized lasso that stability selection will be variable selection consistent even if the necessary conditions for consistency of the original lasso method are violated. 
We demonstrate stability selection for variable selection and Gaussian graphical modelling, using real and simulated data.", "year": 2008, "venue": "", "authors": [ "N. Meinshausen", "P. Buehlmann" ], "externalIds": { "MAG": "2562162676", "ArXiv": "0809.2932", "DOI": "10.1111/j.1467-9868.2010.00740.x", "CorpusId": 1231300 }, "url": "https://www.semanticscholar.org/paper/73a8a205a37f0169e89d3f0819a8ec36b39d3d2a", "referenceCount": 105, "citationCount": 2284, "influentialCitationCount": 220, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Analysis of regression in game theory approach", "abstract": "Working with multiple regression analysis a researcher usually wants to know a comparative importance of predictors in the model. However, the analysis can be made difficult because of multicollinearity among regressors, which produces biased coefficients and negative inputs to multiple determination from presum ably useful regressors. To solve this problem we apply a tool from the co-operative games theory, the Shapley Value imputation. We demonstrate the theoretical and practical advantages of the Shapley Value and show that it provides consistent results in the presence of multicollinearity. Copyright © 2001 John Wiley & Sons, Ltd.", "year": 2001, "venue": "", "authors": [ "S. Lipovetsky", "Michael Conklin" ], "externalIds": { "MAG": "1971916086", "DOI": "10.1002/ASMB.446", "CorpusId": 119847283 }, "url": "https://www.semanticscholar.org/paper/b7ef403dae5c7873ddf279e2a62a51e7b03038c7", "referenceCount": 18, "citationCount": 502, "influentialCitationCount": 24, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "A Value for n-person Games", "abstract": "Introduction At the foundation of the theory of games is the assumption that the players of a game can evaluate, in their utility scales, every “prospect” that might arise as a result of a play. In attempting to apply the theory to any field, one would normally expect to be permitted to include, in the class of “prospects,” the prospect of having to play a game. The possibility of evaluating games is therefore of critical importance. So long as the theory is unable to assign values to the games typically found in application, only relatively simple situations—where games do not depend on other games—will be susceptible to analysis and solution. In the finite theory of von Neumann and Morgenstern difficulty in evaluation persists for the “essential” games, and for only those. In this note we deduce a value for the “essential” case and examine a number of its elementary properties. We proceed from a set of three axioms, having simple intuitive interpretations, which suffice to determine the value uniquely. Our present work, though mathematically self-contained, is founded conceptually on the von Neumann—Morgenstern theory up to their introduction of characteristic functions. We thereby inherit certain important underlying assumptions: (a) that utility is objective and transferable; (b) that games are cooperative affairs; (c) that games, granting (a) and (b), are adequately represented by their characteristic functions.", "year": 1988, "venue": "", "authors": [ "L. 
Shapley" ], "externalIds": { "MAG": "1562353621", "DOI": "10.1017/CBO9780511528446.003", "CorpusId": 153629957 }, "url": "https://www.semanticscholar.org/paper/19088a582f2eb657ac1803f1ea1b79058d5c3dc7", "referenceCount": 0, "citationCount": 8139, "influentialCitationCount": 871, "isOpenAccess": false, "fieldsOfStudy": [ "Economics" ] }, { "title": "A Revised Definition for Suppressor Variables: a Guide To Their Identification and Interpretation", "abstract": "In the two-predictor situation it is shown that traditional and negative suppressors increase the predictive value of a standard predictor beyond that suggested by the predictor's zero order validity. This effect of suppression is used to provide a revised definition of suppression and completely accounts for traditional and negative suppression. The revised definition, in conjunction with a two factor model, is shown to lead to a previously undetected type of suppression (reciprocal suppression) which occurs when predictors with positive zero order validities are negatively correlated with one another. In terms of the definition and parameters of the model, limits are determined in which the types of suppression can occur. Furthermore, it is shown how suppressors can be identified in multiple regression equations and a procedure is given for interpreting whether the variables are contributing directly (by predicting relevant variance in the criterion) or indirectly (by removing irrelevant variance in another predictor) or both.", "year": 1974, "venue": "", "authors": [ "A. J. Conger" ], "externalIds": { "MAG": "2014219648", "DOI": "10.1177/001316447403400105", "CorpusId": 144049881 }, "url": "https://www.semanticscholar.org/paper/2d43224588ff0fd99357dfde60ed22acf592fc10", "referenceCount": 9, "citationCount": 618, "influentialCitationCount": 40, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology" ] }, { "title": "Counterfactual Explanations for Data-Driven Decisions", "abstract": null, "year": 2019, "venue": "International Conference on Interaction Sciences", "authors": [ "Carlos Fernandez", "F. Provost", "Xintian Han" ], "externalIds": { "MAG": "2991357956", "DBLP": "conf/icis/FernandezPH19", "CorpusId": 209150287 }, "url": "https://www.semanticscholar.org/paper/93c9d9a50f0cc10e2d847850c9f5896c2d241fa8", "referenceCount": 0, "citationCount": 9, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Economics", "Computer Science" ] }, { "title": "Random Forests", "abstract": null, "year": 2001, "venue": "Machine-mediated learning", "authors": [ "L. Breiman" ], "externalIds": { "MAG": "2911964244", "DBLP": "reference/ml/X17sy", "DOI": "10.1023/A:1010933404324", "CorpusId": 89141 }, "url": "https://www.semanticscholar.org/paper/8e0be569ea77b8cb29bb0e8b031887630fe7a96c", "referenceCount": 25, "citationCount": 89809, "influentialCitationCount": 5836, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "2022. 
From clustering to cluster explanations via neural networks", "abstract": null, "year": null, "venue": "IEEE Transactions on Neural Networks and Learning Systems", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "DIN SPEC 92001-3:2023-04", "abstract": null, "year": null, "venue": "Artificial intelligence – life cycle processes and quality requirements – part 3: Explainability", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2022. To explain or not to explain?–artificial intelligence explainability in clinical decision support systems", "abstract": null, "year": null, "venue": "PLOS Digital Health", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Dermatologist-like explainable AI enhances melanoma diagnosis accuracy: eye-tracking study": { "paper_title": "Dermatologist-like explainable AI enhances melanoma diagnosis accuracy: eye-tracking study", "arxiv_id": "2409.13476v1", "keyword": "explainable ai", "authors": [ "Tirtha Chanda", "Sarah Haggenmueller", "Tabea-Clara Bucher", "Tim Holland-Letz", "Harald Kittler", "Philipp Tschandl", "Markus V. Heppt", "Carola Berking", "Jochen S. Utikal", "Bastian Schilling", "Claudia Buerger", "Cristian Navarrete-Dechent", "Matthias Goebeler", "Jakob Nikolas Kather", "Carolin V. Schneider", "Benjamin Durani", "Hendrike Durani", "Martin Jansen", "Juliane Wacker", "Joerg Wacker", "Reader Study Consortium", "Titus J. Brinker" ], "references": [ { "title": "Patients' and dermatologists' preferences in AI-driven skin cancer diagnostics: prospective multicentric survey study.", "abstract": null, "year": 2024, "venue": "Journal of American Academy of Dermatology", "authors": [ "Sarah Haggenmüller", "Roman C. Maron", "A. Hekler", "E. Krieghoff-Henning", "J. Utikal", "Maria Gaiser", "Verena Müller", "Sascha Fabian", "Friedegund Meier", "S. Hobelsberger", "F. Gellrich", "M. Sergon", "Axel Hauschild", "Michael Weichenthal", "Lars E. French", "Lucie M. Heinzerling", "Justin G. Schlager", "K. Ghoreschi", "Max Schlaak", "F. Hilke", "G. Poch", "Sören Korsing", "C. Berking", "M. Heppt", "Michael Erdmann", "S. Haferkamp", "K. Drexler", "D. Schadendorf", "W. Sondermann", "Matthias Goebeler", "Bastian Schilling", "Jakob Nikolas Kather", "Stefan Fröhling", "Katharina Kaminski", "Astrid Doppler", "Tabea-Clara Bucher", "T. Brinker" ], "externalIds": { "DOI": "10.1016/j.jaad.2024.04.033", "CorpusId": 269367211, "PubMed": "38670313" }, "url": "https://www.semanticscholar.org/paper/598053c28856da6659344a7091ead20bffe00523", "referenceCount": 0, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Improving Tuberculosis Diagnosis using Explainable Artificial Intelligence in Medical Imaging", "abstract": "The integration of artificial intelligence (AI) applications in the healthcare sector is ushering in a \nsignificant transformation, particularly in developing more effective strategies for early diagnosis and treatment \nof contagious diseases like tuberculosis. Tuberculosis, a global public health challenge, demands swift \ninterventions to prevent its spread. 
While deep learning and image processing techniques show potential in \nextracting meaningful insights from complex radiological images, their accuracy is often scrutinized due to a lack \nof explainability. \nThis research navigates the intersection of AI and tuberculosis diagnosis by focusing on explainable \nartificial intelligence (XAI). A meticulously designed deep learning model for tuberculosis detection is introduced \nalongside an exploration of XAI to unravel complex decisions. \nThe core belief is that XAI, by elucidating diagnostic decision rationale, enhances the reliability of AI \nin clinical settings. Emphasizing the pivotal role of XAI in tuberculosis diagnosis, this study aims to impact future \nresearch and practical implementations, fostering the adoption of AI-driven disease diagnosis methodologies for \nglobal health improvement.", "year": 2024, "venue": "Journal of Mathematical Sciences and Modelling", "authors": [ "Cem Özkurt" ], "externalIds": { "DOI": "10.33187/jmsm.1417160", "CorpusId": 268316605 }, "url": "https://www.semanticscholar.org/paper/1bbe761138f18c6626c4954312d3dd9425ef303a", "referenceCount": 0, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Analyzing Cognitive Load Associated with Manual Text Classification Task Using Eye Tracking", "abstract": "The accuracy of machine learning-based automated text classification systems, such as spam filters and search engine results, heavily depends on the quality of manual text classification. However, the cognitive demands of manual text classification tasks, particularly when dealing with challenging or difficult-to-comprehend texts, have not been extensively explored in previous studies. This research aims to address this gap by investigating the cognitive load associated with manual text classification tasks through analyzing eye tracking data. In this study, 30 participants performed manual text classification tasks while their ocular parameters were recorded using an eye tracker. The findings of this study revealed that ocular parameters recorded through eye tracking provided valuable insights into the cognitive load experienced during manual text classification tasks. Furthermore, it was observed that complex narratives led to higher cognitive load estimation. Moreover, native English-speaking participants exhibited lower cognitive load, compared to non-native English speakers.", "year": 2023, "venue": "Proceedings of the Human Factors and Ergonomics Society Annual Meeting", "authors": [ "Jeevithashree Divya Venkatesh", "A. Jaiswal", "Gaurav Nanda" ], "externalIds": { "DOI": "10.1177/21695067231192221", "CorpusId": 264417243 }, "url": "https://www.semanticscholar.org/paper/f3bd8d5e64ca893182b336134f96d65f1b555135", "referenceCount": 12, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Trustworthy Artificial Intelligence in Medical Applications: A Mini Survey", "abstract": "Nowadays, a large amount of structured and unstructured data is being produced in various fields, creating tremendous opportunities to implement Machine Learning (ML) algorithms for decision-making. Although ML algorithms can outperform human performance in some fields, the black-box inherent characteristics of advanced models can hinder experts from exploiting them in sensitive domains such as medicine. 
The black-box nature of advanced ML models shadows the transparency of these algorithms, which could hamper their fair and robust performance due to the complexity of the algorithms. Consequently, individuals, organizations, and societies will not be able to achieve the full potential of ML without establishing trust in its development, deployment, and use. The field of eXplainable Artificial Intelligence (XAI) endeavors to solve this problem by providing human-understandable explanations for black-box models as a potential solution to acquire trustworthy AI. However, explainability is one of many requirements to fulfill trustworthy AI, and other prerequisites must also be met. Hence, this survey analyzes the fulfillment of five algorithmic requirements of accuracy, transparency, trust, robustness, and fairness through the lens of the literature in the medical domain. Regarding that medical experts are reluctant to put their judgment aside in favor of a machine, trustworthy AI algorithmic fulfillment could be a way to convince them to use ML. The results show there is still a long way to implement the algorithmic requirements in practice, and scholars need to consider them in future studies.", "year": 2023, "venue": "IEEE Symposium on Computational Intelligence in Bioinformatics and Computational Biology", "authors": [ "Mohsen Abbaspour Onari", "Isel Grau", "M. S. Nobile", "Yingqian Zhang" ], "externalIds": { "DBLP": "conf/cibcb/OnariGNZ23", "DOI": "10.1109/CIBCB56990.2023.10264883", "CorpusId": 263611968 }, "url": "https://www.semanticscholar.org/paper/75be86f8013219d4749e4845f2423c723cb60b12", "referenceCount": 63, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Improving trust and confidence in medical skin lesion diagnosis through explainable deep learning", "abstract": null, "year": 2023, "venue": "International Journal of Data Science and Analysis", "authors": [ "C. Metta", "Andrea Beretta", "Riccardo Guidotti", "Yuan Yin", "P. Gallinari", "S. Rinzivillo", "F. Giannotti" ], "externalIds": { "DOI": "10.1007/s41060-023-00401-z", "CorpusId": 259797412 }, "url": "https://www.semanticscholar.org/paper/fc6482af984f51ddebead9172e62b6cad05aaf27", "referenceCount": 16, "citationCount": 5, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "What is the weight of expectation bias in oncology trials?", "abstract": "In biomedical research, expectation bias, also referred to as Rosenthal effect, is the distorting effect on the results of an experiment caused by the expectation that the investigator, or the patient, has about the results themselves. The classical way to reduce the influence of this bias on the interpretation of results is through the blinding, or masking, of treatments, both terms referring to keeping the patients, the investigators, or the assessors unaware of the assigned treatment. Talking of expectations, can we postulate that the expectation bias will be greater the higher the unmet medical need underlying the research is? And, for the above reason, is the expectation bias stronger in oncology studies? This postulate does not seem to apply to the patients, since expectation (acting in patients via the placebo effect) is rarely associated with tumour positive responses. But, what about the investigators? 
If one looks at the recommendations of main regulatory bodies, the answer to this question will be that the investigator expectation is indeed considered an important confound in the interpretation of outcomes, thus requiring a blinded independent review to prevent the bias. In fact, in the current Food and Drug Administration (FDA) guideline on clinical trial endpoints for the approval of cancer drugs and biologics, the adoption of an independent blinded review is recommended for all the endpoints based on tumour assessments, namely disease-free survival, event-free survival, overall response rate (ORR), complete response, time to progression and progression-free survival (PFS), although for disease-free survival, event-free survival, time to progression and PFS, a decision should be taken on a case-by-case basis. Likewise, in the most recent European Medicines Agency (EMA) guideline, still available in draft, it is recommended that “if the study has to be conducted open label, this has implications with respect to choice of study endpoints, independent review, conduct of sensitivity analyses and other measures to be undertaken to limit potential bias related to the open-label nature of the trial”. We could not say how much the recommendations of FDA and EMA guidelines were based on the analysis of clinical evidence, and how much on methodological reasoning. As we will see below, some evidence is available, but limited to PFS in phase-3 trials. For certain, it is now possible to gain a quantitative measure of the influence of investigator expectation over the results of oncology trials, since many papers have been published so far that report the assessment of 2 pivotal endpoints, PFS and ORR, carried out both at local level (local assessment, LA) and by a blinded independent central review (BICR), within the same trial. If the assessment is conducted at local level, the local investigator, that is, the oncologist, will be in contact with the radiologist, hence will be aware of the treatment assigned (even though they are not directly involved in the imaging assessment). However, they will be blinded to the outcomes of BICR assessment, and the masking of assigned treatment will be maintained. By looking at the putative differences in assessment between LAs and BICRs in an adequately sized sample of clinical trials, an estimation can be obtained of the weight of expectation bias in this setting, if any. In the last 2 years, we have carried out extensive research on the topic of investigator expectation bias in oncology trials. We collected and analysed all phase-2 and phase-3 trials recorded in clinicaltrials.gov and EudraCT databases and reporting the results of PFS and/or ORR assessments carried out by both LAs and BICR within the same trial. First, we focused on PFS in phase-3 trials, a topic that has been matter of debate for more than a decade. Initially, Dodd et al., based on the analysis of 7 phase-3 trials showing no difference between the assessments of PFS carried out by LAs and BICR, raised the issue of BICR as an unnecessary, expensive and time-consuming procedure, which should not be used on a regular basis in confirmative phase-3 trials. 
Soon after, a group of researchers from a consortium of pharmaceutical companies further expanded the initial observation by Dodd and colleagues, reporting the results of an analysis on", "year": 2023, "venue": "British Journal of Clinical Pharmacology", "authors": [ "C. Dello Russo", "Pierluigi Navarra" ], "externalIds": { "DOI": "10.1111/bcp.15680", "CorpusId": 256764767, "PubMed": "36772876" }, "url": "https://www.semanticscholar.org/paper/12ebe9c00558dcef3317b0e9c2059a4981fff120", "referenceCount": 11, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "DermX: an end-to-end framework for explainable automated dermatological diagnosis", "abstract": null, "year": 2022, "venue": "Medical Image Anal.", "authors": [ "Raluca Jalaboi", "F. Faye", "Mauricio Orbes-Arteaga", "D. Jørgensen", "Ole Winther", "A. Galimzianova" ], "externalIds": { "ArXiv": "2202.06956", "DBLP": "journals/mia/JalaboiFOJWG23", "DOI": "10.1016/j.media.2022.102647", "CorpusId": 246863691, "PubMed": "36272237" }, "url": "https://www.semanticscholar.org/paper/d5d630a6624063f608877ce9cf412a5b8dccce0f", "referenceCount": 47, "citationCount": 9, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science", "Engineering" ] }, { "title": "ExAID: A Multimodal Explanation Framework for Computer-Aided Diagnosis of Skin Lesions", "abstract": null, "year": 2022, "venue": "Comput. Methods Programs Biomed.", "authors": [ "Adriano Lucieri", "Muhammad Naseer Bajwa", "S. Braun", "M. I. Malik", "A. Dengel", "Sheraz Ahmed" ], "externalIds": { "DBLP": "journals/cmpb/LucieriBBMDA22", "ArXiv": "2201.01249", "DOI": "10.1016/j.cmpb.2022.106620", "CorpusId": 245668760, "PubMed": "35033756" }, "url": "https://www.semanticscholar.org/paper/3578e5bfd976bef419d0382c292c58f46b38ba36", "referenceCount": 49, "citationCount": 47, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science", "Engineering" ] }, { "title": "Responsibility, second opinions and peer-disagreement: ethical and epistemological challenges of using AI in clinical diagnostic contexts", "abstract": "In this paper, we first classify different types of second opinions and evaluate the ethical and epistemological implications of providing those in a clinical context. Second, we discuss the issue of how artificial intelligent (AI) could replace the human cognitive labour of providing such second opinion and find that several AI reach the levels of accuracy and efficiency needed to clarify their use an urgent ethical issue. Third, we outline the normative conditions of how AI may be used as second opinion in clinical processes, weighing the benefits of its efficiency against concerns of responsibility attribution. Fourth, we provide a ‘rule of disagreement’ that fulfils these conditions while retaining some of the benefits of expanding the use of AI-based decision support systems (AI-DSS) in clinical contexts. This is because the rule of disagreement proposes to use AI as much as possible, but retain the ability to use human second opinions to resolve disagreements between AI and physician-in-charge. Fifth, we discuss some counterarguments.", "year": 2021, "venue": "Journal of Medical Ethics", "authors": [ "Hendrik Kempt", "S. 
Nagel" ], "externalIds": { "DOI": "10.1136/medethics-2021-107440", "CorpusId": 245168532, "PubMed": "34907006" }, "url": "https://www.semanticscholar.org/paper/6346905922ad3de3fae455e54c9a6261bc85ae2f", "referenceCount": 39, "citationCount": 39, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Current Challenges and Future Opportunities for XAI in Machine Learning-Based Clinical Decision Support Systems: A Systematic Review", "abstract": "Machine Learning and Artificial Intelligence (AI) more broadly have great immediate and future potential for transforming almost all aspects of medicine. However, in many applications, even outside medicine, a lack of transparency in AI applications has become increasingly problematic. This is particularly pronounced where users need to interpret the output of AI systems. Explainable AI (XAI) provides a rationale that allows users to understand why a system has produced a given output. The output can then be interpreted within a given context. One area that is in great need of XAI is that of Clinical Decision Support Systems (CDSSs). These systems support medical practitioners in their clinic decision-making and in the absence of explainability may lead to issues of under or over-reliance. Providing explanations for how recommendations are arrived at will allow practitioners to make more nuanced, and in some cases, life-saving decisions. The need for XAI in CDSS, and the medical field in general, is amplified by the need for ethical and fair decision-making and the fact that AI trained with historical data can be a reinforcement agent of historical actions and biases that should be uncovered. We performed a systematic literature review of work to-date in the application of XAI in CDSS. Tabular data processing XAI-enabled systems are the most common, while XAI-enabled CDSS for text analysis are the least common in literature. There is more interest in developers for the provision of local explanations, while there was almost a balance between post-hoc and ante-hoc explanations, as well as between model-specific and model-agnostic techniques. Studies reported benefits of the use of XAI such as the fact that it could enhance decision confidence for clinicians, or generate the hypothesis about causality, which ultimately leads to increased trustworthiness and acceptability of the system and potential for its incorporation in the clinical workflow. However, we found an overall distinct lack of application of XAI in the context of CDSS and, in particular, a lack of user studies exploring the needs of clinicians. We propose some guidelines for the implementation of XAI in CDSS and explore some opportunities, challenges, and future research needs.", "year": 2021, "venue": "Applied Sciences", "authors": [ "A. Antoniadi", "Yuhan Du", "Yasmine Guendouz", "Lan Wei", "Claudia Mazo", "Brett A. Becker", "C. Mooney" ], "externalIds": { "MAG": "3172362366", "DOI": "10.3390/APP11115088", "CorpusId": 236420019 }, "url": "https://www.semanticscholar.org/paper/19923458a40496d4fe1259662a0e2deea4465957", "referenceCount": 144, "citationCount": 269, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Artificial neural networks and pathologists recognize basal cell carcinomas based on different histological patterns", "abstract": null, "year": 2020, "venue": "Modern Pathology", "authors": [ "S. Kimeswenger", "P. Tschandl", "Petar Noack", "M. 
Hofmarcher", "Elisabeth Rumetshofer", "H. Kindermann", "R. Silye", "Sepp Hochreiter", "M. Kaltenbrunner", "E. Guenova", "G. Klambauer", "W. Hoetzenecker" ], "externalIds": { "MAG": "3104476988", "DOI": "10.1038/s41379-020-00712-7", "CorpusId": 226308993, "PubMed": "33184470" }, "url": "https://www.semanticscholar.org/paper/a545d6fbd1a8e29311ce31a9c8aa74bd85941725", "referenceCount": 39, "citationCount": 30, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Eye tracking in human interaction: Possibilities and limitations", "abstract": null, "year": 2020, "venue": "Behavior Research Methods", "authors": [ "Niilo V. Valtakari", "I. Hooge", "C. Viktorsson", "P. Nyström", "T. Falck-Ytter", "R. Hessels" ], "externalIds": { "PubMedCentral": "7787418", "DBLP": "conf/icmi/ValtakariHVNFH20", "DOI": "10.3758/s13428-020-01517-x", "CorpusId": 229548749, "PubMed": "33409984" }, "url": "https://www.semanticscholar.org/paper/c21c7cc934141d35940dcbab6bfff1d21ab7d342", "referenceCount": 101, "citationCount": 58, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Concept Bottleneck Models", "abstract": "We seek to learn models that we can interact with using high-level concepts: if the model did not think there was a bone spur in the x-ray, would it still predict severe arthritis? State-of-the-art models today do not typically support the manipulation of concepts like “the existence of bone spurs”, as they are trained end-to-end to go directly from raw input (e.g., pixels) to output (e.g., arthritis severity). We revisit the classic idea of first predicting concepts that are provided at training time, and then using these concepts to predict the label. By construction, we can intervene on these concept bottleneck models by editing their predicted concept values and propagating these changes to the final prediction. On x-ray grading and bird identification, concept bottleneck models achieve competitive accuracy with standard end-to-end models, while enabling interpretation in terms of high-level clinical concepts (“bone spurs”) or bird attributes (“wing color”). These models also allow for richer human-model interaction: accuracy improves significantly if we can correct model mistakes on concepts at test time.", "year": 2020, "venue": "International Conference on Machine Learning", "authors": [ "Pang Wei Koh", "Thao Nguyen", "Y. S. Tang", "Stephen Mussmann", "E. Pierson", "Been Kim", "Percy Liang" ], "externalIds": { "DBLP": "conf/icml/KohNTMPKL20", "ArXiv": "2007.04612", "MAG": "3041871339", "CorpusId": 220424448 }, "url": "https://www.semanticscholar.org/paper/3a24bfb77ed271fef948058e414850f89b0955a7", "referenceCount": 42, "citationCount": 585, "influentialCitationCount": 160, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "What Clinicians Want: Contextualizing Explainable Machine Learning for Clinical End Use", "abstract": "Translating machine learning (ML) models effectively to clinical practice requires establishing clinicians' trust. Explainability, or the ability of an ML model to justify its outcomes and assist clinicians in rationalizing the model prediction, has been generally understood to be critical to establishing trust. However, the field suffers from the lack of concrete definitions for usable explanations in different settings. 
To identify specific aspects of explainability that may catalyze building trust in ML models, we surveyed clinicians from two distinct acute care specialties (Intenstive Care Unit and Emergency Department). We use their feedback to characterize when explainability helps to improve clinicians' trust in ML models. We further identify the classes of explanations that clinicians identified as most relevant and crucial for effective translation to clinical practice. Finally, we discern concrete metrics for rigorous evaluation of clinical explainability methods. By integrating perceptions of explainability between clinicians and ML researchers we hope to facilitate the endorsement and broader adoption and sustained use of ML systems in healthcare.", "year": 2019, "venue": "Machine Learning in Health Care", "authors": [ "S. Tonekaboni", "Shalmali Joshi", "M. Mccradden", "A. Goldenberg" ], "externalIds": { "DBLP": "conf/mlhc/TonekaboniJMG19", "ArXiv": "1905.05134", "MAG": "2990652486", "CorpusId": 152282619 }, "url": "https://www.semanticscholar.org/paper/91ed985917cf4c317b7d91e15c1ec55e746153bf", "referenceCount": 87, "citationCount": 322, "influentialCitationCount": 23, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Seven-Point Checklist and Skin Lesion Classification Using Multitask Multimodal Neural Nets", "abstract": "We propose a multitask deep convolutional neural network, trained on multimodal data (clinical and dermoscopic images, and patient metadata), to classify the 7-point melanoma checklist criteria and perform skin lesion diagnosis. Our neural network is trained using several multitask loss functions, where each loss considers different combinations of the input modalities, which allows our model to be robust to missing data at inference time. Our final model classifies the 7-point checklist and skin condition diagnosis, produces multimodal feature vectors suitable for image retrieval, and localizes clinically discriminant regions. We benchmark our approach using 1011 lesion cases, and report comprehensive results over all 7-point criteria and diagnosis. We also make our dataset (images and metadata) publicly available online at http://derm.cs.sfu.ca.", "year": 2019, "venue": "IEEE journal of biomedical and health informatics", "authors": [ "J. Kawahara", "Sara Daneshvar", "G. Argenziano", "G. Hamarneh" ], "externalIds": { "MAG": "2797527544", "DBLP": "journals/titb/KawaharaDAH19", "DOI": "10.1109/JBHI.2018.2824327", "CorpusId": 51614248, "PubMed": "29993994" }, "url": "https://www.semanticscholar.org/paper/a0b7b10d504929bdb8765b72ce67077fe82b5971", "referenceCount": 33, "citationCount": 310, "influentialCitationCount": 36, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Emerging applications of eye-tracking technology in dermatology.", "abstract": null, "year": 2018, "venue": "Journal of dermatological science (Amsterdam)", "authors": [ "Kevin K. John", "Jakob D. Jensen", "Andy J. King", "Manusheela Pokharel", "D. 
Grossman" ], "externalIds": { "MAG": "2798167946", "DOI": "10.1016/j.jdermsci.2018.04.002", "CorpusId": 4887417, "PubMed": "29655589" }, "url": "https://www.semanticscholar.org/paper/4146438f377dbf6d8fe6f9c0a6cd5afcc9a330a3", "referenceCount": 49, "citationCount": 8, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Psychology" ] }, { "title": "Grad-CAM: Visual Explanations from Deep Networks via Gradient-Based Localization", "abstract": null, "year": 2016, "venue": "International Journal of Computer Vision", "authors": [ "Ramprasaath R. Selvaraju", "Abhishek Das", "Ramakrishna Vedantam", "Michael Cogswell", "Devi Parikh", "Dhruv Batra" ], "externalIds": { "MAG": "2962858109", "DBLP": "conf/iccv/SelvarajuCDVPB17", "ArXiv": "1610.02391", "DOI": "10.1007/s11263-019-01228-7", "CorpusId": 15019293 }, "url": "https://www.semanticscholar.org/paper/5582bebed97947a41e3ddd9bd1f284b73f1648c2", "referenceCount": 72, "citationCount": 16622, "influentialCitationCount": 1836, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "“Why Should I Trust You?”: Explaining the Predictions of Any Classifier", "abstract": "Despite widespread adoption, machine learning models remain mostly black boxes. Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally varound the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks). We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.", "year": 2016, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Marco Tulio Ribeiro", "Sameer Singh", "Carlos Guestrin" ], "externalIds": { "ACL": "N16-3020", "MAG": "2951501516", "DBLP": "journals/corr/RibeiroSG16", "ArXiv": "1602.04938", "DOI": "10.1145/2939672.2939778", "CorpusId": 13029170 }, "url": "https://www.semanticscholar.org/paper/c0883f5930a232a9c1ad601c978caede29155979", "referenceCount": 41, "citationCount": 14452, "influentialCitationCount": 1693, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Learning Deep Features for Discriminative Localization", "abstract": "In this work, we revisit the global average pooling layer proposed in [13], and shed light on how it explicitly enables the convolutional neural network (CNN) to have remarkable localization ability despite being trained on imagelevel labels. 
While this technique was previously proposed as a means for regularizing training, we find that it actually builds a generic localizable deep representation that exposes the implicit attention of CNNs on an image. Despite the apparent simplicity of global average pooling, we are able to achieve 37.1% top-5 error for object localization on ILSVRC 2014 without training on any bounding box annotation. We demonstrate in a variety of experiments that our network is able to localize the discriminative image regions despite just being trained for solving classification task1.", "year": 2015, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Bolei Zhou", "A. Khosla", "Àgata Lapedriza", "A. Oliva", "A. Torralba" ], "externalIds": { "DBLP": "conf/cvpr/ZhouKLOT16", "ArXiv": "1512.04150", "MAG": "2950328304", "DOI": "10.1109/CVPR.2016.319", "CorpusId": 6789015 }, "url": "https://www.semanticscholar.org/paper/31f9eb39d840821979e5df9f34a6e92dd9c879f2", "referenceCount": 37, "citationCount": 8532, "influentialCitationCount": 1404, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Radiologists remember mountains better than radiographs, or do they?", "abstract": "Abstract. Expertise with encoding material has been shown to aid long-term memory for that material. It is not clear how relevant this expertise is for image memorability (e.g., radiologists’ memory for radiographs), and how robust over time. In two studies, we tested scene memory using a standard long-term memory paradigm. One compared the performance of radiologists to naïve observers on two image sets, chest radiographs and everyday scenes, and the other radiologists’ memory with immediate as opposed to delayed recognition tests using musculoskeletal radiographs and forest scenes. Radiologists’ memory was better than novices for images of expertise but no different for everyday scenes. With the heterogeneity of image sets equated, radiologists’ expertise with radiographs afforded them better memory for the musculoskeletal radiographs than forest scenes. Enhanced memory for images of expertise disappeared over time, resulting in chance level performance for both image sets after weeks of delay. Expertise with the material is important for visual memorability but not to the same extent as idiosyncratic detail and variability of the image set. Similar memory decline with time for images of expertise as for everyday scenes further suggests that extended familiarity with an image is not a robust factor for visual memorability.", "year": 2015, "venue": "Journal of Medical Imaging", "authors": [ "K. Evans", "E. Marom", "M. Godoy", "Diana Palacio", "Tara L. Sagebiel", "S. Cuellar", "M. McEntee", "Charles Tian", "P. Brennan", "T. Haygood" ], "externalIds": { "MAG": "1938588300", "DOI": "10.1117/1.JMI.3.1.011005", "CorpusId": 6278068, "PubMed": "26870748" }, "url": "https://www.semanticscholar.org/paper/2ccbdc3d60d09627baac9b6f246e64029853620b", "referenceCount": 30, "citationCount": 10, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Engineering" ] }, { "title": "On Pixel-Wise Explanations for Non-Linear Classifier Decisions by Layer-Wise Relevance Propagation", "abstract": "Understanding and interpreting classification decisions of automated image classification systems is of high value in many applications, as it allows to verify the reasoning of the system and provides additional information to the human expert. 
Although machine learning methods are solving very successfully a plethora of tasks, they have in most cases the disadvantage of acting as a black box, not providing any information about what made them arrive at a particular decision. This work proposes a general solution to the problem of understanding classification decisions by pixel-wise decomposition of nonlinear classifiers. We introduce a methodology that allows to visualize the contributions of single pixels to predictions for kernel-based classifiers over Bag of Words features and for multilayered neural networks. These pixel contributions can be visualized as heatmaps and are provided to a human expert who can intuitively not only verify the validity of the classification decision, but also focus further analysis on regions of potential interest. We evaluate our method for classifiers trained on PASCAL VOC 2009 images, synthetic image data containing geometric shapes, the MNIST handwritten digits data set and for the pre-trained ImageNet model available as part of the Caffe open source package.", "year": 2015, "venue": "PLoS ONE", "authors": [ "Sebastian Bach", "Alexander Binder", "G. Montavon", "F. Klauschen", "K. Müller", "W. Samek" ], "externalIds": { "MAG": "1787224781", "PubMedCentral": "4498753", "DOI": "10.1371/journal.pone.0130140", "CorpusId": 9327892, "PubMed": "26161953" }, "url": "https://www.semanticscholar.org/paper/17a273bbd4448083b01b5a9389b3c37f5425aac0", "referenceCount": 68, "citationCount": 3790, "influentialCitationCount": 445, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "How \"consistent\" is \"consistent\"? A clinician-based assessment of the reliability of expressions used by radiologists to communicate diagnostic confidence.", "abstract": null, "year": 2014, "venue": "Clinical Radiology", "authors": [ "A. Rosenkrantz", "Michael Kiritsy", "Sooah Kim" ], "externalIds": { "MAG": "2132778613", "DOI": "10.1016/j.crad.2014.03.004", "CorpusId": 12062732, "PubMed": "24836524" }, "url": "https://www.semanticscholar.org/paper/373748c5e10452da618f7553d411eb13abf035f8", "referenceCount": 12, "citationCount": 28, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Understanding Visual Search Patterns of Dermatologists Assessing Pigmented Skin Lesions Before and After Online Training", "abstract": null, "year": 2014, "venue": "Journal of digital imaging", "authors": [ "E. Krupinski", "Joseph Chao", "R. Hofmann-Wellenhof", "L. Morrison", "C. Curiel-Lewandrowski" ], "externalIds": { "DBLP": "journals/jdi/KrupinskiCHMC14", "MAG": "2116576293", "DOI": "10.1007/s10278-014-9712-1", "CorpusId": 6318147, "PubMed": "24939005" }, "url": "https://www.semanticscholar.org/paper/76185072b72df424661ea2ddb3e75545c7830450", "referenceCount": 44, "citationCount": 22, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "PH2 - A dermoscopic image database for research and benchmarking", "abstract": "The increasing incidence of melanoma has recently promoted the development of computer-aided diagnosis systems for the classification of dermoscopic images. Unfortunately, the performance of such systems cannot be compared since they are evaluated in different sets of images by their authors and there are no public databases available to perform a fair evaluation of multiple systems. In this paper, a dermoscopic image database, called PH2, is presented. 
The PH2 database includes the manual segmentation, the clinical diagnosis, and the identification of several dermoscopic structures, performed by expert dermatologists, in a set of 200 dermoscopic images. The PH2 database will be made freely available for research and benchmarking purposes.", "year": 2013, "venue": "Annual International Conference of the IEEE Engineering in Medicine and Biology Society", "authors": [ "T. Mendonça", "P. Ferreira", "J. Marques", "A. Marçal", "J. Rozeira" ], "externalIds": { "MAG": "2061253660", "DBLP": "conf/embc/MendoncaFMMR13", "DOI": "10.1109/EMBC.2013.6610779", "CorpusId": 8042197, "PubMed": "24110966" }, "url": "https://www.semanticscholar.org/paper/24019050c30b7e5bf1be28e48b8cb5278c4286fd", "referenceCount": 15, "citationCount": 865, "influentialCitationCount": 99, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Differences in examination characteristics of pigmented skin lesions: Results of an eye tracking study", "abstract": null, "year": 2012, "venue": "Artif. Intell. Medicine", "authors": [ "S. Dreiseitl", "M. Pivec", "M. Binder" ], "externalIds": { "DBLP": "journals/artmed/DreiseitlPB12", "MAG": "2006651944", "DOI": "10.1016/j.artmed.2011.11.004", "CorpusId": 205694329, "PubMed": "22209476" }, "url": "https://www.semanticscholar.org/paper/445c8944fbac1d077bb6c7753d5a93b235c3c568", "referenceCount": 25, "citationCount": 21, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "The \"memory effect\" for repeated radiologic observations.", "abstract": "OBJECTIVE\nIt is assumed that memory has a role to play in repeated radiologic observation studies. The main objective of this study was to examine this assumption and evaluate the effect that memory may have on receiver operating characteristic (ROC) methods.\n\n\nMATERIALS AND METHODS\nA two-center observer study was performed with a total of 24 experienced radiologists. Over two viewings, chest radiographs showing the tip of a central line in either the superior vena cava or the azygos vein were presented. Half of the images were changed between the two viewings. The participants' attention was directed on the first reading to the position of the central line. At the second reading, the participants were asked to assign a confidence score on a 6-point scale about whether each image had been included in the first reading.\n\n\nRESULTS\nFor the images that were scored as \"definitely included\" in the first viewing, readers at our two centers recalled only an average of 2.5 and 4.9 of the 20 repeated images, which is close to a random allocation of images to each score. As the confidence levels diminished for positive identification of repeated images, the numbers of correct answers increased. For images scored as not having been previously included, the numbers of correct answers remained low suggesting that identification of nonrepeated images is poor. Images with a greater number of incidental abnormalities and with more striking abnormalities were recognized more accurately than those with fewer and less striking abnormalities.\n\n\nCONCLUSION\nThis study shows a \"memory effect\" when the same images are presented at a second viewing within a small interval period. This effect appears to occur mainly at low confidence levels. These results suggest that including images with obvious incidental abnormalities in reader performance studies should be avoided.", "year": 2011, "venue": "AJR. 
American journal of roentgenology", "authors": [ "J. Ryan", "T. Haygood", "Jose-Miguel Yamal", "M. Evanoff", "P. O'Sullivan", "M. McEntee", "P. Brennan" ], "externalIds": { "MAG": "2008219388", "DOI": "10.2214/AJR.10.5859", "CorpusId": 207323816, "PubMed": "22109344" }, "url": "https://www.semanticscholar.org/paper/1761df397475271c1d6706b9bdc7af1f49b2520e", "referenceCount": 21, "citationCount": 31, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Decision trees: a recent overview", "abstract": null, "year": 2011, "venue": "Artificial Intelligence Review", "authors": [ "S. Kotsiantis" ], "externalIds": { "DBLP": "journals/air/Kotsiantis13", "MAG": "1987552279", "DOI": "10.1007/s10462-011-9272-4", "CorpusId": 18866938 }, "url": "https://www.semanticscholar.org/paper/0fab509941517c87f8fe23c5a82e315a8726c442", "referenceCount": 109, "citationCount": 1030, "influentialCitationCount": 34, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Seven‐point checklist of dermoscopy revisited", "abstract": "Background  Most dermoscopic algorithms to diagnose melanoma were established more than 10 years ago and have been tested primarily on clear‐cut melanomas and excised melanocytic naevi.", "year": 2011, "venue": "British Journal of Dermatology", "authors": [ "G. Argenziano", "C. Catricalà", "M. Ardigo", "P. Buccini", "P. Simone", "L. Eibenschutz", "A. Ferrari", "G. Mariani", "V. Silipo", "Isabella Sperduti", "I. Zalaudek" ], "externalIds": { "MAG": "1529623688", "DOI": "10.1111/j.1365-2133.2010.10194.x", "CorpusId": 205261111, "PubMed": "21175563" }, "url": "https://www.semanticscholar.org/paper/9c740603d9f6578e5c4168461d213c31cc6c9834", "referenceCount": 18, "citationCount": 155, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "\"Memory effect\" in observer performance studies of mammograms.", "abstract": null, "year": 2005, "venue": "Academic Radiology", "authors": [ "L. Hardesty", "M. Ganott", "C. Hakim", "Cathy Cohen", "R. Clearfield", "D. Gur" ], "externalIds": { "MAG": "2032193313", "DOI": "10.1016/J.ACRA.2004.11.026", "CorpusId": 37045980, "PubMed": "15766687" }, "url": "https://www.semanticscholar.org/paper/f24d5f67f0475d05dfa34401c8c6620e3487c8df", "referenceCount": 1, "citationCount": 35, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Computer interface evaluation using eye movements: methods and constructs", "abstract": null, "year": 1999, "venue": "", "authors": [ "J. Goldberg", "Xerxes P. Kotval" ], "externalIds": { "MAG": "2023159994", "DOI": "10.1016/S0169-8141(98)00068-7", "CorpusId": 62687771 }, "url": "https://www.semanticscholar.org/paper/ad0755fceba99d0697d8c1eeb9f1763cc19aa589", "referenceCount": 21, "citationCount": 1007, "influentialCitationCount": 83, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Classification with learning k-nearest neighbors", "abstract": "The nearest neighbor (NN) classifiers, especially the k-NN algorithm, are among the simplest and yet most efficient classification rules and are widely used in practice. We introduce three adaptation rules that can be used in iterative training of a k-NN classifier. This is a novel approach both from the statistical pattern recognition and the supervised neural network learning points of view. 
The suggested learning rules resemble those of the well-known learning vector quantization (LVQ) method, but at the same time the classifier utilizes the fact that increasing the number of samples that the classification is based on leads to improved classification accuracy. The performances of the suggested learning rules are compared with the usual K-NN rules and the LVQ1 algorithm.", "year": 1996, "venue": "International Conference on Neural Networks", "authors": [ "Jorma T. Laaksonen", "E. Oja" ], "externalIds": { "MAG": "2172189177", "DBLP": "conf/icnn/LaaksonenO96", "DOI": "10.1109/ICNN.1996.549118", "CorpusId": 17726648 }, "url": "https://www.semanticscholar.org/paper/71d6efe56eedf10c628e1f4a1115939a80580c19", "referenceCount": 6, "citationCount": 151, "influentialCitationCount": 15, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Application of Example-Based Explainable Artificial Intelligence (XAI) for Analysis and Interpretation of Medical Imaging: A Systematic Review", "abstract": "Explainable Artificial Intelligence (XAI) is an area of growing interest, particularly in medical imaging, where example-based techniques show great potential. This paper is a systematic review of recent example-based XAI techniques, a promising approach that remains relatively unexplored in clinical practice and medical image analysis. A selection and analysis of recent studies using example-based XAI techniques for interpreting medical images was carried out. Several approaches were examined, highlighting how each contributes to increasing accuracy, transparency, and usability in medical applications. These techniques were compared and discussed in detail, considering their advantages and limitations in the context of medical imaging, with a focus on improving the integration of these technologies into clinical practice and medical decision-making. The review also pointed out gaps in current research, suggesting directions for future investigations. The need to develop XAI methods that are not only technically efficient but also ethically responsible and adaptable to the needs of healthcare professionals was emphasised. 
Thus, the paper sought to establish a solid foundation for understanding and advancing example-based XAI techniques in medical imaging, promoting a more integrated and patient-centred approach to medicine.", "year": 2024, "venue": "IEEE Access", "authors": [ "Miguel Fontes", "João Dallyson Sousa De Almeida", "António Cunha" ], "externalIds": { "DBLP": "journals/access/FontesAC24", "DOI": "10.1109/ACCESS.2024.3367606", "CorpusId": 267823459 }, "url": "https://www.semanticscholar.org/paper/0c02156cf3f29eea3beb4c2fde3da628862ffaa1", "referenceCount": 18, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The HAM10000 dataset, a large collection of multi-source dermatoscopic images of common pigmented skin lesions", "abstract": null, "year": null, "venue": "Sci. Data", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, 
"An Adaptive End-to-End IoT Security Framework Using Explainable AI and LLMs": { "paper_title": "An Adaptive End-to-End IoT Security Framework Using Explainable AI and LLMs", "arxiv_id": "2409.13177v1", "keyword": "explainable ai", "authors": [ "Sudipto Baral", "Sajal Saha", "Anwar Haque" ], "references": [ { "title": "CICIoT2023: A Real-Time Dataset and Benchmark for Large-Scale Attacks in IoT Environment", "abstract": "Nowadays, the Internet of Things (IoT) concept plays a pivotal role in society and brings new capabilities to different industries. The number of IoT solutions in areas such as transportation and healthcare is increasing and new services are under development. In the last decade, society has experienced a drastic increase in IoT connections. In fact, IoT connections will increase in the next few years across different areas. Conversely, several challenges still need to be faced to enable efficient and secure operations (e.g., interoperability, security, and standards). Furthermore, although efforts have been made to produce datasets composed of attacks against IoT devices, several possible attacks are not considered. Most existing efforts do not consider an extensive network topology with real IoT devices. The main goal of this research is to propose a novel and extensive IoT attack dataset to foster the development of security analytics applications in real IoT operations. To accomplish this, 33 attacks are executed in an IoT topology composed of 105 devices. These attacks are classified into seven categories, namely DDoS, DoS, Recon, Web-based, brute force, spoofing, and Mirai. Finally, all attacks are executed by malicious IoT devices targeting other IoT devices. The dataset is available on the CIC Dataset website.", "year": 2023, "venue": "Italian National Conference on Sensors", "authors": [ "E. P. Neto", "Sajjad Dadkhah", "Raphael Ferreira", "Alireza Zohourian", "Rongxing Lu", "A. Ghorbani" ], "externalIds": { "DBLP": "journals/sensors/NetoDFZLG23", "PubMedCentral": "10346235", "DOI": "10.3390/s23135941", "CorpusId": 259694870, "PubMed": "37447792" }, "url": "https://www.semanticscholar.org/paper/4618d69b16ad46ecb59a1c693bb42fa7d3502a95", "referenceCount": 105, "citationCount": 119, "influentialCitationCount": 20, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Explainable AI and Random Forest Based Reliable Intrusion Detection system", "abstract": "Emerging\nCyber threats with an increased dependency on vulnerable cyber-networks have\njeopardized all stakeholders, making Intrusion Detection Systems (IDS) the\nessential network security requirement. 
\nSeveral IDS have been proposed in the past decade for preventing systems\nfrom cyber-attacks. Machine learning (ML) based IDS have shown remarkable\nperformance on conventional cyber threats. However, the introduction of\nadversarial attacks in the cyber domain highlights the need to upgrade these IDS\nbecause conventional ML-based approaches are vulnerable to adversarial attacks.\nTherefore, the proposed IDS framework leverages the performance of conventional\nML-based IDS and integrates it with Explainable AI (XAI) to deal with\nadversarial attacks. Global Explanation of AI model, extracted by SHAP (Shapley\nadditive explanation) during the training phase of Primary Random Forest\nClassifier (RFC), is used to reassess the credibility of predicted outcomes. In\nother words, an outcome with low credibility is reassessed by secondary\nclassifiers. This SHAP-based approach helps in filtering out all disguised\nmalicious network traffic and can also enhance user trust by adding\ntransparency to the decision-making process. Adversarial robustness of the\nproposed IDS was assessed by Hop Skip Jump Attack and CICIDS dataset, where IDS\nshowed 98.5% and 100% accuracy, respectively. Furthermore, the performance of\nthe proposed IDS is compared with conventional algorithms using recall,\nprecision, accuracy, and F1-score as evaluation metrics. This comparative\nanalysis and series of experiments endorse the credibility of the proposed\nscheme, depicting that the integration of XAI with conventional IDS can ensure\ncredibility, integrity, and availability of cyber-networks.", "year": 2021, "venue": "", "authors": [ "S. Wali", "Irfan Khan" ], "externalIds": { "DOI": "10.36227/techrxiv.17169080.v1", "CorpusId": 245331964 }, "url": "https://www.semanticscholar.org/paper/9a2f551bf245297b920293a8896b68c95d7e9571", "referenceCount": 0, "citationCount": 21, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": null }, { "title": "Internet of Things attack detection using hybrid Deep Learning Model", "abstract": null, "year": 2021, "venue": "Computer Communications", "authors": [ "Amiya Kumar Sahu", "Suraj Sharma", "M. Tanveer", "R. Raja" ], "externalIds": { "MAG": "3169330763", "DBLP": "journals/comcom/SahuSTR21", "DOI": "10.1016/J.COMCOM.2021.05.024", "CorpusId": 236254595 }, "url": "https://www.semanticscholar.org/paper/d06692295bade32be77a632c2d038793bc6aa443", "referenceCount": 29, "citationCount": 107, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Explaining Network Intrusion Detection System Using Explainable AI Framework", "abstract": "Cybersecurity is a domain where the data distribution is constantly changing with attackers exploring newer patterns to attack cyber infrastructure. Intrusion detection system is one of the important layers in cyber safety in today's world. Machine learning based network intrusion detection systems started showing effective results in recent years. With deep learning models, detection rates of network intrusion detection system are improved. More accurate the model, more the complexity and hence less the interpretability. Deep neural networks are complex and hard to interpret which makes difficult to use them in production as reasons behind their decisions are unknown. In this paper, we have used deep neural network for network intrusion detection and also proposed explainable AI framework to add transparency at every stage of machine learning pipeline. 
This is done by leveraging Explainable AI algorithms which focus on making ML models less of black boxes by providing explanations as to why a prediction is made. Explanations give us measurable factors as to what features influence the prediction of a cyberattack and to what degree. These explanations are generated from SHAP, LIME, Contrastive Explanations Method, ProtoDash and Boolean Decision Rules via Column Generation. We apply these approaches to NSL KDD dataset for intrusion detection system and demonstrate results.", "year": 2021, "venue": "arXiv.org", "authors": [ "Shraddha Mane", "Dattaraj Rao" ], "externalIds": { "ArXiv": "2103.07110", "DBLP": "journals/corr/abs-2103-07110", "CorpusId": 232222432 }, "url": "https://www.semanticscholar.org/paper/42b50fe18f465282eb596380893a6b78e62b6040", "referenceCount": 17, "citationCount": 49, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "CorrAUC: A Malicious Bot-IoT Traffic Detection Method in IoT Network Using Machine-Learning Techniques", "abstract": "Identification of anomaly and malicious traffic in the Internet-of-Things (IoT) network is essential for the IoT security to keep eyes and block unwanted traffic flows in the IoT network. For this purpose, numerous machine-learning (ML) technique models are presented by many researchers to block malicious traffic flows in the IoT network. However, due to the inappropriate feature selection, several ML models prone misclassify mostly malicious traffic flows. Nevertheless, the significant problem still needs to be studied more in-depth that is how to select effective features for accurate malicious traffic detection in the IoT network. To address the problem, a new framework model is proposed. First, a novel feature selection metric approach named CorrAUC is proposed, and then based on CorrAUC, a new feature selection algorithm named CorrAUC is developed and designed, which is based on the wrapper technique to filter the features accurately and select effective features for the selected ML algorithm by using the area under the curve (AUC) metric. Then, we applied the integrated TOPSIS and Shannon entropy based on a bijective soft set to validate selected features for malicious traffic identification in the IoT network. We evaluate our proposed approach by using the Bot-IoT data set and four different ML algorithms. The experimental results analysis showed that our proposed method is efficient and can achieve >96% results on average.", "year": 2021, "venue": "IEEE Internet of Things Journal", "authors": [ "M. Shafiq", "Zhihong Tian", "A. Bashir", "Xiaojiang Du", "M. Guizani" ], "externalIds": { "DBLP": "journals/iotj/ShafiqTBDG21", "MAG": "3035366542", "DOI": "10.1109/JIOT.2020.3002255", "CorpusId": 226485070 }, "url": "https://www.semanticscholar.org/paper/026cbbb91e5a0d5b606917d14b385a2b84fee64e", "referenceCount": 67, "citationCount": 274, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Network Flow based IoT Botnet Attack Detection using Deep Learning", "abstract": "Governments around the globe are promoting smart city applications to enhance the quality of daily-life activities in urban areas. Smart cities include internet-enabled devices that are used by applications like health care, power grid, water treatment, traffic control, etc to enhance its effectiveness. 
The expansion in the quantity of Internet-of-things (IoT) based botnet attacks is due to the growing trend of Internet-enabled devices. To provide advanced cyber security solutions to IoT devices and smart city applications, this paper proposes a deep learning (DL) based botnet detection system that works on network traffic flows. The botnet detection framework collects the network traffic flows, converts them into connection records and uses a DL model to detect attacks emanating from the compromised IoT devices. To determine an optimal DL model, many experiments are conducted on well-known and recently released benchmark data sets. Further, the datasets are visualized to understand its characteristics. The proposed DL model outperformed the conventional machine learning (ML) models.", "year": 2020, "venue": "Conference on Computer Communications Workshops", "authors": [ "S. S", "V. R.", "M. Alazab", "Soman Kp" ], "externalIds": { "MAG": "3048529935", "DBLP": "conf/infocom/SRAK20", "DOI": "10.1109/INFOCOMWKSHPS50562.2020.9162668", "CorpusId": 221120765 }, "url": "https://www.semanticscholar.org/paper/d7fdce79660fe458071f24d675de309922da204a", "referenceCount": 23, "citationCount": 94, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fog-Based Attack Detection Framework for Internet of Things Using Deep Learning", "abstract": "The number of cyber-attacks and data breaches has immensely increased across different enterprises, companies, and industries as a result of the exploitation of the weaknesses in securing Internet of Things (IoT) devices. The increasing number of various devices connected to IoT and their different protocols has led to growing volume of zero-day attacks. Deep learning (DL) has demonstrated its superiority in big data fields and cyber-security. Recently, DL has been used in cyber-attacks detection because of its capability of extracting and learning deep features of known attacks and detecting unknown attacks without the need for manual feature engineering. However, DL cannot be implemented on IoT devices with limited resources because it requires extensive computation, strong power and storage capabilities. This paper presents a comprehensive attack detection framework of a distributed, robust, and high detection rate to detect several IoT cyber-attacks using DL. The proposed framework implements an attack detector on fog nodes because of its distributed nature, high computational capacity and proximity to edge devices. Six DL models are compared to identify the DL model with the best performance. All DL models are evaluated using five different datasets, each of which involves various attacks. Experiments show that the long short-term memory model outperforms the five other DL models. The proposed framework is effective in terms of response time and detection accuracy and can detect several types of cyber-attacks with 99.97% detection rate and 99.96% detection accuracy in binary classification and 99.65% detection accuracy in multi-class classification.", "year": 2020, "venue": "IEEE Access", "authors": [ "Ahmed E. 
Samy", "Haining Yu", "Hongli Zhang" ], "externalIds": { "DBLP": "journals/access/SamyYZ20", "MAG": "3017752721", "DOI": "10.1109/ACCESS.2020.2988854", "CorpusId": 218468830 }, "url": "https://www.semanticscholar.org/paper/7ddc5e6e9fd37353bdb78e729dabd938c1408ffc", "referenceCount": 53, "citationCount": 68, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Improving User Trust on Deep Neural Networks Based Intrusion Detection Systems", "abstract": "Deep Neural Networks based intrusion detection systems (DNN-IDS) have proven to be effective. However, in domains like critical infrastructure security, user trust on the DNN-IDS is imperative and high accuracy isn't sufficient. The black-box nature of DNNs hinders transparency of the DNN-IDS, which is necessary for building trust. The main objective of this work is to improve user trust by improving transparency of the DNN-IDS by making it more communicative. This paper presents a methodology to generate offline and online feedback to the user on the decision making process of the DNN-IDS. Offline, the user is reported the input features that are most relevant in detecting each type of intrusion by the trained DNN-IDS. Online, for each detection, the user is reported the inputs features that contributed most to the detection. The presented method was implemented on the KDD-NSL dataset with a multi-layer perceptron (MLP) based DNN-IDS. Binary and multi-class classification was carried out on the dataset. Further, several DNN-IDS architectures with different depth were tested to study the factors that drive classification. It was observed that despite showing very similar accuracy results, the factors that drove the decisions were different across architectures. This evidences that the qualitative analysis that is enabled through reporting relevant input features is important for the user to make a more informed decision in choosing a DNN-IDS. This online and offline feedback leads to improving the transparency of the DNN-IDS and helps build trust prior to and during deployment.", "year": 2018, "venue": "Annual Conference of the IEEE Industrial Electronics Society", "authors": [ "Kasun Amarasinghe", "M. Manic" ], "externalIds": { "DBLP": "conf/iecon/AmarasingheM18", "MAG": "2907889246", "DOI": "10.1109/IECON.2018.8591322", "CorpusId": 57364562 }, "url": "https://www.semanticscholar.org/paper/908eeeb220d76060debdb4eaa42a56f72867cbef", "referenceCount": 22, "citationCount": 33, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An Adversarial Approach for Explainable AI in Intrusion Detection Systems", "abstract": "Despite the growing popularity of modern machine learning techniques (e.g, Deep Neural Networks) in cyber-security applications, most of these models are perceived as a black-box for the user. Adversarial machine learning offers an approach to increase our understanding of these models. In this paper we present an approach to generate explanations for incorrect classifications made by data-driven Intrusion Detection Systems (IDSs) An adversarial approach is used to find the minimum modifications (of the input features) required to correctly classify a given set of misclassified samples. The magnitude of such modifications is used to visualize the most relevant features that explain the reason for the misclassification. 
The presented methodology generated satisfactory explanations that describe the reasoning behind the mis-classifications, with descriptions that match expert knowledge. The advantages of the presented methodology are: 1) applicable to any classifier with defined gradients. 2) does not require any modification of the classifier model. 3) can be extended to perform further diagnosis (e.g. vulnerability assessment) and gain further understanding of the system. Experimental evaluation was conducted on the NSL-KDD99 benchmark dataset using Linear and Multilayer perceptron classifiers. The results are shown using intuitive visualizations in order to improve the interpretability of the results.", "year": 2018, "venue": "Annual Conference of the IEEE Industrial Electronics Society", "authors": [ "Daniel L. Marino", "Chathurika S. Wickramasinghe", "M. Manic" ], "externalIds": { "MAG": "2951435006", "DBLP": "conf/iecon/MarinoWM18", "ArXiv": "1811.11705", "DOI": "10.1109/IECON.2018.8591457", "CorpusId": 53811301 }, "url": "https://www.semanticscholar.org/paper/43962ec59c5ef78271381842285c7d8dfc3f0d57", "referenceCount": 25, "citationCount": 98, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Unified Approach to Interpreting Model Predictions", "abstract": "Understanding why a model makes a certain prediction can be as crucial as the prediction's accuracy in many applications. However, the highest accuracy for large modern datasets is often achieved by complex models that even experts struggle to interpret, such as ensemble or deep learning models, creating a tension between accuracy and interpretability. In response, various methods have recently been proposed to help users interpret the predictions of complex models, but it is often unclear how these methods are related and when one method is preferable over another. To address this problem, we present a unified framework for interpreting predictions, SHAP (SHapley Additive exPlanations). SHAP assigns each feature an importance value for a particular prediction. Its novel components include: (1) the identification of a new class of additive feature importance measures, and (2) theoretical results showing there is a unique solution in this class with a set of desirable properties. The new class unifies six existing methods, notable because several recent methods in the class lack the proposed desirable properties. Based on insights from this unification, we present new methods that show improved computational performance and/or better consistency with human intuition than previous approaches.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Scott M. Lundberg", "Su-In Lee" ], "externalIds": { "MAG": "2618851150", "DBLP": "journals/corr/LundbergL17", "ArXiv": "1705.07874", "CorpusId": 21889700 }, "url": "https://www.semanticscholar.org/paper/442e10a3c6640ded9408622005e3c2a8906ce4c2", "referenceCount": 10, "citationCount": 16601, "influentialCitationCount": 1905, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Network intrusion detection using machine learning anomaly detection algorithms", "abstract": "Attacks on the network are exceptional cases that are not observed in normal traffic behavior. In this work, in order to detect network attacks, using k-means algorithm a new semi-supervised anomaly detection system has been designed and implemented. 
During the training phase, normal samples were separated into clusters by applying the k-means algorithm. Then, in order to be able to distinguish between normal and abnormal samples, a threshold value was calculated according to their distances from the clusters' centers, using a validation dataset. New samples whose distance from the clusters' centers exceeds the threshold value are detected as anomalies. We used NSL-KDD, a labelled dataset of network connection traces, to test our method's effectiveness. The experimental results on the NSL-KDD dataset show that we achieved an accuracy of 80.119%.", "year": 2017, "venue": "Signal Processing and Communications Applications Conference", "authors": [ "M. Elif Karsligil", "A. Gokhan Yavuz", "M. A. Guvensan", "Khadija Hanifi", "Hasan Bank" ], "externalIds": { "MAG": "2724553162", "DBLP": "conf/siu/KarsligilYGHB17", "DOI": "10.1109/SIU.2017.7960616", "CorpusId": 10917407 }, "url": "https://www.semanticscholar.org/paper/f03dad9d2d35aa8e1802a89954fa6a0c22671667", "referenceCount": 16, "citationCount": 23, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "“Why Should I Trust You?”: Explaining the Predictions of Any Classifier", "abstract": "Despite widespread adoption, machine learning models remain mostly black boxes. Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks). We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.", "year": 2016, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Marco Tulio Ribeiro", "Sameer Singh", "Carlos Guestrin" ], "externalIds": { "ACL": "N16-3020", "MAG": "2951501516", "DBLP": "journals/corr/RibeiroSG16", "ArXiv": "1602.04938", "DOI": "10.1145/2939672.2939778", "CorpusId": 13029170 }, "url": "https://www.semanticscholar.org/paper/c0883f5930a232a9c1ad601c978caede29155979", "referenceCount": 41, "citationCount": 14452, "influentialCitationCount": 1693, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Survey of Security and Privacy Issues of Internet of Things", "abstract": "This paper is a general survey of all the security issues existing in the Internet of Things (IoT) along with an analysis of the privacy issues that an end-user may face as a consequence of the spread of IoT. 
The majority of the survey is focused on the security loopholes arising out of the information exchange technologies used in Internet of Things. No countermeasure to the security drawbacks has been analyzed in the paper.", "year": 2015, "venue": "arXiv.org", "authors": [ "T. Borgohain", "U. Kumar", "S. Sanyal" ], "externalIds": { "MAG": "1494291086", "ArXiv": "1501.02211", "DBLP": "journals/corr/BorgohainKS15", "CorpusId": 2829149 }, "url": "https://www.semanticscholar.org/paper/42cd362089c8b2ae6b09a9dc532bec7e7f44d754", "referenceCount": 48, "citationCount": 741, "influentialCitationCount": 33, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "An Explainable Machine Learning Framework for Intrusion Detection Systems", "abstract": "In recent years, machine learning-based intrusion detection systems (IDSs) have proven to be effective; in particular, deep neural networks improve the detection rates of intrusion detection models. However, as models become more and more complex, people can hardly get the explanations behind their decisions. At the same time, most of the work on model interpretation focuses on other fields like computer vision, natural language processing, and biology. This leads to the fact that in practical use, cybersecurity experts can hardly optimize their decisions according to the judgments of the model. To solve these issues, a framework is proposed in this paper to give an explanation for IDSs. This framework uses SHapley Additive exPlanations (SHAP), and combines local and global explanations to improve the interpretation of IDSs. The local explanations give the reasons why the model makes certain decisions on the specific input. The global explanations give the important features extracted from IDSs and present the relationships between the feature values and different types of attacks. At the same time, the interpretations of two different classifiers, a one-vs-all classifier and a multiclass classifier, are compared. The NSL-KDD dataset is used to test the feasibility of the framework. The framework proposed in this paper improves the transparency of any IDS and helps the cybersecurity staff have a better understanding of IDSs’ judgments. Furthermore, the different interpretations between different kinds of classifiers can also help security experts better design the structures of the IDSs. More importantly, this work is unique in the intrusion detection field, presenting the first use of the SHAP method to give explanations for IDSs.", "year": 2020, "venue": "IEEE Access", "authors": [ "Maonan Wang", "K. 
Zheng", "Yanqing Yang", "Xiujuan Wang" ], "externalIds": { "DBLP": "journals/access/WangZYW20", "MAG": "3017093935", "DOI": "10.1109/ACCESS.2020.2988359", "CorpusId": 218473599 }, "url": "https://www.semanticscholar.org/paper/c785e1453be0cc374bb84fb589123b697dfc3497", "referenceCount": 58, "citationCount": 154, "influentialCitationCount": 12, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] } ] }, "Explainable AI for Autism Diagnosis: Identifying Critical Brain Regions Using fMRI Data": { "paper_title": "Explainable AI for Autism Diagnosis: Identifying Critical Brain Regions Using fMRI Data", "arxiv_id": "2409.15374v1", "keyword": "explainable ai", "authors": [ "Suryansh Vidya", "Kush Gupta", "Amir Aly", "Andy Wills", "Emmanuel Ifeachor", "Rohit Shankar" ], "references": [ { "title": "Automated diagnosis of autism with artificial intelligence: State of the art", "abstract": "Abstract Autism spectrum disorder (ASD) represents a panel of conditions that begin during the developmental period and result in impairments of personal, social, academic, or occupational functioning. Early diagnosis is directly related to a better prognosis. Unfortunately, the diagnosis of ASD requires a long and exhausting subjective process. We aimed to review the state of the art for automated autism diagnosis and recognition in this research. In February 2022, we searched multiple databases and sources of gray literature for eligible studies. We used an adapted version of the QUADAS-2 tool to assess the risk of bias in the studies. A brief report of the methods and results of each study is presented. Data were synthesized for each modality separately using the Split Component Synthesis (SCS) method. We assessed heterogeneity using the I 2 statistics and evaluated publication bias using trim and fill tests combined with ln DOR. Confidence in cumulative evidence was assessed using the GRADE approach for diagnostic studies. We included 344 studies from 186,020 participants (51,129 are estimated to be unique) for nine different modalities in this review, from which 232 reported sufficient data for meta-analysis. The area under the curve was in the range of 0.71–0.90 for all the modalities. The studies on EEG data provided the best accuracy, with the area under the curve ranging between 0.85 and 0.93. We found that the literature is rife with bias and methodological/reporting flaws. Recommendations are provided for future research to provide better studies and fill in the current knowledge gaps.", "year": 2023, "venue": "Reviews in the Neurosciences", "authors": [ "Amir Valizadeh", "Mana Moassefi", "Amin Nakhostin-Ansari", "Soheil Heidari Some’eh", "Hossein Hosseini-Asl", "Mehrnush Saghab Torbati", "R. Aghajani", "Zahra Maleki Ghorbani", "Iman Menbari-Oskouie", "Faezeh Aghajani", "Alireza Mirzamohamadi", "Mohammad Ghafouri", "S. Faghani", "A. Memari" ], "externalIds": { "DOI": "10.1515/revneuro-2023-0050", "CorpusId": 261583693, "PubMed": "37678819" }, "url": "https://www.semanticscholar.org/paper/45160c76c7288e49eab2389334ff3377009672e4", "referenceCount": 94, "citationCount": 4, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Autism spectrum disorder classification using Adam war strategy optimization enabled deep belief network", "abstract": null, "year": 2023, "venue": "Biomedical Signal Processing and Control", "authors": [ "V. Bhandage", "M. K", "S. Muppidi", "B. 
Maram" ], "externalIds": { "DBLP": "journals/bspc/BhandageKMM23", "DOI": "10.1016/j.bspc.2023.104914", "CorpusId": 258271679 }, "url": "https://www.semanticscholar.org/paper/d06b52d0135f0e031f99962bc2bd8f7fa992f69e", "referenceCount": 31, "citationCount": 7, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Early Diagnosis of Autism Spectrum Disorder: A Review and Analysis of the Risks and Benefits", "abstract": "Autism spectrum disorder (ASD) is a neurodevelopmental condition made up of enduring challenges in social communication and interaction and the presence of repetitive and restricted behavior patterns. Early diagnosis of autism is crucial for timely intervention and improved long-term outcomes. This review aims to explore some of its signs and symptoms, look into some diagnostic tools, and analyze the benefits and risks associated with an early diagnosis of autism. The symptoms of ASD vary from child to child, some of which are: avoidance of eye contact, lack of response to names, excessive fear, and lack of interactive and pretend play. Early identification of these symptoms by caregivers and healthcare providers facilitates the need for diagnosis and appropriate interventions. Some screening and diagnostic tools that have been found to help make the diagnosis are the Modified Checklist for Autism in Toddlers, Revised with Follow-Up (M-CHAT-R/F), the Social Communication Questionnaire (SCQ), the Parents' Evaluation of Developmental Status (PEDS), and the Childhood Autism Rating Scale (CARS), amongst others. The benefits of early diagnosis include the opportunity for early intervention, which has been shown to enhance developmental outcomes and improve adaptive skills. Early identification allows for the implementation of specialized interventions tailored to the specific needs of individuals with autism, targeting social communication, language development, and behavioral challenges. Furthermore, early diagnosis enables families to access appropriate support services, educational resources, and community programs, facilitating better coping mechanisms, reducing parental stress, and increasing adult independence. However, early diagnosis of autism also entails certain risks. One significant concern is the potential for labeling and stigmatization, which can impact the child's self-esteem and social interactions. There is a risk of overdiagnosis or misdiagnosis, leading to unnecessary interventions and treatments. Additionally, the diagnostic process can be lengthy, complex, and emotionally challenging for families, requiring comprehensive assessments by multidisciplinary teams. This review highlights the importance of a balanced approach when considering the benefits and risks of early diagnosis. Early identification allows for timely interventions that significantly improve developmental outcomes and quality of life for individuals with autism. To mitigate the risks, it is crucial to ensure accurate and reliable diagnostic procedures, support families throughout the process, and promote societal awareness and acceptance. We also highlighted some future directions in the management of autism, including the use of biomarkers and the use of artificial intelligence and learning for diagnosing ASD.", "year": 2023, "venue": "Cureus", "authors": [ "Chiugo Okoye", "Chidi M Obialo-Ibeawuchi", "Omobolanle A Obajeun", "S. Sarwar", "C. Tawfik", "M. Waleed", "Asad Ullah Wasim", "Iman Mohamoud", "A. 
Afolayan", "Rheiner N Mbaezue" ], "externalIds": { "PubMedCentral": "10491411", "DOI": "10.7759/cureus.43226", "CorpusId": 260794723, "PubMed": "37692637" }, "url": "https://www.semanticscholar.org/paper/e18eba880635be9eb79769213ac149f74fcecbf4", "referenceCount": 36, "citationCount": 30, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Deep Learning Aided Neuroimaging and Brain Regulation", "abstract": "Currently, deep learning aided medical imaging is becoming the hot spot of AI frontier application and the future development trend of precision neuroscience. This review aimed to render comprehensive and informative insights into the recent progress of deep learning and its applications in medical imaging for brain monitoring and regulation. The article starts by providing an overview of the current methods for brain imaging, highlighting their limitations and introducing the potential benefits of using deep learning techniques to overcome these limitations. Then, we further delve into the details of deep learning, explaining the basic concepts and providing examples of how it can be used in medical imaging. One of the key strengths is its thorough discussion of the different types of deep learning models that can be used in medical imaging including convolutional neural networks (CNNs), recurrent neural networks (RNNs), and generative adversarial network (GAN) assisted magnetic resonance imaging (MRI), positron emission tomography (PET)/computed tomography (CT), electroencephalography (EEG)/magnetoencephalography (MEG), optical imaging, and other imaging modalities. Overall, our review on deep learning aided medical imaging for brain monitoring and regulation provides a referrable glance for the intersection of deep learning aided neuroimaging and brain regulation.", "year": 2023, "venue": "Italian National Conference on Sensors", "authors": [ "Mengze Xu", "Yuanyuan Ouyang", "Zhen Yuan" ], "externalIds": { "PubMedCentral": "10255716", "DBLP": "journals/sensors/XuOY23", "DOI": "10.3390/s23114993", "CorpusId": 258880006, "PubMed": "37299724" }, "url": "https://www.semanticscholar.org/paper/ca7eb4e3a202db2300f2b34a6c528e1054cfdeb0", "referenceCount": 106, "citationCount": 11, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Prevalence and Characteristics of Autism Spectrum Disorder Among Children Aged 8 Years — Autism and Developmental Disabilities Monitoring Network, 11 Sites, United States, 2020", "abstract": "Problem/Condition Autism spectrum disorder (ASD). Period Covered 2020. Description of System The Autism and Developmental Disabilities Monitoring (ADDM) Network is an active surveillance program that provides estimates of the prevalence of ASD among children aged 8 years. In 2020, there were 11 ADDM Network sites across the United States (Arizona, Arkansas, California, Georgia, Maryland, Minnesota, Missouri, New Jersey, Tennessee, Utah, and Wisconsin). To ascertain ASD among children aged 8 years, ADDM Network staff review and abstract developmental evaluations and records from community medical and educational service providers. A child met the case definition if their record documented 1) an ASD diagnostic statement in an evaluation, 2) a classification of ASD in special education, or 3) an ASD International Classification of Diseases (ICD) code. 
Results For 2020, across all 11 ADDM sites, ASD prevalence per 1,000 children aged 8 years ranged from 23.1 in Maryland to 44.9 in California. The overall ASD prevalence was 27.6 per 1,000 (one in 36) children aged 8 years and was 3.8 times as prevalent among boys as among girls (43.0 versus 11.4). Overall, ASD prevalence was lower among non-Hispanic White children (24.3) and children of two or more races (22.9) than among non-Hispanic Black or African American (Black), Hispanic, and non-Hispanic Asian or Pacific Islander (A/PI) children (29.3, 31.6, and 33.4 respectively). ASD prevalence among non-Hispanic American Indian or Alaska Native (AI/AN) children (26.5) was similar to that of other racial and ethnic groups. ASD prevalence was associated with lower household income at three sites, with no association at the other sites. Across sites, the ASD prevalence per 1,000 children aged 8 years based exclusively on documented ASD diagnostic statements was 20.6 (range = 17.1 in Wisconsin to 35.4 in California). Of the 6,245 children who met the ASD case definition, 74.7% had a documented diagnostic statement of ASD, 65.2% had a documented ASD special education classification, 71.6% had a documented ASD ICD code, and 37.4% had all three types of ASD indicators. The median age of earliest known ASD diagnosis was 49 months and ranged from 36 months in California to 59 months in Minnesota. Among the 4,165 (66.7%) children with ASD with information on cognitive ability, 37.9% were classified as having an intellectual disability. Intellectual disability was present among 50.8% of Black, 41.5% of A/PI, 37.8% of two or more races, 34.9% of Hispanic, 34.8% of AI/AN, and 31.8% of White children with ASD. Overall, children with intellectual disability had earlier median ages of ASD diagnosis (43 months) than those without intellectual disability (53 months). Interpretation For 2020, one in 36 children aged 8 years (approximately 4% of boys and 1% of girls) was estimated to have ASD. These estimates are higher than previous ADDM Network estimates during 2000–2018. For the first time among children aged 8 years, the prevalence of ASD was lower among White children than among other racial and ethnic groups, reversing the direction of racial and ethnic differences in ASD prevalence observed in the past. Black children with ASD were still more likely than White children with ASD to have a co-occurring intellectual disability. Public Health Action The continued increase among children identified with ASD, particularly among non-White children and girls, highlights the need for enhanced infrastructure to provide equitable diagnostic, treatment, and support services for all children with ASD. Similar to previous reporting periods, findings varied considerably across network sites, indicating the need for additional research to understand the nature of such differences and potentially apply successful identification strategies across states.", "year": 2023, "venue": "Morbidity and mortality weekly report. Surveillance summaries", "authors": [ "M. Maenner", "Z. Warren", "Ashley Robinson Williams", "E. Amoakohene", "A. Bakian", "D. Bilder", "M. Durkin", "Robert T. Fitzgerald", "Sarah M. Furnier", "Michelle M. Hughes", "Christine M. Ladd-Acosta", "Dedria McArthur", "Elise T. Pas", "Angelica Salinas", "Alison C. Vehorn", "Susan Williams", "A. Esler", "Andrea Grzybowski", "J. Hall-Lande", "Ruby H N Nguyen", "Karen J. Pierce", "W. Zahorodny", "Allison Hudson", "Libby Hallas", "K. Mancilla", "Mary E. 
Patrick", "Josephine Shenouda", "Kate Sidwell", "M. Dirienzo", "Johanna Gutierrez", "Margaret H. Spivey", "M. Lopez", "S. Pettygrove", "Yvette D. Schwenk", "Anita Washington", "K. Shaw" ], "externalIds": { "PubMedCentral": "10042614", "DOI": "10.15585/mmwr.ss7202a1", "CorpusId": 257735672, "PubMed": "36952288" }, "url": "https://www.semanticscholar.org/paper/e632b5e4f2ff99e313f0bf86ac461075412d823b", "referenceCount": 36, "citationCount": 722, "influentialCitationCount": 48, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Atypical functional connectivity of temporal cortex with precuneus and visual regions may be an early-age signature of ASD", "abstract": null, "year": 2023, "venue": "Molecular Autism", "authors": [ "Yaqiong Xiao", "Teresa H. Wen", "Lauren Kupis", "L. Eyler", "V. Taluja", "Jaden Troxel", "Disha Goel", "M. Lombardo", "K. Pierce", "E. Courchesne" ], "externalIds": { "PubMedCentral": "10007788", "DOI": "10.1186/s13229-023-00543-8", "CorpusId": 257434256, "PubMed": "36899425" }, "url": "https://www.semanticscholar.org/paper/a7606d2968ec1a1c82e5ef0b0063deda14838046", "referenceCount": 68, "citationCount": 16, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Gamma oscillations point to the role of primary visual cortex in atypical motion processing in autism", "abstract": "Neurophysiological studies suggest that abnormal neural inhibition may explain a range of sensory processing differences in autism spectrum disorders (ASD). In particular, the impaired ability of people with ASD to visually discriminate the motion direction of small-size objects and their reduced perceptual suppression of background-like visual motion may stem from deficient surround inhibition within the primary visual cortex (V1) and/or its atypical top-down modulation by higher-tier cortical areas. In this study, we estimate the contribution of abnormal surround inhibition to the motion-processing deficit in ASD. For this purpose, we used a putative correlate of surround inhibition–suppression of the magnetoencephalographic (MEG) gamma response (GR) caused by an increase in the drift rate of a large annular high-contrast grating. The motion direction discrimination thresholds for the gratings of different angular sizes (1° and 12°) were assessed in a separate psychophysical paradigm. The MEG data were collected in 42 boys with ASD and 37 typically developing (TD) boys aged 7–15 years. Psychophysical data were available in 33 and 34 of these participants, respectively. The results showed that the GR suppression in V1 was reduced in boys with ASD, while their ability to detect the direction of motion was compromised only in the case of small stimuli. In TD boys, the GR suppression directly correlated with perceptual suppression caused by increasing stimulus size, thus suggesting the role of the top-down modulations of V1 in surround inhibition. In ASD, weaker GR suppression was associated with the poor directional sensitivity to small stimuli, but not with perceptual suppression. These results strongly suggest that a local inhibitory deficit in V1 plays an important role in the reduction of directional sensitivity in ASD and that this perceptual deficit cannot be explained exclusively by atypical top-down modulation of V1 by higher-tier cortical areas.", "year": 2023, "venue": "PLoS ONE", "authors": [ "E. Orekhova", "V. Manyukhina", "Ilia A Galuta", "A. Prokofyev", "D. Goiaeva", "T. Obukhova", "K. Fadeev", "Justin F. Schneiderman", "T. 
Stroganova" ], "externalIds": { "PubMedCentral": "9925089", "DOI": "10.1371/journal.pone.0281531", "CorpusId": 246739334, "PubMed": "36780507" }, "url": "https://www.semanticscholar.org/paper/7414091b332d5d2c4521826262bce3ace1d605f5", "referenceCount": 97, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Broad transcriptomic dysregulation occurs across the cerebral cortex in ASD", "abstract": null, "year": 2022, "venue": "Nature", "authors": [ "M. Gandal", "J. Haney", "Brie Wamsley", "C. Yap", "Sepideh Parhami", "Prashant S. Emani", "Nathan Chang", "George T. Chen", "Gil D. Hoftman", "Diego de Alba", "Gokul Ramaswami", "C. Hartl", "A. Bhattacharya", "C. Luo", "Ting Jin", "Daifeng Wang", "R. Kawaguchi", "Diana Quintero", "J. Ou", "Y. E. Wu", "N. Parikshak", "V. Swarup", "T. G. Belgard", "M. Gerstein", "B. Pasaniuc", "D. Geschwind" ], "externalIds": { "PubMedCentral": "9668748", "DOI": "10.1038/s41586-022-05377-7", "CorpusId": 253266752, "PubMed": "36323788" }, "url": "https://www.semanticscholar.org/paper/1de7ecbb406f458a6e035e79cc37452c8366d7b5", "referenceCount": 77, "citationCount": 88, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "A survey on the interpretability of deep learning in medical diagnosis", "abstract": null, "year": 2022, "venue": "Multimedia Systems", "authors": [ "Qiaoying Teng", "Zhe Liu", "Yuqing Song", "K. Han", "Yang Lu" ], "externalIds": { "PubMedCentral": "9243744", "DBLP": "journals/mms/TengLSHL22", "DOI": "10.1007/s00530-022-00960-4", "CorpusId": 250057074, "PubMed": "35789785" }, "url": "https://www.semanticscholar.org/paper/465f3ea576b8f9b05d86d3ae22b74e112f4e1e9a", "referenceCount": 116, "citationCount": 53, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Sources of bias in artificial intelligence that perpetuate healthcare disparities—A global review", "abstract": "Background While artificial intelligence (AI) offers possibilities of advanced clinical prediction and decision-making in healthcare, models trained on relatively homogeneous datasets, and populations poorly-representative of underlying diversity, limits generalisability and risks biased AI-based decisions. Here, we describe the landscape of AI in clinical medicine to delineate population and data-source disparities. Methods We performed a scoping review of clinical papers published in PubMed in 2019 using AI techniques. We assessed differences in dataset country source, clinical specialty, and author nationality, sex, and expertise. A manually tagged subsample of PubMed articles was used to train a model, leveraging transfer-learning techniques (building upon an existing BioBERT model) to predict eligibility for inclusion (original, human, clinical AI literature). Of all eligible articles, database country source and clinical specialty were manually labelled. A BioBERT-based model predicted first/last author expertise. Author nationality was determined using corresponding affiliated institution information using Entrez Direct. And first/last author sex was evaluated using the Gendarize.io API. Results Our search yielded 30,576 articles, of which 7,314 (23.9%) were eligible for further analysis. Most databases came from the US (40.8%) and China (13.7%). Radiology was the most represented clinical specialty (40.4%), followed by pathology (9.1%). 
Authors were primarily from either China (24.0%) or the US (18.4%). First and last authors were predominately data experts (i.e., statisticians) (59.6% and 53.9% respectively) rather than clinicians. And the majority of first/last authors were male (74.1%). Interpretation U.S. and Chinese datasets and authors were disproportionately overrepresented in clinical AI, and almost all of the top 10 databases and author nationalities were from high income countries (HICs). AI techniques were most commonly employed for image-rich specialties, and authors were predominantly male, with non-clinical backgrounds. Development of technological infrastructure in data-poor regions, and diligence in external validation and model re-calibration prior to clinical implementation in the short-term, are crucial in ensuring clinical AI is meaningful for broader populations, and to avoid perpetuating global health inequity.", "year": 2022, "venue": "PLOS Digital Health", "authors": [ "L. Celi", "J. Cellini", "Marie Charpignon", "E. Dee", "Franck Dernoncourt", "René Eber", "W. G. Mitchell", "L. Moukheiber", "Julian Schirmer", "Julia Situ", "J. Paguio", "Joel Park", "J. Wawira", "Seth Yao" ], "externalIds": { "PubMedCentral": "9931338", "DOI": "10.1371/journal.pdig.0000022", "CorpusId": 247920762, "PubMed": "36812532" }, "url": "https://www.semanticscholar.org/paper/429018881ad753f0d5be21fdad6c036c44566005", "referenceCount": 61, "citationCount": 131, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Transparency of Deep Neural Networks for Medical Image Analysis: A Review of Interpretability Methods", "abstract": null, "year": 2021, "venue": "Comput. Biol. Medicine", "authors": [ "Zohaib Salahuddin", "H. Woodruff", "A. Chatterjee", "P. Lambin" ], "externalIds": { "ArXiv": "2111.02398", "DBLP": "journals/corr/abs-2111-02398", "DOI": "10.1016/j.compbiomed.2021.105111", "CorpusId": 242758341, "PubMed": "34891095" }, "url": "https://www.semanticscholar.org/paper/6815b0621436a0594219f7db23ec66c800f392a0", "referenceCount": 184, "citationCount": 213, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science", "Engineering" ] }, { "title": "Racial, Ethnic, and Sociodemographic Disparities in Diagnosis of Children with Autism Spectrum Disorder", "abstract": "ABSTRACT: This special article uses a biosocial-ecological framework to discuss findings in the literature on racial, ethnic, and sociodemographic diagnostic disparities in autism spectrum disorder. We draw explanations from this framework on the complex and cumulative influences of social injustices across interpersonal and systemic levels.", "year": 2021, "venue": "Journal of Developmental and Behavioral Pediatrics", "authors": [ "Brandon S. Aylward", "Diana E. Gal-Szabo", "S. Taraman" ], "externalIds": { "PubMedCentral": "8500365", "DOI": "10.1097/DBP.0000000000000996", "CorpusId": 237494309, "PubMed": "34510108" }, "url": "https://www.semanticscholar.org/paper/51efecf6c133bce7756540553a71fd3f215a5710", "referenceCount": 68, "citationCount": 46, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Graph Neural Networks in Network Neuroscience", "abstract": "Noninvasive medical neuroimaging has yielded many discoveries about the brain connectivity. 
Several substantial techniques mapping morphological, structural and functional brain connectivities were developed to create a comprehensive road map of neuronal activities in the human brain –namely brain graph. Relying on its non-euclidean data type, graph neural network (GNN) provides a clever way of learning the deep graph structure and it is rapidly becoming the state-of-the-art leading to enhanced performance in various network neuroscience tasks. Here we review current GNN-based methods, highlighting the ways that they have been used in several applications related to brain graphs such as missing brain graph synthesis and disease classification. We conclude by charting a path toward a better application of GNN models in network neuroscience field for neurological disorder diagnosis and population graph integration. The list of papers cited in our work is available at https://github.com/basiralab/GNNs-in-Network-Neuroscience.", "year": 2021, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Alaa Bessadok", "M. Mahjoub", "I. Rekik" ], "externalIds": { "DBLP": "journals/corr/abs-2106-03535", "ArXiv": "2106.03535", "DOI": "10.1109/TPAMI.2022.3209686", "CorpusId": 235358683, "PubMed": "36155474" }, "url": "https://www.semanticscholar.org/paper/db5d583782264529456a475ce8e9a90823b3a2b5", "referenceCount": 127, "citationCount": 106, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine", "Biology" ] }, { "title": "ASD-SAENet: A Sparse Autoencoder, and Deep-Neural Network Model for Detecting Autism Spectrum Disorder (ASD) Using fMRI Data", "abstract": "Autism spectrum disorder (ASD) is a heterogenous neurodevelopmental disorder which is characterized by impaired communication, and limited social interactions. The shortcomings of current clinical approaches which are based exclusively on behavioral observation of symptomology, and poor understanding of the neurological mechanisms underlying ASD necessitates the identification of new biomarkers that can aid in study of brain development, and functioning, and can lead to accurate and early detection of ASD. In this paper, we developed a deep-learning model called ASD-SAENet for classifying patients with ASD from typical control subjects using fMRI data. We designed and implemented a sparse autoencoder (SAE) which results in optimized extraction of features that can be used for classification. These features are then fed into a deep neural network (DNN) which results in superior classification of fMRI brain scans more prone to ASD. Our proposed model is trained to optimize the classifier while improving extracted features based on both reconstructed data error and the classifier error. We evaluated our proposed deep-learning model using publicly available Autism Brain Imaging Data Exchange (ABIDE) dataset collected from 17 different research centers, and include more than 1,035 subjects. Our extensive experimentation demonstrate that ASD-SAENet exhibits comparable accuracy (70.8%), and superior specificity (79.1%) for the whole dataset as compared to other methods. Further, our experiments demonstrate superior results as compared to other state-of-the-art methods on 12 out of the 17 imaging centers exhibiting superior generalizability across different data acquisition sites and protocols. 
The implemented code is available on GitHub portal of our lab at: https://github.com/pcdslab/ASD-SAENet.", "year": 2021, "venue": "Frontiers in Computational Neuroscience", "authors": [ "Fahad Almuqhim", "F. Saeed" ], "externalIds": { "DBLP": "journals/ficn/AlmuqhimS21", "PubMedCentral": "8060560", "DOI": "10.3389/fncom.2021.654315", "CorpusId": 233175949, "PubMed": "33897398" }, "url": "https://www.semanticscholar.org/paper/6e9958660a8e26e11d19bd0628ec63dfc3e9f9e9", "referenceCount": 58, "citationCount": 70, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Barriers to Autism Spectrum Disorder Diagnosis for Young Women and Girls: a Systematic Review", "abstract": null, "year": 2020, "venue": "Review Journal of Autism and Developmental Disorders", "authors": [ "Georgia Lockwood Estrin", "Victoria Milner", "D. Spain", "F. Happé", "Emma Colvert" ], "externalIds": { "MAG": "3095823263", "PubMedCentral": "8604819", "DOI": "10.1007/s40489-020-00225-8", "CorpusId": 228993580, "PubMed": "34868805" }, "url": "https://www.semanticscholar.org/paper/2e200223b01460dd6e07f818212b41de0e2f585b", "referenceCount": 60, "citationCount": 143, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Psychology" ] }, { "title": "An Explainable Machine Learning Model for Early Detection of Parkinson's Disease using LIME on DaTscan Imagery", "abstract": null, "year": 2020, "venue": "Comput. Biol. Medicine", "authors": [ "Pavan Rajkumar Magesh", "Richard Delwin Myloth", "Rijo Jackson Tom" ], "externalIds": { "DBLP": "journals/corr/abs-2008-00238", "MAG": "3046695717", "ArXiv": "2008.00238", "DOI": "10.1016/j.compbiomed.2020.104041", "CorpusId": 220936191, "PubMed": "33074113" }, "url": "https://www.semanticscholar.org/paper/c752964e61c047aaebb25e77504c752e200407d2", "referenceCount": 37, "citationCount": 149, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine", "Engineering" ] }, { "title": "Deep Learning for Neuroimaging-based Diagnosis and Rehabilitation of Autism Spectrum Disorder: A Review", "abstract": null, "year": 2020, "venue": "Comput. Biol. Medicine", "authors": [ "Marjane Khodatars", "A. Shoeibi", "Delaram Sadeghi", "Navid Ghaasemi", "M. Jafari", "Parisa Moridian", "Ali Khadem", "R. Alizadehsani", "A. Zare", "Yinan Kong", "A. Khosravi", "S. Nahavandi", "Sadiq Hussain", "U. Acharya", "M. Berk" ], "externalIds": { "DBLP": "journals/cbm/KhodatarsSSGJMK21", "ArXiv": "2007.01285", "MAG": "3039636711", "DOI": "10.1016/j.compbiomed.2021.104949", "CorpusId": 220301573, "PubMed": "34737139" }, "url": "https://www.semanticscholar.org/paper/cba0a928a77bdc100bc248ed9a616d70bf108e74", "referenceCount": 236, "citationCount": 165, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine", "Engineering", "Mathematics" ] }, { "title": "Early Detection and Diagnosis of Autism Spectrum Disorder: Why Is It So Difficult?", "abstract": null, "year": 2020, "venue": "Seminars in Pediatric Neurology", "authors": [ "P. McCarty", "R. 
Frye" ], "externalIds": { "MAG": "3038017345", "DOI": "10.1016/j.spen.2020.100831", "CorpusId": 221522851, "PubMed": "32892958" }, "url": "https://www.semanticscholar.org/paper/43d74f9f20ce3f4c164e12bcb62207962c526a2c", "referenceCount": 24, "citationCount": 67, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Prevalence and cumulative incidence of autism spectrum disorders and the patterns of co-occurring neurodevelopmental disorders in a total population sample of 5-year-old children", "abstract": null, "year": 2020, "venue": "Molecular Autism", "authors": [ "M. Saito", "T. Hirota", "Yui Sakamoto", "Masaki Adachi", "Michio Takahashi", "Ayako Osato-Kaneda", "Y. Kim", "B. Leventhal", "A. Shui", "Sumi Kato", "Kazuhiko Nakamura" ], "externalIds": { "PubMedCentral": "7227343", "MAG": "3026298612", "DOI": "10.1186/s13229-020-00342-5", "CorpusId": 218636108, "PubMed": "32410700" }, "url": "https://www.semanticscholar.org/paper/a2b779cdafe0886d25a5d9bf5d9df3758dbd912c", "referenceCount": 33, "citationCount": 85, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "DeepFMRI: End-to-end deep learning for functional connectivity and classification of ADHD using fMRI", "abstract": null, "year": 2020, "venue": "Journal of Neuroscience Methods", "authors": [ "A. Riaz", "Muhammad Asad", "Eduardo Alonso", "G. Slabaugh" ], "externalIds": { "MAG": "3003265860", "DOI": "10.1016/j.jneumeth.2019.108506", "CorpusId": 210916172, "PubMed": "32001294" }, "url": "https://www.semanticscholar.org/paper/fb366558faf434c3c1b4678878aca64b2bf57c98", "referenceCount": 61, "citationCount": 92, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Explaining Models by Propagating Shapley Values of Local Components", "abstract": null, "year": 2019, "venue": "Explainable AI in Healthcare and Medicine", "authors": [ "Hugh Chen", "Scott M. Lundberg", "Su-In Lee" ], "externalIds": { "ArXiv": "1911.11888", "MAG": "2991414967", "DBLP": "journals/corr/abs-1911-11888", "DOI": "10.1007/978-3-030-53352-6_24", "CorpusId": 208309986 }, "url": "https://www.semanticscholar.org/paper/0030923414ff02a4180bc29809003d503be213e5", "referenceCount": 29, "citationCount": 93, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Clinical Decision Support in the Era of Artificial Intelligence.", "abstract": null, "year": 2018, "venue": "Journal of the American Medical Association (JAMA)", "authors": [ "E. Shortliffe", "M. Sepúlveda" ], "externalIds": { "MAG": "2899876413", "DOI": "10.1001/jama.2018.17163", "CorpusId": 53223428, "PubMed": "30398550" }, "url": "https://www.semanticscholar.org/paper/74a6826bcd97afbc7bcb175d622ab63f1596d260", "referenceCount": 9, "citationCount": 415, "influentialCitationCount": 9, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "A systematic review of quality of life of adults on the autism spectrum", "abstract": "Autism spectrum disorder is associated with co-existing conditions that may adversely affect an individual’s quality of life. No systematic review of quality of life of adults on the autism spectrum has been conducted. Our objectives were as follows: (1) review the evidence about quality of life for adults on the autism spectrum; (2) critically appraise current practice in assessing quality of life of adults on the autism spectrum. 
We searched bibliographic databases and other literature to identify studies using a direct measure of quality of life of adults on the autism spectrum. Hand searching of reference lists, citation searching and personal communication with field experts were also undertaken. In total, 827 studies were identified; 14 were included. Only one quality of life measure designed for use with the general autism spectrum population was identified. Quality of life of adults on the autism spectrum is lower than that of typically developing adults, when measured with tools designed for the general population. There are no comprehensive autism spectrum disorder–specific quality of life measurement tools validated for use with representative samples of adults on the autism spectrum. There is a pressing need to develop robust measures of quality of life of autistic adults.", "year": 2018, "venue": "Autism", "authors": [ "Michael Ayres", "J. Parr", "J. Rodgers", "David Mason", "L. Avery", "D. Flynn" ], "externalIds": { "MAG": "2743496150", "DOI": "10.1177/1362361317714988", "CorpusId": 30324574, "PubMed": "28805071" }, "url": "https://www.semanticscholar.org/paper/4970e36f3e36593adab41648ee6c3d0d77ae297b", "referenceCount": 33, "citationCount": 144, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Psychology", "Medicine" ] }, { "title": "A Novel Transfer Learning Approach to Enhance Deep Neural Network Classification of Brain Functional Connectomes", "abstract": "Early diagnosis remains a significant challenge for many neurological disorders, especially for rare disorders where studying large cohorts is not possible. A novel solution that investigators have undertaken is combining advanced machine learning algorithms with resting-state functional Magnetic Resonance Imaging to unveil hidden pathological brain connectome patterns to uncover diagnostic and prognostic biomarkers. Recently, state-of-the-art deep learning techniques are outperforming traditional machine learning methods and are hailed as a milestone for artificial intelligence. However, whole brain classification that combines brain connectome with deep learning has been hindered by insufficient training samples. Inspired by the transfer learning strategy employed in computer vision, we exploited previously collected resting-state functional MRI data for healthy subjects from existing databases and transferred this knowledge for new disease classification tasks. We developed a deep transfer learning neural network (DTL-NN) framework for enhancing the classification of whole brain functional connectivity patterns. Briefly, we trained a stacked sparse autoencoder (SSAE) prototype to learn healthy functional connectivity patterns in an offline learning environment. Then, the SSAE prototype was transferred to a DTL-NN model for a new classification task. To test the validity of our framework, we collected resting-state functional MRI data from the Autism Brain Imaging Data Exchange (ABIDE) repository. Using autism spectrum disorder (ASD) classification as a target task, we compared the performance of our DTL-NN approach with a traditional deep neural network and support vector machine models across four ABIDE data sites that enrolled at least 60 subjects. As compared to traditional models, our DTL-NN approach achieved an improved performance in accuracy, sensitivity, specificity and area under receiver operating characteristic curve. 
These findings suggest that DTL-NN approaches could enhance disease classification for neurological conditions, where accumulating large neuroimaging datasets has been challenging.", "year": 2018, "venue": "Frontiers in Neuroscience", "authors": [ "Hailong Li", "N. Parikh", "Lili He" ], "externalIds": { "PubMedCentral": "6066582", "MAG": "2810012895", "DOI": "10.3389/fnins.2018.00491", "CorpusId": 49904929, "PubMed": "30087587" }, "url": "https://www.semanticscholar.org/paper/498862ee20d099837cbad8523a18a4dc7598277c", "referenceCount": 47, "citationCount": 120, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "A Benchmark for Interpretability Methods in Deep Neural Networks", "abstract": "We propose an empirical measure of the approximate accuracy of feature importance estimates in deep neural networks. Our results across several large-scale image classification datasets show that many popular interpretability methods produce estimates of feature importance that are not better than a random designation of feature importance. Only certain ensemble based approaches---VarGrad and SmoothGrad-Squared---outperform such a random assignment of importance. The manner of ensembling remains critical, we show that some approaches do no better then the underlying method but carry a far higher computational burden.", "year": 2018, "venue": "Neural Information Processing Systems", "authors": [ "Sara Hooker", "D. Erhan", "Pieter-Jan Kindermans", "Been Kim" ], "externalIds": { "MAG": "2970447476", "DBLP": "conf/nips/HookerEKK19", "ArXiv": "1806.10758", "CorpusId": 202782699 }, "url": "https://www.semanticscholar.org/paper/5f614777d25efd14b7426e99cb2544f2d6be133e", "referenceCount": 44, "citationCount": 597, "influentialCitationCount": 81, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Abnormal Functional Connectivity of Resting State Network Detection Based on Linear ICA Analysis in Autism Spectrum Disorder", "abstract": "Some functional magnetic resonance imaging (fMRI) researches in autism spectrum disorder (ASD) patients have shown that ASD patients have significant impairment in brain response. However, few researchers have studied the functional structure changes of the eight resting state networks (RSNs) in ASD patients. Therefore, research on statistical differences of RSNs between 42 healthy controls (HC) and 50 ASD patients has been studied using linear independent component analysis (ICA) in this paper. Our researches showed that there was abnormal functional connectivity (FC) of RSNs in ASD patients. The RSNs with the decreased FC and increased FC in ASD patients included default mode network (DMN), central executive network (CEN), core network (CN), visual network (VN), self-referential network (SRN) compared to HC. The RSNs with the increased FC in ASD patients included auditory network (AN), somato-motor network (SMN). The dorsal attention network (DAN) in ASD patients showed the decreased FC. Our findings indicate that the abnormal FC in RSNs extensively exists in ASD patients. 
Our results make an important contribution to the study of neuro-pathophysiological mechanisms in ASD patients.", "year": 2018, "venue": "Frontiers in Physiology", "authors": [ "Xia-an Bi", "Junxia Zhao", "Qian Xu", "Qi Sun", "Zhigang Wang" ], "externalIds": { "MAG": "2802901695", "PubMedCentral": "5952255", "DOI": "10.3389/fphys.2018.00475", "CorpusId": 19222250, "PubMed": "29867534" }, "url": "https://www.semanticscholar.org/paper/f69f81902dab141129fc955b62749571688606c1", "referenceCount": 85, "citationCount": 36, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Detect, Quantify, and Incorporate Dataset Bias: A Neuroimaging Analysis on 12,207 Individuals", "abstract": "Neuroimaging datasets keep growing in size to address increasingly complex medical questions. However, even the largest datasets today alone are too small for training complex models or for finding genome wide associations. A solution is to grow the sample size by merging data across several datasets. However, bias in datasets complicates this approach and includes additional sources of variation in the data instead. In this work, we combine 15 large neuroimaging datasets to study bias. First, we detect bias by demonstrating that scans can be correctly assigned to a dataset with 73.3% accuracy. Next, we introduce metrics to quantify the compatibility across datasets and to create embeddings of neuroimaging sites. Finally, we incorporate the presence of bias for the selection of a training set for predicting autism. For the quantification of the dataset bias, we introduce two metrics: the Bhattacharyya distance between datasets and the age prediction error. The presented embedding of neuroimaging sites provides an interesting new visualization of the similarity of different sites. This could be used to guide the merging of data sources, while limiting the introduction of unwanted variation. Finally, we demonstrate a clear performance increase when incorporating dataset bias for training set selection in autism prediction. Overall, we believe that the growing amount of neuroimaging data necessitates incorporating data-driven methods for quantifying dataset bias in future analyses.", "year": 2018, "venue": "arXiv.org", "authors": [ "C. Wachinger", "B. Gutiérrez-Becker", "A. Rieckmann" ], "externalIds": { "DBLP": "journals/corr/abs-1804-10764", "MAG": "2799030348", "ArXiv": "1804.10764", "CorpusId": 13749965 }, "url": "https://www.semanticscholar.org/paper/3b8761ef32737ece2626a61b41226acb05f325ac", "referenceCount": 30, "citationCount": 6, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep learning for segmentation of brain tumors: Impact of cross‐institutional training and testing", "abstract": "BACKGROUND AND PURPOSE\nConvolutional neural networks (CNNs) are commonly used for segmentation of brain tumors. In this work, we assess the effect of cross-institutional training on the performance of CNNs.\n\n\nMETHODS\nWe selected 44 glioblastoma (GBM) patients from two institutions in The Cancer Imaging Archive dataset. The images were manually annotated by outlining each tumor component to form ground truth. To automatically segment the tumors in each patient, we trained three CNNs: (a) one using data for patients from the same institution as the test data, (b) one using data for the patients from the other institution and (c) one using data for the patients from both of the institutions. 
The performance of the trained models was evaluated using Dice similarity coefficients as well as Average Hausdorff Distance between the ground truth and automatic segmentations. The 10-fold cross-validation scheme was used to compare the performance of different approaches.\n\n\nRESULTS\nPerformance of the model significantly decreased (P < 0.0001) when it was trained on data from a different institution (dice coefficients: 0.68 ± 0.19 and 0.59 ± 0.19) as compared to training with data from the same institution (dice coefficients: 0.72 ± 0.17 and 0.76 ± 0.12). This trend persisted for segmentation of the entire tumor as well as its individual components.\n\n\nCONCLUSIONS\nThere is a very strong effect of selecting data for training on performance of CNNs in a multi-institutional setting. Determination of the reasons behind this effect requires additional comprehensive investigation.", "year": 2018, "venue": "Medical Physics (Lancaster)", "authors": [ "Ehab Albadawy", "Ashirbani Saha", "M. Mazurowski" ], "externalIds": { "MAG": "2791655542", "DOI": "10.1002/mp.12752", "CorpusId": 3837836, "PubMed": "29356028" }, "url": "https://www.semanticscholar.org/paper/05447ad7c0bc55f8b3766e23da1578bb13f46bbf", "referenceCount": 27, "citationCount": 183, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Methods for interpreting and understanding deep neural networks", "abstract": null, "year": 2017, "venue": "Digit. Signal Process.", "authors": [ "G. Montavon", "W. Samek", "K. Müller" ], "externalIds": { "DBLP": "journals/dsp/MontavonSM18", "MAG": "2657631929", "ArXiv": "1706.07979", "DOI": "10.1016/j.dsp.2017.10.011", "CorpusId": 207170725 }, "url": "https://www.semanticscholar.org/paper/a002e71561c90767240672f357b7d9e6d4d95186", "referenceCount": 78, "citationCount": 2071, "influentialCitationCount": 78, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Unified Approach to Interpreting Model Predictions", "abstract": "Understanding why a model makes a certain prediction can be as crucial as the prediction's accuracy in many applications. However, the highest accuracy for large modern datasets is often achieved by complex models that even experts struggle to interpret, such as ensemble or deep learning models, creating a tension between accuracy and interpretability. In response, various methods have recently been proposed to help users interpret the predictions of complex models, but it is often unclear how these methods are related and when one method is preferable over another. To address this problem, we present a unified framework for interpreting predictions, SHAP (SHapley Additive exPlanations). SHAP assigns each feature an importance value for a particular prediction. Its novel components include: (1) the identification of a new class of additive feature importance measures, and (2) theoretical results showing there is a unique solution in this class with a set of desirable properties. The new class unifies six existing methods, notable because several recent methods in the class lack the proposed desirable properties. Based on insights from this unification, we present new methods that show improved computational performance and/or better consistency with human intuition than previous approaches.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Scott M. 
Lundberg", "Su-In Lee" ], "externalIds": { "MAG": "2618851150", "DBLP": "journals/corr/LundbergL17", "ArXiv": "1705.07874", "CorpusId": 21889700 }, "url": "https://www.semanticscholar.org/paper/442e10a3c6640ded9408622005e3c2a8906ce4c2", "referenceCount": 10, "citationCount": 16601, "influentialCitationCount": 1905, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Learning Important Features Through Propagating Activation Differences", "abstract": "The purported \"black box\" nature of neural networks is a barrier to adoption in applications where interpretability is essential. Here we present DeepLIFT (Deep Learning Important FeaTures), a method for decomposing the output prediction of a neural network on a specific input by backpropagating the contributions of all neurons in the network to every feature of the input. DeepLIFT compares the activation of each neuron to its 'reference activation' and assigns contribution scores according to the difference. By optionally giving separate consideration to positive and negative contributions, DeepLIFT can also reveal dependencies which are missed by other approaches. Scores can be computed efficiently in a single backward pass. We apply DeepLIFT to models trained on MNIST and simulated genomic data, and show significant advantages over gradient-based methods. Video tutorial: http://goo.gl/qKb7pL, code: http://goo.gl/RM8jvH.", "year": 2017, "venue": "International Conference on Machine Learning", "authors": [ "Avanti Shrikumar", "Peyton Greenside", "A. Kundaje" ], "externalIds": { "MAG": "2605409611", "DBLP": "conf/icml/ShrikumarGK17", "ArXiv": "1704.02685", "CorpusId": 3385018 }, "url": "https://www.semanticscholar.org/paper/1a2118bed729579528deb51e745d58dd3629baf6", "referenceCount": 17, "citationCount": 3444, "influentialCitationCount": 373, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Patterns of Atypical Functional Connectivity and Behavioral Links in Autism Differ Between Default, Salience, and Executive Networks.", "abstract": "Autism spectrum disorder (ASD) is characterized by atypical brain network organization, but findings have been inconsistent. While methodological and maturational factors have been considered, the network specificity of connectivity abnormalities remains incompletely understood. We investigated intrinsic functional connectivity (iFC) for four \"core\" functional networks-default-mode (DMN), salience (SN), and left (lECN) and right executive control (rECN). Resting-state functional MRI data from 75 children and adolescents (37 ASD, 38 typically developing [TD]) were included. Functional connectivity within and between networks was analyzed for regions of interest (ROIs) and whole brain, compared between groups, and correlated with behavioral scores. ROI analyses showed overconnectivity (ASD > TD), especially between DMN and ECN. Whole-brain results were mixed. While predominant overconnectivity was found for DMN (posterior cingulate seed) and rECN (right inferior parietal seed), predominant underconnectivity was found for SN (right anterior insula seed) and lECN (left inferior parietal seed). In the ASD group, reduced SN integrity was associated with sensory and sociocommunicative symptoms. In conclusion, atypical connectivity in ASD is network-specific, ranging from extensive overconnectivity (DMN, rECN) to extensive underconnectivity (SN, lECN). Links between iFC and behavior differed between groups. 
Core symptomatology in the ASD group was predominantly related to connectivity within the salience network.", "year": 2016, "venue": "Cerebral Cortex", "authors": [ "Angela E. Abbott", "Aarti Nair", "C. Keown", "M. Datko", "Afrooz Jahedi", "I. Fishman", "R. Müller" ], "externalIds": { "MAG": "2291733379", "DOI": "10.1093/cercor/bhv191", "CorpusId": 4907108, "PubMed": "26351318" }, "url": "https://www.semanticscholar.org/paper/b29f1c492e7876798595f386d77feeb52f7a6e65", "referenceCount": 109, "citationCount": 168, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Psychology", "Medicine" ] }, { "title": "The neuroanatomy of autism – a developmental perspective", "abstract": "Autism Spectrum Disorders (ASDs) are a heterogeneous group of neurodevelopmental disorders that are diagnosed solely on the basis of behaviour. A large body of work has reported neuroanatomical differences between individuals with ASD and neurotypical controls. Despite the huge clinical and genetic heterogeneity that typifies autism, some of these anatomical features appear to be either present in most cases or so dramatically altered in some that their presence is now reasonably well replicated in a number of studies. One such finding is the tendency towards overgrowth of the frontal cortex during the early postnatal period. Although these reports have been focused primarily on the presumed pathological anatomy, they are providing us with important insights into normal brain anatomy and are stimulating new ideas and hypotheses about the normal trajectory of brain development and the function of specific anatomical brain structures. The use of model systems that include genetic model organisms such as the mouse and, more recently, human induced pluripotent stem cell‐derived brain organoids to model normal and pathological human cortical development, is proving particularly informative. Here we review some of the neuroanatomical alterations reported in autism, with a particular focus on well‐validated findings and recent advances in the field, and ask what these observations can tell us about normal and abnormal brain development.", "year": 2016, "venue": "Journal of Anatomy", "authors": [ "Alex P. A. Donovan", "M. A. Basson" ], "externalIds": { "PubMedCentral": "5192959", "MAG": "2520025142", "DOI": "10.1111/joa.12542", "CorpusId": 15410785, "PubMed": "27620360" }, "url": "https://www.semanticscholar.org/paper/9adcd9fe1f8c384ba6dab9b59f6773a5556ddb08", "referenceCount": 90, "citationCount": 174, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Psychology" ] }, { "title": "Autism Spectrum Disorder- A Review", "abstract": "Autism spectrum disorders (ASDs) are a group of developmental disabilities characterized by impairments in social interaction and communication and by restricted, repetitive, and stereotyped patterns of behaviour. Symptoms typically are apparent before age 3 years. The complex nature of these disorders, coupled with a lack of biologic markers for diagnosis and changes in clinical definitions over time, creates challenges in monitoring the prevalence of ASDs. Accurate reporting of data is essential to understand the prevalence of ASDs in the population and can help direct research. 
ASDs have increased in prevalence, leading to a demand for improved understanding of the comparative effectiveness of different pharmacologic, behavioural, medical and alternative treatments for children as well as systems for providing services.1", "year": 2016, "venue": "", "authors": [ "S. Yadav" ], "externalIds": { "MAG": "2521981476", "DOI": "10.5958/2454-2660.2016.00044.2", "CorpusId": 78865548 }, "url": "https://www.semanticscholar.org/paper/f0ef0f1672dd928cfe8f6d18cdd7546eb99b6d59", "referenceCount": 0, "citationCount": 45, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "“Why Should I Trust You?”: Explaining the Predictions of Any Classifier", "abstract": "Despite widespread adoption, machine learning models remain mostly black boxes. Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally varound the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks). We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.", "year": 2016, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Marco Tulio Ribeiro", "Sameer Singh", "Carlos Guestrin" ], "externalIds": { "ACL": "N16-3020", "MAG": "2951501516", "DBLP": "journals/corr/RibeiroSG16", "ArXiv": "1602.04938", "DOI": "10.1145/2939672.2939778", "CorpusId": 13029170 }, "url": "https://www.semanticscholar.org/paper/c0883f5930a232a9c1ad601c978caede29155979", "referenceCount": 41, "citationCount": 14452, "influentialCitationCount": 1693, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Autism, females, and the DSM-5: Gender bias in autism diagnosis", "abstract": "ABSTRACT Autism is a neurodevelopmental condition thought to occur predominantly in males. Despite limited research, there is evidence that criteria used to identify autism are gender-biased, leaving females with autism undiagnosed or misdiagnosed. This article provides a brief history of autism and an overview of research related to gender bias in autism spectrum diagnosis. In addition, research on DSM-5 autism criteria relevant to gender is reviewed, along with the role diagnostic screening instruments play in perpetuating gender bias. Finally, the sensitivity of DSM-5 criteria to females on the autism spectrum is considered within the context of social work practice and research.", "year": 2016, "venue": "", "authors": [ "Jolynn L. 
Haney" ], "externalIds": { "MAG": "2220764169", "DOI": "10.1080/15332985.2015.1031858", "CorpusId": 146264371 }, "url": "https://www.semanticscholar.org/paper/4ab68b5579432cad1835b0c999b6c613a4323f12", "referenceCount": 55, "citationCount": 68, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology" ] }, { "title": "Neuroimaging in autism spectrum disorder: brain structure and function across the lifespan", "abstract": null, "year": 2015, "venue": "Lancet Neurology", "authors": [ "C. Ecker", "S. Bookheimer", "D. Murphy" ], "externalIds": { "MAG": "2010651179", "DOI": "10.1016/S1474-4422(15)00050-2", "CorpusId": 796415, "PubMed": "25891007" }, "url": "https://www.semanticscholar.org/paper/385cc630db9dd1f0cde0417bd05e77700194652d", "referenceCount": 150, "citationCount": 362, "influentialCitationCount": 19, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Psychology" ] }, { "title": "Long-Term Outcomes of Early Intervention in 6-Year-Old Children With Autism Spectrum Disorder.", "abstract": null, "year": 2015, "venue": "Journal of the American Academy of Child and Adolescent Psychiatry", "authors": [ "A. Estes", "J. Munson", "S. Rogers", "J. Greenson", "Jamie Winter", "G. Dawson" ], "externalIds": { "MAG": "1981760840", "DOI": "10.1016/j.jaac.2015.04.005", "CorpusId": 24619586, "PubMed": "26088663" }, "url": "https://www.semanticscholar.org/paper/aac74ad63d598f88521bb7ef222390facff1b5ad", "referenceCount": 29, "citationCount": 420, "influentialCitationCount": 23, "isOpenAccess": true, "fieldsOfStudy": [ "Psychology", "Medicine" ] }, { "title": "Autism: reduced connectivity between cortical areas involved in face expression, theory of mind, and the sense of self", "abstract": "Cheng, Rolls et al. examine whole-brain voxel-based resting-state functional connectivity in 418 people with autism. They reveal reduced connectivity between regions involved in facial expression processing and theory of mind (middle temporal gyrus), emotion processing (ventromedial prefrontal cortex), and the representation of self (precuneus and related posterior cingulate areas).", "year": 2015, "venue": "Brain : a journal of neurology", "authors": [ "W. Cheng", "E. Rolls", "Huaguang Gu", "Jie Zhang", "Jianfeng Feng" ], "externalIds": { "MAG": "2104482304", "PubMedCentral": "4407191", "DOI": "10.1093/brain/awv051", "CorpusId": 8141452, "PubMed": "25795704" }, "url": "https://www.semanticscholar.org/paper/88309a9eb0215452a670489896508d0032414fba", "referenceCount": 52, "citationCount": 223, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Psychology" ] }, { "title": "Resting state functional magnetic resonance imaging and neural network classified autism and control", "abstract": null, "year": 2015, "venue": "Cortex", "authors": [ "T. Iidaka" ], "externalIds": { "MAG": "2075210662", "DOI": "10.1016/j.cortex.2014.08.011", "CorpusId": 43314807, "PubMed": "25243989" }, "url": "https://www.semanticscholar.org/paper/e0488faf7e4b6e558aac8f13f04fdb74eea34ed1", "referenceCount": 57, "citationCount": 161, "influentialCitationCount": 13, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology", "Medicine" ] }, { "title": "The idiosyncratic brain: distortion of spontaneous connectivity patterns in autism spectrum disorder", "abstract": null, "year": 2015, "venue": "Nature Neuroscience", "authors": [ "Avital Hahamy", "M. Behrmann", "R. 
Malach" ], "externalIds": { "MAG": "2011150799", "DOI": "10.1038/nn.3919", "CorpusId": 14394509, "PubMed": "25599222" }, "url": "https://www.semanticscholar.org/paper/333bff6f358ac4268cd5e2ccf13950f4baacf312", "referenceCount": 57, "citationCount": 348, "influentialCitationCount": 19, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Psychology" ] }, { "title": "A Connectome Computation System for discovery science of brain", "abstract": null, "year": 2015, "venue": "", "authors": [ "Ting Xu", "Zhi Yang", "Lili Jiang", "Xiu-Xia Xing", "X. Zuo" ], "externalIds": { "MAG": "1992338409", "DOI": "10.1007/S11434-014-0698-3", "CorpusId": 53964463 }, "url": "https://www.semanticscholar.org/paper/d1e765075d23354e6867bede700d41b2b7bc929e", "referenceCount": 96, "citationCount": 147, "influentialCitationCount": 4, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Adam: A Method for Stochastic Optimization", "abstract": "We introduce Adam, an algorithm for first-order gradient-based optimization of stochastic objective functions, based on adaptive estimates of lower-order moments. The method is straightforward to implement, is computationally efficient, has little memory requirements, is invariant to diagonal rescaling of the gradients, and is well suited for problems that are large in terms of data and/or parameters. The method is also appropriate for non-stationary objectives and problems with very noisy and/or sparse gradients. The hyper-parameters have intuitive interpretations and typically require little tuning. Some connections to related algorithms, on which Adam was inspired, are discussed. We also analyze the theoretical convergence properties of the algorithm and provide a regret bound on the convergence rate that is comparable to the best known results under the online convex optimization framework. Empirical results demonstrate that Adam works well in practice and compares favorably to other stochastic optimization methods. Finally, we discuss AdaMax, a variant of Adam based on the infinity norm.", "year": 2014, "venue": "International Conference on Learning Representations", "authors": [ "Diederik P. Kingma", "Jimmy Ba" ], "externalIds": { "MAG": "2964121744", "DBLP": "journals/corr/KingmaB14", "ArXiv": "1412.6980", "CorpusId": 6628106 }, "url": "https://www.semanticscholar.org/paper/a6cb366736791bcccc5c8639de5a8f9636bf87e8", "referenceCount": 26, "citationCount": 139990, "influentialCitationCount": 22063, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Striving for Simplicity: The All Convolutional Net", "abstract": "Most modern convolutional neural networks (CNNs) used for object recognition are built using the same principles: Alternating convolution and max-pooling layers followed by a small number of fully connected layers. We re-evaluate the state of the art for object recognition from small images with convolutional networks, questioning the necessity of different components in the pipeline. We find that max-pooling can simply be replaced by a convolutional layer with increased stride without loss in accuracy on several image recognition benchmarks. Following this finding -- and building on other recent work for finding simple network structures -- we propose a new architecture that consists solely of convolutional layers and yields competitive or state of the art performance on several object recognition datasets (CIFAR-10, CIFAR-100, ImageNet). 
To analyze the network we introduce a new variant of the \"deconvolution approach\" for visualizing features learned by CNNs, which can be applied to a broader range of network structures than existing approaches.", "year": 2014, "venue": "International Conference on Learning Representations", "authors": [ "Jost Tobias Springenberg", "Alexey Dosovitskiy", "T. Brox", "Martin A. Riedmiller" ], "externalIds": { "MAG": "2123045220", "ArXiv": "1412.6806", "DBLP": "journals/corr/SpringenbergDBR14", "CorpusId": 12998557 }, "url": "https://www.semanticscholar.org/paper/33af9298e5399269a12d4b9901492fe406af62b4", "referenceCount": 29, "citationCount": 4436, "influentialCitationCount": 418, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Global motion perception deficits in autism are reflected as early as primary visual cortex.", "abstract": "Individuals with autism are often characterized as 'seeing the trees, but not the forest'-attuned to individual details in the visual world at the expense of the global percept they compose. Here, we tested the extent to which global processing deficits in autism reflect impairments in (i) primary visual processing; or (ii) decision-formation, using an archetypal example of global perception, coherent motion perception. In an event-related functional MRI experiment, 43 intelligence quotient and age-matched male participants (21 with autism, age range 15-27 years) performed a series of coherent motion perception judgements in which the amount of local motion signals available to be integrated into a global percept was varied by controlling stimulus viewing duration (0.2 or 0.6 s) and the proportion of dots moving in the correct direction (coherence: 4%, 15%, 30%, 50%, or 75%). Both typical participants and those with autism evidenced the same basic pattern of accuracy in judging the direction of motion, with performance decreasing with reduced coherence and shorter viewing durations. Critically, these effects were exaggerated in autism: despite equal performance at the long duration, performance was more strongly reduced by shortening viewing duration in autism (P < 0.015) and decreasing stimulus coherence (P < 0.008). To assess the neural correlates of these effects we focused on the responses of primary visual cortex and the middle temporal area, critical in the early visual processing of motion signals, as well as a region in the intraparietal sulcus thought to be involved in perceptual decision-making. The behavioural results were mirrored in both primary visual cortex and the middle temporal area, with a greater reduction in response at short, compared with long, viewing durations in autism compared with controls (both P < 0.018). In contrast, there was no difference between the groups in the intraparietal sulcus (P > 0.574). These findings suggest that reduced global motion perception in autism is driven by an atypical response early in visual processing and may reflect a fundamental perturbation in neural circuitry.", "year": 2014, "venue": "Brain : a journal of neurology", "authors": [ "Caroline E. Robertson", "Cibu Thomas", "Dwight J. Kravitz", "G. Wallace", "S. Baron-Cohen", "Alex Martin", "C. 
Baker" ], "externalIds": { "MAG": "2155485320", "DOI": "10.1093/brain/awu189", "CorpusId": 7649179, "PubMed": "25060095" }, "url": "https://www.semanticscholar.org/paper/270496b50ebc1df554fd678994d246f183ee0cc4", "referenceCount": 66, "citationCount": 109, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Psychology", "Medicine" ] }, { "title": "The Cerebellum, Sensitive Periods, and Autism", "abstract": null, "year": 2014, "venue": "Neuron", "authors": [ "S. Wang", "A. Kloth", "A. Badura" ], "externalIds": { "MAG": "2061043823", "DOI": "10.1016/j.neuron.2014.07.016", "CorpusId": 15881612, "PubMed": "25102558" }, "url": "https://www.semanticscholar.org/paper/0dc08ceff7472e6b23a6074430f0dcbd6cad1025", "referenceCount": 177, "citationCount": 640, "influentialCitationCount": 42, "isOpenAccess": true, "fieldsOfStudy": [ "Psychology", "Medicine" ] }, { "title": "Brain connectivity in autism", "abstract": "With the increasing prevalence of autism spectrum disorders (ASD), the pace of research aimed at understanding the neurobiology of this complex neurodevelopmental disorder has accelerated. Neuroimaging and postmortem studies have provided evidence for disruptions in functional and structural connectivity in the brains of individuals with ASD (Vissers et al., 2012). This burgeoning literature continues to struggle with methodological and conceptual issues inherent to discovering relationships between brain and behavior. While there has been considerable progress, many open questions remain. In this special topic, a collection of empirical contributions and reviews from leaders in the field attempt to synthesize and extend prior work investigating brain connectivity in autism. Multiple theoretical perspectives and neuroimaging methods are brought together with the aim of addressing outstanding questions about the nature and extent of brain connectivity aberrations in autism.", "year": 2014, "venue": "Frontiers in Human Neuroscience", "authors": [ "R. Kana", "L. Uddin", "T. Kenet", "D. Chugani", "R. Müller" ], "externalIds": { "PubMedCentral": "4041005", "MAG": "1992184506", "DOI": "10.3389/fnhum.2014.00349", "CorpusId": 2560835, "PubMed": "24917800" }, "url": "https://www.semanticscholar.org/paper/64016833b5f18bdc2d16d1ddc9ee05223b68d7f2", "referenceCount": 32, "citationCount": 128, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Psychology", "Medicine" ] }, { "title": "Methods to detect, characterize, and remove motion artifact in resting state fMRI", "abstract": null, "year": 2014, "venue": "NeuroImage", "authors": [ "Jonathan D. Power", "A. Mitra", "Timothy O. Laumann", "A. Snyder", "B. Schlaggar", "S. Petersen" ], "externalIds": { "MAG": "1973776237", "DBLP": "journals/neuroimage/PowerMLSSP14", "DOI": "10.1016/j.neuroimage.2013.08.048", "CorpusId": 2433590, "PubMed": "23994314" }, "url": "https://www.semanticscholar.org/paper/a3f132288f4797236098766cb2893b5d3afe69f6", "referenceCount": 38, "citationCount": 2901, "influentialCitationCount": 314, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Salience network-based classification and prediction of symptom severity in children with autism.", "abstract": "IMPORTANCE\nAutism spectrum disorder (ASD) affects 1 in 88 children and is characterized by a complex phenotype, including social, communicative, and sensorimotor deficits. 
Autism spectrum disorder has been linked with atypical connectivity across multiple brain systems, yet the nature of these differences in young children with the disorder is not well understood.\n\n\nOBJECTIVES\nTo examine connectivity of large-scale brain networks and determine whether specific networks can distinguish children with ASD from typically developing (TD) children and predict symptom severity in children with ASD.\n\n\nDESIGN, SETTING, AND PARTICIPANTS\nCase-control study performed at Stanford University School of Medicine of 20 children 7 to 12 years old with ASD and 20 age-, sex-, and IQ-matched TD children.\n\n\nMAIN OUTCOMES AND MEASURES\nBetween-group differences in intrinsic functional connectivity of large-scale brain networks, performance of a classifier built to discriminate children with ASD from TD children based on specific brain networks, and correlations between brain networks and core symptoms of ASD.\n\n\nRESULTS\nWe observed stronger functional connectivity within several large-scale brain networks in children with ASD compared with TD children. This hyperconnectivity in ASD encompassed salience, default mode, frontotemporal, motor, and visual networks. This hyperconnectivity result was replicated in an independent cohort obtained from publicly available databases. Using maps of each individual's salience network, children with ASD could be discriminated from TD children with a classification accuracy of 78%, with 75% sensitivity and 80% specificity. The salience network showed the highest classification accuracy among all networks examined, and the blood oxygen-level dependent signal in this network predicted restricted and repetitive behavior scores. The classifier discriminated ASD from TD in the independent sample with 83% accuracy, 67% sensitivity, and 100% specificity.\n\n\nCONCLUSIONS AND RELEVANCE\nSalience network hyperconnectivity may be a distinguishing feature in children with ASD. Quantification of brain network connectivity is a step toward developing biomarkers for objectively identifying children with ASD.", "year": 2013, "venue": "JAMA psychiatry", "authors": [ "L. Uddin", "Kaustubh Supekar", "Charles J. Lynch", "A. Khouzam", "Jennifer M. Phillips", "C. Feinstein", "S. Ryali", "V. Menon" ], "externalIds": { "MAG": "1973140577", "DOI": "10.1001/jamapsychiatry.2013.104", "CorpusId": 28437772, "PubMed": "23803651" }, "url": "https://www.semanticscholar.org/paper/1424ea49ecec8c27dbc3599ef1cdcec6d4794385", "referenceCount": 89, "citationCount": 535, "influentialCitationCount": 29, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Psychology" ] }, { "title": "The Autism Brain Imaging Data Exchange: Towards Large-Scale Evaluation of the Intrinsic Brain Architecture in Autism", "abstract": null, "year": 2013, "venue": "Molecular Psychiatry", "authors": [ "A. di Martino", "Chaogan Yan", "Qingyang Li", "Erin B. Denio", "F. Castellanos", "K. Alaerts", "Jeffrey S. Anderson", "Michal Assaf", "S. Bookheimer", "M. Dapretto", "Ben Deen", "S. Delmonte", "I. Dinstein", "B. Ertl-Wagner", "D. Fair", "L. Gallagher", "Daniel P. Kennedy", "C. Keown", "C. Keysers", "J. Lainhart", "C. Lord", "B. Luna", "V. Menon", "N. Minshew", "Christopher S. Monk", "S. Mueller", "R. Müller", "M. B. Nebel", "J. Nigg", "K. O'Hearn", "K. Pelphrey", "S. Peltier", "Jeff D. Rudie", "S. Sunaert", "M. Thioux", "J. Tyszka", "L. Uddin", "J. Verhoeven", "N. Wenderoth", "J. Wiggins", "S. Mostofsky", "M. 
Milham" ], "externalIds": { "PubMedCentral": "4162310", "MAG": "2167868121", "DOI": "10.1038/mp.2013.78", "CorpusId": 13785515, "PubMed": "23774715" }, "url": "https://www.semanticscholar.org/paper/9012a33a1ac8b6388b562e7558b3d808df485ceb", "referenceCount": 81, "citationCount": 2072, "influentialCitationCount": 108, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Psychology" ] }, { "title": "A review of cell assemblies", "abstract": null, "year": 2013, "venue": "Biological cybernetics", "authors": [ "C. Huyck", "P. Passmore" ], "externalIds": { "MAG": "2077445966", "DBLP": "journals/bc/HuyckP13", "DOI": "10.1007/s00422-013-0555-5", "CorpusId": 9048001, "PubMed": "23559034" }, "url": "https://www.semanticscholar.org/paper/e6f220dc3bd0208ed43a431bb8baddfc00b102e4", "referenceCount": 259, "citationCount": 52, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Spurious but systematic correlations in functional connectivity MRI networks arise from subject motion", "abstract": null, "year": 2012, "venue": "NeuroImage", "authors": [ "Jonathan D. Power", "K. Barnes", "A. Snyder", "B. Schlaggar", "S. Petersen" ], "externalIds": { "MAG": "1990134753", "DBLP": "journals/neuroimage/PowerBSSP12", "DOI": "10.1016/j.neuroimage.2011.10.018", "CorpusId": 37089375, "PubMed": "22019881" }, "url": "https://www.semanticscholar.org/paper/00844516c86828a4cc81471b573cb1a1696fcde9", "referenceCount": 39, "citationCount": 6590, "influentialCitationCount": 720, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "DPARSF: A MATLAB Toolbox for “Pipeline” Data Analysis of Resting-State fMRI", "abstract": "Resting-state functional magnetic resonance imaging (fMRI) has attracted more and more attention because of its effectiveness, simplicity and non-invasiveness in exploration of the intrinsic functional architecture of the human brain. However, user-friendly toolbox for “pipeline” data analysis of resting-state fMRI is still lacking. Based on some functions in Statistical Parametric Mapping (SPM) and Resting-State fMRI Data Analysis Toolkit (REST), we have developed a MATLAB toolbox called Data Processing Assistant for Resting-State fMRI (DPARSF) for “pipeline” data analysis of resting-state fMRI. After the user arranges the Digital Imaging and Communications in Medicine (DICOM) files and click a few buttons to set parameters, DPARSF will then give all the preprocessed (slice timing, realign, normalize, smooth) data and results for functional connectivity, regional homogeneity, amplitude of low-frequency fluctuation (ALFF), and fractional ALFF. DPARSF can also create a report for excluding subjects with excessive head motion and generate a set of pictures for easily checking the effect of normalization. In addition, users can also use DPARSF to extract time courses from regions of interest.", "year": 2010, "venue": "Frontiers in Systems Neuroscience", "authors": [ "Chaogan Yan", "Y. 
Zang" ], "externalIds": { "MAG": "2057550180", "PubMedCentral": "2889691", "DOI": "10.3389/fnsys.2010.00013", "CorpusId": 1149101, "PubMed": "20577591" }, "url": "https://www.semanticscholar.org/paper/fc1d568021431e1fd9b4ef2623e5b706f4240239", "referenceCount": 51, "citationCount": 3154, "influentialCitationCount": 207, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Abnormalities of intrinsic functional connectivity in autism spectrum disorders,\n", "abstract": null, "year": 2009, "venue": "NeuroImage", "authors": [ "Christopher S. Monk", "S. Peltier", "J. Wiggins", "S. Weng", "Melisa Carrasco", "S. Risi", "C. Lord" ], "externalIds": { "DBLP": "journals/neuroimage/MonkPWWCRL09", "MAG": "2158200838", "DOI": "10.1016/j.neuroimage.2009.04.069", "CorpusId": 2135810, "PubMed": "19409498" }, "url": "https://www.semanticscholar.org/paper/4946f3f23b8b18136e4d30f021657149e754fa0f", "referenceCount": 68, "citationCount": 548, "influentialCitationCount": 46, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science", "Psychology" ] }, { "title": "Functional abnormalities of the default network during self- and other-reflection in autism.", "abstract": "Recent studies of autism have identified functional abnormalities of the default network during a passive resting state. Since the default network is also typically engaged during social, emotional and introspective processing, dysfunction of this network may underlie some of the difficulties individuals with autism exhibit in these broad domains. In the present experiment, we attempted to further delineate the nature of default network abnormality in autism using experimentally constrained social and introspective tasks. Thirteen autism and 12 control participants were scanned while making true/false judgments for various statements about themselves (SELF condition) or a close other person (OTHER), and pertaining to either psychological personality traits (INTERNAL) or observable characteristics and behaviors (EXTERNAL). In the ventral medial prefrontal cortex/ventral anterior cingulate cortex, activity was reduced in the autism group across all judgment conditions and also during a resting condition, suggestive of task-independent dysfunction of this region. In other default network regions, overall levels of activity were not different between groups. Furthermore, in several of these regions, we found group by condition interactions only for INTERNAL/EXTERNAL judgments, and not SELF/OTHER judgments, suggestive of task-specific dysfunction. Overall, these results provide a more detailed view of default network functionality and abnormality in autism.", "year": 2008, "venue": "Social Cognitive and Affective Neuroscience", "authors": [ "Daniel P. Kennedy", "E. Courchesne" ], "externalIds": { "MAG": "2157293619", "DOI": "10.1093/scan/nsn011", "CorpusId": 3153944, "PubMed": "19015108" }, "url": "https://www.semanticscholar.org/paper/c8fea94f78244de56afd244e6d09bcf29c09a334", "referenceCount": 46, "citationCount": 238, "influentialCitationCount": 10, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Psychology" ] }, { "title": "Greedy Layer-Wise Training of Deep Networks", "abstract": "Complexity theory of circuits strongly suggests that deep architectures can be much more efficient (sometimes exponentially) than shallow architectures, in terms of computational elements required to represent some functions. 
Deep multi-layer neural networks have many levels of non-linearities allowing them to compactly represent highly non-linear and highly-varying functions. However, until recently it was not clear how to train such deep networks, since gradient-based optimization starting from random initialization appears to often get stuck in poor solutions. Hinton et al. recently introduced a greedy layer-wise unsupervised learning algorithm for Deep Belief Networks (DBN), a generative model with many layers of hidden causal variables. In the context of the above optimization problem, we study this algorithm empirically and explore variants to better understand its success and extend it to cases where the inputs are continuous or where the structure of the input distribution is not revealing enough about the variable to be predicted in a supervised task. Our experiments also confirm the hypothesis that the greedy layer-wise unsupervised training strategy mostly helps the optimization, by initializing weights in a region near a good local minimum, giving rise to internal distributed representations that are high-level abstractions of the input, bringing better generalization.", "year": 2006, "venue": "Neural Information Processing Systems", "authors": [ "Yoshua Bengio", "Pascal Lamblin", "D. Popovici", "H. Larochelle" ], "externalIds": { "MAG": "2540556213", "DBLP": "conf/nips/BengioLPL06", "DOI": "10.7551/mitpress/7503.003.0024", "CorpusId": 14201947 }, "url": "https://www.semanticscholar.org/paper/355d44f53428b1ac4fb2ab468d593c720640e5bd", "referenceCount": 18, "citationCount": 5191, "influentialCitationCount": 312, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Autism and Abnormal Development of Brain Connectivity", "abstract": "It has been said that people with autism suffer from a lack of “central coherence,” the cognitive ability to bind together a jumble of separate features into a single, coherent object or concept ([Frith, 1989][1]). Ironically, the same can be said of the field of autism research, which all too", "year": 2004, "venue": "Journal of Neuroscience", "authors": [ "M. Belmonte", "G. Allen", "A. Beckel-Mitchener", "L. Boulanger", "Ruth A. Carper", "S. Webb" ], "externalIds": { "MAG": "2082863207", "DOI": "10.1523/JNEUROSCI.3340-04.2004", "CorpusId": 11899344, "PubMed": "15496656" }, "url": "https://www.semanticscholar.org/paper/96324abf1f6a688efe0689e52411ebdf0e076f79", "referenceCount": 73, "citationCount": 1190, "influentialCitationCount": 56, "isOpenAccess": true, "fieldsOfStudy": [ "Psychology", "Medicine" ] }, { "title": "The amygdala theory of autism", "abstract": null, "year": 2000, "venue": "Neuroscience and Biobehavioral Reviews", "authors": [ "S. Baron-Cohen", "H. Ring", "E. Bullmore", "S. Wheelwright", "Chris Ashwin", "Steven Williams" ], "externalIds": { "MAG": "2070083499", "DOI": "10.1016/S0149-7634(00)00011-7", "CorpusId": 7455984, "PubMed": "10781695" }, "url": "https://www.semanticscholar.org/paper/87ebc5aa1f83a947da69e1a470617ab8e4d6c5fe", "referenceCount": 125, "citationCount": 1143, "influentialCitationCount": 43, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Psychology" ] }, { "title": "A Deep Concatenated Convolutional Neural Network-Based Method to Classify Autism", "abstract": null, "year": 2022, "venue": "International Conference on Neural Information Processing", "authors": [ "Tanu Wadhera", "Mufti Mahmud", "David J. 
Brown" ], "externalIds": { "DBLP": "conf/iconip/WadheraM022", "DOI": "10.1007/978-981-99-1648-1_37", "CorpusId": 258240562 }, "url": "https://www.semanticscholar.org/paper/0949c3f67824160bd0383ad546983b3a82d20bd8", "referenceCount": 0, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Identification of Autism Based on SVM-RFE and Stacked Sparse Auto-Encoder", "abstract": "In order to improve the classification accuracy of patients with autism based on the full Autism Brain Imaging Data Exchange dataset, a total of 501 subjects with autism and 553 subjects with typical control across 17 sites were involved in the study. Firstly, we applied the resting-state functional magnetic resonance imaging data to calculate the functional connectivity (FC) based on the automated anatomical labeling atlas with 116 brain regions. Secondly, we adopted the support vector machine-recursive feature elimination algorithm to select top 1000 features from the primitive FC features. Thirdly, we trained a stacked sparse auto-encoder with two hidden layers to extract the high-level latent and complicated features from the 1000 features. Finally, the optimal features obtained were fed into the softmax classifier. Experimental results demonstrate that the proposed classification algorithm is able to identify the autism with a state-of-the-art accuracy of 93.59% (sensitivity 92.52%, specificity 94.56%).", "year": 2019, "venue": "IEEE Access", "authors": [ "Canhua Wang", "Zhiyong Xiao", "Baoyu Wang", "Jianhua Wu" ], "externalIds": { "MAG": "2969646834", "DBLP": "journals/access/WangXWW19", "DOI": "10.1109/ACCESS.2019.2936639", "CorpusId": 201833100 }, "url": "https://www.semanticscholar.org/paper/aff82f3bcc3fcb097145f17a3393b8c993c161de", "referenceCount": 31, "citationCount": 45, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "“Layerwise relevance propagation for explaining deep neural network decisions in mribased alzheimer’s disease classification,”", "abstract": null, "year": 2019, "venue": "Frontiers in Aging Neuroscience", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“Artificial intelligence and machine learning in software,”", "abstract": null, "year": 2019, "venue": "artificial-intelligence-and-machine-learning-software-medical-device", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Deep Learning", "abstract": null, "year": 2016, "venue": "International Journal of Semantic Computing", "authors": [ "Xing Hao", "Guigang Zhang", "Shang Ma" ], "externalIds": { "DBLP": "journals/escri/HaoZ17", "DOI": "10.1142/S1793351X16500045", "CorpusId": 1779661 }, "url": "https://www.semanticscholar.org/paper/4f8d648c52edf74e41b0996128aa536e13cc7e82", "referenceCount": 2, "citationCount": 50533, "influentialCitationCount": 2842, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "From the SelectedWorks of Marcel Adam Just 2006 Sentence comprehension in autism : Thinking in pictures with decreased functional connectivity", "abstract": "Comprehending high-imagery sentences like The number eight when rotated 90 degrees looks like a pair of eyeglasses involves the participation and 
integration of several cortical regions. The linguistic content must be processed to determine what is to be mentally imaged, and then the mental image must be evaluated and related to the sentence. A theory of cortical underconnectivity in autism predicts that the interregional collaboration required between linguistic and imaginal processing in this task would be underserved in autism. This functional MRI study examined brain activation in 12 participants with autism and 13 age-and IQ-matched control participants while they processed sentences with either high-or low-imagery content. The analysis of functional connectivity among cortical regions showed that the language and spatial centres in the participants with autism were not as well synchronized as in controls. In addition to the functional connectivity differences, there was also a group difference in activation. In the processing of low-imagery sentences (e.g. Addition, subtraction and multiplication are all math skills), the use of imagery is not essential to comprehension. Nevertheless, the autism group activated parietal and occipital brain regions associated with imagery for comprehending both the low and high-imagery sentences, suggesting that they were using mental imagery in both conditions. In contrast, the control group showed imagery-related activation primarily in the high-imagery condition. The findings provide further evidence of underintegration of language and imagery in autism (and hence expand the understanding of underconnectivity) but also show that people with autism are more reliant on visualization to support language comprehension.", "year": 2016, "venue": "", "authors": [ "R. Kana", "T. Keller", "V. Cherkassky", "N. Minshew", "M. Just" ], "externalIds": { "CorpusId": 13230765 }, "url": "https://www.semanticscholar.org/paper/3f38a7aa16e4172103bc538d4857d485e3e41492", "referenceCount": 59, "citationCount": 339, "influentialCitationCount": 10, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "“Niak: Neuroimaging analysis kit : I do imaging,”", "abstract": null, "year": 2016, "venue": "idoimaging.com", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Autism spectrum disorders: an overview on diagnosis and treatment.", "abstract": "Pervasive developmental disorders are now commonly referred to as autism spectrum disorders (ASDs). ASDs present with a range of severity and impairments, and often are a cause of severe disability, representing a major public health concern. The diagnostic criteria require delays or abnormal functioning in social interaction, language, and/or imaginative play within the first 3 years of life, resulting in a deviation from the developmental pattern expected for the age. Because establishing a diagnosis of ASD is possible as early as 18-24 months of age, clinicians should strive to identify and begin intervention in children with ASD as soon as signs are manifest. Increasing efforts are underway to make ASD screening universal in pediatric healthcare. 
Given the crucial importance of early identification and multiple modalities of treatment for ASD, this review will summarize the diagnostic criteria, key areas for assessment by clinicians, specific scales and instruments for assessment, and discussion of evidence-based treatment programs and the role of specific drug therapies for symptom management.", "year": 2013, "venue": "Revista Brasileira de Psiquiatria", "authors": [ "H. Brentani", "C. S. Paula", "Daniela Bordini", "Deborah Rolim", "Fábio Sato", "Joana Portolese", "Maria Clara Pacífico", "J. McCracken" ], "externalIds": { "MAG": "2098013228", "DOI": "10.1590/1516-4446-2013-S104", "CorpusId": 29500871, "PubMed": "24142129" }, "url": "https://www.semanticscholar.org/paper/eb1db00560128f977bfb36102bec3ec3be3f9090", "referenceCount": 124, "citationCount": 105, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Psychology", "Medicine" ] }, { "title": "Towards Automated Analysis of Connectomes: The Configurable Pipeline for the Analysis of Connectomes (C-PAC)", "abstract": null, "year": 2013, "venue": "", "authors": [ "Craddock Cameron", "S. Sharad", "C. Brian", "K. Ranjeet", "Ghosh Satrajit", "Chaogan Yan", "Qingyang Li", "Lurie Daniel", "V. Joshua", "Burns Randal", "Colcombe Stanley", "M. Maarten", "Kelly Clare", "D. Adriana", "Castellanos Francisco", "Milham Michael" ], "externalIds": { "MAG": "2329148842", "DOI": "10.3389/CONF.FNINF.2013.09.00042", "CorpusId": 61955981 }, "url": "https://www.semanticscholar.org/paper/90c0f411e47a0dd625bae3833638f8ef5dd8b543", "referenceCount": 0, "citationCount": 262, "influentialCitationCount": 19, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "“ABIDE Preprocessed,”", "abstract": null, "year": 2012, "venue": "Preprocessed-connectomes-project.org", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“Kana RK, Keller TA, Cherkassky VL, Minshew NJ, Just MA. Atypical frontal-posterior synchronization of theory of mind regions in autism during mental state attribution. Soc Neurosci 4: 135-152,”", "abstract": null, "year": 2009, "venue": "Social neuroscience", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“Cortical activation and synchronization during sentence comprehension in high-functioning autism: evidence of underconnectivity,”", "abstract": null, "year": 2004, "venue": "Brain", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Brodmann's localisation in the cerebral cortex", "abstract": "Part 1 The principles of comparative cortical cytoarchitectonics: the basic laminar pattern of the cerebral cortex - homogenetic and heterogenetic cortical formations regional variations in cell structure of the cerebral cortex particularities of the cytoarchitecture in different animals. Part 2 The principles of comparative field organisation in the cerebral cortex: description of individual brain maps common features in cortical architectonics variations in cortical architectonics.
Part 3 Synthesis - hypothesis of the cortex as a morphological, physiological and pathological organ - histological cortical: localization in relation to morphology localization and histopathology physiology of the cortex as an organ.", "year": 1999, "venue": "", "authors": [ "L. Garey" ], "externalIds": { "MAG": "1607939528", "DOI": "10.1142/p151", "CorpusId": 142729473 }, "url": "https://www.semanticscholar.org/paper/51ed308fc6527a3503760fc8c9ce002d2c346a96", "referenceCount": 0, "citationCount": 517, "influentialCitationCount": 35, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology" ] }, { "title": "“MADE-for-ASD: A multi-atlas deep ensemble network for diagnosing autism spectrum disorder,”", "abstract": null, "year": null, "venue": "Computers in Biology and Medicine", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "“A semi-supervised autoencoder for disease diagnosis,”", "abstract": null, "year": null, "venue": "Neurocomputing", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "TACE: Tumor-Aware Counterfactual Explanations": { "paper_title": "TACE: Tumor-Aware Counterfactual Explanations", "arxiv_id": "2409.13045v1", "keyword": "explainable ai", "authors": [ "Eleonora Beatrice Rossi", "Eleonora Lopez", "Danilo Comminiello" ], "references": [ { "title": "OCTET: Object-aware Counterfactual Explanations", "abstract": "Nowadays, deep vision models are being widely deployed in safety-critical applications, e.g., autonomous driving, and explainability of such models is becoming a pressing concern. Among explanation methods, counterfactual explanations aim to find minimal and interpretable changes to the input image that would also change the output of the model to be explained. Such explanations point end-users at the main factors that impact the decision of the model. However, previous methods struggle to explain decision models trained on images with many objects, e.g., urban scenes, which are more difficult to work with but also arguably more critical to explain. In this work, we propose to tackle this issue with an object-centric framework for counterfactual explanation generation. Our method, inspired by recent generative modeling works, encodes the query image into a latent space that is structured in a way to ease object-level manipulations. Doing so, it provides the end-user with control over which search directions (e.g., spatial displacement of objects, style modification, etc.) are to be explored during the counterfactual generation. We conduct a set of experiments on counterfactual explanation benchmarks for driving scenes, and we show that our method can be adapted beyond classification, e.g., to explain semantic segmentation models. To complete our analysis, we design and run a user study that measures the usefulness of counterfactual explanations in understanding a decision model. Code is available at https://github.com/valeoai/OCTET.", "year": 2022, "venue": "Computer Vision and Pattern Recognition", "authors": [ "Mehdi Zemni", "Mickaël Chen", "Éloi Zablocki", "H. Ben-younes", "Patrick Pérez", "M.
Cord" ], "externalIds": { "DBLP": "journals/corr/abs-2211-12380", "ArXiv": "2211.12380", "DOI": "10.1109/CVPR52729.2023.01446", "CorpusId": 253761301 }, "url": "https://www.semanticscholar.org/paper/3877a045d853a8b938f039750cb6e0fb4aa9f83f", "referenceCount": 71, "citationCount": 19, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "CheXplaining in Style: Counterfactual Explanations for Chest X-rays using StyleGAN", "abstract": "Deep learning models used in medical image anal-ysis are prone to raising reliability concerns due to their black-box nature. To shed light on these black-box models, previous works predominantly focus on identifying the contribution of input features to the diagnosis, i.e., feature attribution. In this work, we explore counterfactual explanations to identify what patterns the models rely on for diagnosis. Specifically, we investigate the effect of changing features within chest X-rays on the classifier’s output to understand its decision mech-anism. We leverage a StyleGAN-based approach (StyleEx) to create counterfactual explanations for chest X-rays by manipulating specific latent directions in their latent space. In addition, we propose EigenFind to significantly reduce the computation time of generated explanations. We clinically evaluate the relevancy of our counterfactual explanations with the help of radiologists. Our code is publicly available. 1", "year": 2022, "venue": "arXiv.org", "authors": [ "Matan Atad", "V. Dmytrenko", "Yitong Li", "Xinyue Zhang", "Matthias Keicher", "J. Kirschke", "B. Wiestler", "Ashkan Khakzar", "N. Navab" ], "externalIds": { "ArXiv": "2207.07553", "DBLP": "journals/corr/abs-2207-07553", "DOI": "10.48550/arXiv.2207.07553", "CorpusId": 250607445 }, "url": "https://www.semanticscholar.org/paper/67add6a624cd81dfc6fee7f094bcd4b39dab8743", "referenceCount": 10, "citationCount": 18, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "Explaining in Style: Training a GAN to explain a classifier in StyleSpace", "abstract": "Image classification models can depend on multiple different semantic attributes of the image. An explanation of the decision of the classifier needs to both discover and visualize these properties. Here we present StylEx, a method for doing this, by training a generative model to specifically explain multiple attributes that underlie classifier decisions. A natural source for such attributes is the StyleSpace of StyleGAN, which is known to generate semantically meaningful dimensions in the image. However, because standard GAN training is not dependent on the classifier, it may not represent those attributes which are important for the classifier decision, and the dimensions of StyleSpace may represent irrelevant at-tributes. To overcome this, we propose a training procedure for a StyleGAN, which incorporates the classifier model, in order to learn a classifier-specific StyleSpace. Explanatory attributes are then selected from this space. These can be used to visualize the effect of changing multiple attributes per image, thus providing image-specific explanations. We apply StylEx to multiple domains, including animals, leaves, faces and retinal images. For these, we show how an image can be modified in different ways to change its classifier output. 
Our results show that the method finds attributes that align well with semantic ones, generate meaningful image-specific explanations, and are human-interpretable as measured in user-studies.", "year": 2021, "venue": "IEEE International Conference on Computer Vision", "authors": [ "Oran Lang", "Yossi Gandelsman", "Michal Yarom", "Yoav Wald", "G. Elidan", "Avinatan Hassidim", "W. Freeman", "Phillip Isola", "A. Globerson", "M. Irani", "Inbar Mosseri" ], "externalIds": { "ArXiv": "2104.13369", "DBLP": "conf/iccv/LangGYWEHFIGIM21", "MAG": "3159635074", "DOI": "10.1109/ICCV48922.2021.00073", "CorpusId": 233407984 }, "url": "https://www.semanticscholar.org/paper/0981cbb415a602b599a3549282429ca3acf35a85", "referenceCount": 39, "citationCount": 129, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Mathematics" ] }, { "title": "Explainable, trustworthy, and ethical machine learning for healthcare: A survey", "abstract": "With the advent of machine learning (ML) and deep learning (DL) empowered applications for critical applications like healthcare, the questions about liability, trust, and interpretability of their outputs are raising. The black-box nature of various DL models is a roadblock to clinical utilization. Therefore, to gain the trust of clinicians and patients, we need to provide explanations about the decisions of models. With the promise of enhancing the trust and transparency of black-box models, researchers are in the phase of maturing the field of eXplainable ML (XML). In this paper, we provided a comprehensive review of explainable and interpretable ML techniques for various healthcare applications. Along with highlighting security, safety, and robustness challenges that hinder the trustworthiness of ML, we also discussed the ethical issues arising because of the use of ML/DL for healthcare. We also describe how explainable and trustworthy ML can resolve all these ethical problems. Finally, we elaborate on the limitations of existing approaches and highlight various open research problems that require further development.", "year": 2021, "venue": "Comput. Biol. Medicine", "authors": [ "Khansa Rasheed", "A. Qayyum", "M. Ghaly", "Ala I. Al-Fuqaha", "Adeel Razi", "Junaid Qadir" ], "externalIds": { "DBLP": "journals/cbm/RasheedQGARQ22", "MAG": "3167976525", "DOI": "10.36227/TECHRXIV.14376179.V1", "CorpusId": 236751896, "PubMed": "36115302" }, "url": "https://www.semanticscholar.org/paper/ef77f88c475b2fb3fbb07a57435d72f42464c0cf", "referenceCount": 199, "citationCount": 116, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Explaining the Black-box Smoothly - A Counterfactual Approach", "abstract": "We propose a BlackBox Counterfactual Explainer, designed to explain image classification models for medical applications. Classical approaches (e.g., saliency maps) that assess feature importance do not explain how imaging features in important anatomical regions are relevant to the classification decision. Such reasoning is crucial for transparent decision-making in healthcare applications. Our framework explains the decision for a target class by gradually exaggerating the semantic effect of the class in a query image. We adopted a Generative Adversarial Network (GAN) to generate a progressive set of perturbations to a query image, such that the classification decision changes from its original class to its negation.
Our proposed loss function preserves essential details (e.g., support devices) in the generated images. We used counterfactual explanations from our framework to audit a classifier trained on a chest X-ray dataset with multiple labels. Clinical evaluation of model explanations is a challenging task. We proposed clinically-relevant quantitative metrics such as cardiothoracic ratio and the score of a healthy costophrenic recess to evaluate our explanations. We used these metrics to quantify the counterfactual changes between the populations with negative and positive decisions for a diagnosis by the given classifier. We conducted a human-grounded experiment with diagnostic radiology residents to compare different styles of explanations (no explanation, saliency map, cycleGAN explanation, and our counterfactual explanation) by evaluating different aspects of explanations: (1) understandability, (2) classifier's decision justification, (3) visual quality, (4) identity preservation, and (5) overall helpfulness of an explanation to the users. Our results show that our counterfactual explanation was the only explanation method that significantly improved the users' understanding of the classifier's decision compared to the no-explanation baseline. Our metrics established a benchmark for evaluating model explanation methods in medical images. Our explanations revealed that the classifier relied on clinically relevant radiographic features for its diagnostic decisions, thus making its decision-making process more transparent to the end-user.", "year": 2021, "venue": "Medical Image Anal.", "authors": [ "Junyu Chen", "Yong Du", "Yufan He", "W. Paul Segars", "Ye Li", "Eric C. Frey" ], "externalIds": { "ArXiv": "2101.04230", "DBLP": "journals/mia/SinglaEPWB23", "DOI": "10.36227/techrxiv.14777772", "CorpusId": 244478596, "PubMed": "36571975" }, "url": "https://www.semanticscholar.org/paper/fb8d5b02bc61eb3b8ee746704893e799b969e961", "referenceCount": 67, "citationCount": 83, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Engineering", "Medicine" ] }, { "title": "Bilateral Asymmetry Guided Counterfactual Generating Network for Mammogram Classification", "abstract": "Mammogram benign or malignant classification with only image-level labels is challenging due to the absence of lesion annotations. Motivated by the symmetric prior that the lesions on one side of breasts rarely appear in the corresponding areas on the other side, we explore to answer a counterfactual question to identify the lesion areas. This counterfactual question means: given an image with lesions, how would the features have behaved if there were no lesions in the image? To answer this question, we derive a new theoretical result based on the symmetric prior. Specifically, by building a causal model that entails such a prior for bilateral images, we identify to optimize the distances in distribution between i) the counterfactual features and the target side’s features in lesion-free areas; and ii) the counterfactual features and the reference side’s features in lesion areas. To realize these optimizations for better benign/malignant classification, we propose a counterfactual generative network, which is mainly composed of a Generative Adversarial Network and a prediction feedback mechanism, they are optimized jointly and prompt each other. Specifically, the former can further improve the classification performance by generating counterfactual features to calculate lesion areas.
On the other hand, the latter helps counterfactual generation by the supervision of classification loss. The utility of our method and the effectiveness of each module in our model can be verified by state-of-the-art performance on INBreast and an in-house dataset and ablation studies.", "year": 2020, "venue": "IEEE Transactions on Image Processing", "authors": [ "Chu-ran Wang", "Jing Li", "Fandong Zhang", "Xinwei Sun", "Hao Dong", "Yizhou Yu", "Yizhou Wang" ], "externalIds": { "DBLP": "journals/corr/abs-2009-14406", "ArXiv": "2009.14406", "MAG": "3091685812", "DOI": "10.1109/TIP.2021.3112053", "CorpusId": 222067165, "PubMed": "34534086" }, "url": "https://www.semanticscholar.org/paper/6955b9c80b7d0549b3250b7e65df9a249c16796b", "referenceCount": 39, "citationCount": 13, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Fast Real-time Counterfactual Explanations", "abstract": "Counterfactual explanations are considered, which is to answer why the prediction is class A but not B. Different from previous optimization based methods, an optimization-free Fast ReAl-time Counterfactual Explanation (FRACE) algorithm is proposed benefiting from the development of multi-domain image to image translation algorithms. Built from starGAN, a transformer is trained as a residual generator conditional on a classifier constrained under a proposal perturbation loss which maintains the content information of the query image, but just the class-specific semantic information is changed. The transformer can transfer the query image to any counterfactual class, and during inference, our explanation can be generated by it only within a forward time. It is fast and can satisfy the real-time practical application. Because of the adversarial training of GAN, our explanation is also more realistic compared to other counterparts. The experimental results demonstrate that our proposal is better than the existing state of the art in terms of quality and speed.", "year": 2020, "venue": "arXiv.org", "authors": [ "Yunxia Zhao" ], "externalIds": { "DBLP": "journals/corr/abs-2007-05684", "ArXiv": "2007.05684", "MAG": "3042074110", "CorpusId": 220496471 }, "url": "https://www.semanticscholar.org/paper/21c35e907cc48177abd694d576956ff78b6d30cf", "referenceCount": 27, "citationCount": 14, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Getting a CLUE: A Method for Explaining Uncertainty Estimates", "abstract": "Both uncertainty estimation and interpretability are important factors for trustworthy machine learning systems. However, there is little work at the intersection of these two areas. We address this gap by proposing a novel method for interpreting uncertainty estimates from differentiable probabilistic models, like Bayesian Neural Networks (BNNs). Our method, Counterfactual Latent Uncertainty Explanations (CLUE), indicates how to change an input, while keeping it on the data manifold, such that a BNN becomes more confident about the input's prediction. We validate CLUE through 1) a novel framework for evaluating counterfactual explanations of uncertainty, 2) a series of ablation experiments, and 3) a user study.
Our experiments show that CLUE outperforms baselines and enables practitioners to better understand which input patterns are responsible for predictive uncertainty.", "year": 2020, "venue": "International Conference on Learning Representations", "authors": [ "Javier Antor'an", "Umang Bhatt", "T. Adel", "Adrian Weller", "José Miguel Hernández-Lobato" ], "externalIds": { "MAG": "3132800470", "ArXiv": "2006.06848", "DBLP": "journals/corr/abs-2006-06848", "DOI": "10.17863/CAM.69929", "CorpusId": 219636236 }, "url": "https://www.semanticscholar.org/paper/aa9df1b1d7ca16ff3357a662714d15fd5340dce4", "referenceCount": 57, "citationCount": 98, "influentialCitationCount": 19, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Explanation by Progressive Exaggeration", "abstract": "As machine learning methods see greater adoption and implementation in high stakes applications such as medical image diagnosis, the need for model interpretability and explanation has become more critical. Classical approaches that assess feature importance (e.g. saliency maps) do not explain how and why a particular region of an image is relevant to the prediction. We propose a method that explains the outcome of a classification black-box by gradually exaggerating the semantic effect of a given class. Given a query input to a classifier, our method produces a progressive set of plausible variations of that query, which gradually changes the posterior probability from its original class to its negation. These counter-factually generated samples preserve features unrelated to the classification decision, such that a user can employ our method as a \"tuning knob\" to traverse a data manifold while crossing the decision boundary. Our method is model agnostic and only requires the output value and gradient of the predictor with respect to its input.", "year": 2019, "venue": "International Conference on Learning Representations", "authors": [ "Sumedha Singla", "Brian Pollack", "Junxiang Chen", "K. Batmanghelich" ], "externalIds": { "MAG": "2987086488", "DBLP": "journals/corr/abs-1911-00483", "ArXiv": "1911.00483", "CorpusId": 207878944 }, "url": "https://www.semanticscholar.org/paper/c0072c9c25fb94f6e62bb7b18520add3c2df12a9", "referenceCount": 45, "citationCount": 92, "influentialCitationCount": 16, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GANalyze: Toward Visual Definitions of Cognitive Image Properties", "abstract": "We introduce a framework that uses Generative Adversarial Networks (GANs) to study cognitive properties like memorability. These attributes are of interest because we do not have a concrete visual definition of what they entail. What does it look like for a dog to be more memorable? GANs allow us to generate a manifold of natural-looking images with fine-grained differences in their visual attributes. By navigating this manifold in directions that increase memorability, we can visualize what it looks like for a particular generated image to become more memorable. The resulting ``visual definitions\" surface image properties (like ``object size\") that may underlie memorability. Through behavioral experiments, we verify that our method indeed discovers image manipulations that causally affect human memory performance. We further demonstrate that the same framework can be used to analyze image aesthetics and emotional valence. 
ganalyze.csail.mit.edu.", "year": 2019, "venue": "IEEE International Conference on Computer Vision", "authors": [ "L. Goetschalckx", "A. Andonian", "A. Oliva", "Phillip Isola" ], "externalIds": { "MAG": "3003162010", "ArXiv": "1906.10112", "DBLP": "conf/iccv/GoetschalckxAOI19", "DOI": "10.1109/ICCV.2019.00584", "CorpusId": 195345534 }, "url": "https://www.semanticscholar.org/paper/4904a1d0271086115c402d797c6262c62546fa94", "referenceCount": 37, "citationCount": 290, "influentialCitationCount": 19, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Open Set Learning with Counterfactual Images", "abstract": null, "year": 2018, "venue": "European Conference on Computer Vision", "authors": [ "Lawrence Neal", "Matthew Lyle Olson", "Xiaoli Z. Fern", "Weng-Keen Wong", "Fuxin Li" ], "externalIds": { "MAG": "2895752198", "DBLP": "conf/eccv/NealOFWL18", "DOI": "10.1007/978-3-030-01231-1_38", "CorpusId": 51913282 }, "url": "https://www.semanticscholar.org/paper/330008af4074ef0e2b21787677783827d6a15056", "referenceCount": 30, "citationCount": 373, "influentialCitationCount": 76, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "BlobGAN: Spatially disen-tangled scene representations", "abstract": null, "year": 2022, "venue": "European Conference on Computer Vision (ECCV)", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Don't be Fooled: The Misinformation Effect of Explanations in Human-AI Collaboration": { "paper_title": "Don't be Fooled: The Misinformation Effect of Explanations in Human-AI Collaboration", "arxiv_id": "2409.12809v1", "keyword": "explainable ai", "authors": [ "Philipp Spitzer", "Joshua Holstein", "Katelyn Morrison", "Kenneth Holstein", "Gerhard Satzger", "Niklas Kühl" ], "references": [ { "title": "Transferring Domain Knowledge with (X)AI-Based Learning Systems", "abstract": "In numerous high-stakes domains, training novices via conventional learning systems does not suffice. To impart tacit knowledge, experts' hands-on guidance is imperative. However, training novices by experts is costly and time-consuming, increasing the need for alternatives. Explainable artificial intelligence (XAI) has conventionally been used to make black-box artificial intelligence systems interpretable. In this work, we utilize XAI as an alternative: An (X)AI system is trained on experts' past decisions and is then employed to teach novices by providing examples coupled with explanations. In a study with 249 participants, we measure the effectiveness of such an approach for a classification task. We show that (X)AI-based learning systems are able to induce learning in novices and that their cognitive styles moderate learning. Thus, we take the first steps to reveal the impact of XAI on human learning and point AI developers to future options to tailor the design of (X)AI-based learning systems.", "year": 2024, "venue": "arXiv.org", "authors": [ "Philipp Spitzer", "Niklas Kühl", "Marc Goutier", "Manuel Kaschura", "G. 
Satzger" ], "externalIds": { "DBLP": "journals/corr/abs-2406-01329", "ArXiv": "2406.01329", "DOI": "10.48550/arXiv.2406.01329", "CorpusId": 270217294 }, "url": "https://www.semanticscholar.org/paper/918152e46d623d0baa91e39df86076e566b30d32", "referenceCount": 76, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "LLMs for XAI: Future Directions for Explaining Explanations", "abstract": "In response to the demand for Explainable Artificial Intelligence (XAI), we investigate the use of Large Language Models (LLMs) to transform ML explanations into natural, human-readable narratives. Rather than directly explaining ML models using LLMs, we focus on refining explanations computed using existing XAI algorithms. We outline several research directions, including defining evaluation metrics, prompt design, comparing LLM models, exploring further training methods, and integrating external data. Initial experiments and user study suggest that LLMs offer a promising way to enhance the interpretability and usability of XAI.", "year": 2024, "venue": "arXiv.org", "authors": [ "Alexandra Zytek", "Sara Pido", "K. Veeramachaneni" ], "externalIds": { "ArXiv": "2405.06064", "DBLP": "journals/corr/abs-2405-06064", "DOI": "10.48550/arXiv.2405.06064", "CorpusId": 269740873 }, "url": "https://www.semanticscholar.org/paper/61279c16f5719783e96e792b2bc379aa48d60e47", "referenceCount": 21, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On the Quest for Effectiveness in Human Oversight: Interdisciplinary Perspectives", "abstract": "Human oversight is currently discussed as a potential safeguard to counter some of the negative aspects of high-risk AI applications. This prompts a critical examination of the role and conditions necessary for what is prominently termed effective or meaningful human oversight of these systems. This paper investigates effective human oversight by synthesizing insights from psychological, legal, philosophical, and technical domains. Based on the claim that the main objective of human oversight is risk mitigation, we propose a viable understanding of effectiveness in human oversight: for human oversight to be effective, the oversight person has to have (a) sufficient causal power with regard to the system and its effects, (b) suitable epistemic access to relevant aspects of the situation, (c) self-control, and (d) fitting intentions for their role. Furthermore, we argue that this is equivalent to saying that an oversight person is effective if and only if they are morally responsible and have fitting intentions. Against this backdrop, we suggest facilitators and inhibitors of effectiveness in human oversight when striving for practical applicability. We discuss factors in three domains, namely, the technical design of the system, individual factors of oversight persons, and the environmental circumstances in which they operate. Finally, this paper scrutinizes the upcoming AI Act of the European Union – in particular Article 14 on Human Oversight – as an exemplary regulatory framework in which we study the practicality of our understanding of effective human oversight. 
By analyzing the provisions and implications of the European AI Act proposal, we pinpoint how far that proposal aligns with our analyses regarding effective human oversight as well as how it might get enriched by our conceptual understanding of effectiveness in human oversight.", "year": 2024, "venue": "Conference on Fairness, Accountability and Transparency", "authors": [ "Sarah Sterz", "Kevin Baum", "Sebastian Biewer", "Holger Hermanns", "Anne Lauber-Rönsberg", "Philip Meinel", "Markus Langer" ], "externalIds": { "DBLP": "conf/fat/Sterz0BHLML24", "ArXiv": "2404.04059", "DOI": "10.1145/3630106.3659051", "CorpusId": 268987490 }, "url": "https://www.semanticscholar.org/paper/daa38a0aa43fc8ca37c0f25e9fd7a580f3cff018", "referenceCount": 90, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Complementarity in Human-AI Collaboration: Concept, Sources, and Evidence", "abstract": "Artificial intelligence (AI) can improve human decision-making in various application areas. Ideally, collaboration between humans and AI should lead to complementary team performance (CTP) -- a level of performance that neither of them can attain individually. So far, however, CTP has rarely been observed, suggesting an insufficient understanding of the complementary constituents in human-AI collaboration that can contribute to CTP in decision-making. This work establishes a holistic theoretical foundation for understanding and developing human-AI complementarity. We conceptualize complementarity by introducing and formalizing the notion of complementarity potential and its realization. Moreover, we identify and outline sources that explain CTP. We illustrate our conceptualization by applying it in two empirical studies exploring two different sources of complementarity potential. In the first study, we focus on information asymmetry as a source and, in a real estate appraisal use case, demonstrate that humans can leverage unique contextual information to achieve CTP. In the second study, we focus on capability asymmetry as an alternative source, demonstrating how heterogeneous capabilities can help achieve CTP. Our work provides researchers with a theoretical foundation of complementarity in human-AI decision-making and demonstrates that leveraging sources of complementarity potential constitutes a viable pathway toward effective human-AI collaboration.", "year": 2024, "venue": "arXiv.org", "authors": [ "Patrick Hemmer", "Max Schemmer", "Niklas Kuhl", "Michael Vossing", "G. Satzger" ], "externalIds": { "ArXiv": "2404.00029", "DBLP": "journals/corr/abs-2404-00029", "DOI": "10.48550/arXiv.2404.00029", "CorpusId": 268820097 }, "url": "https://www.semanticscholar.org/paper/df29a654e19d4551a1e222f019c819b56f8ddd7c", "referenceCount": 105, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Designing explainable AI to improve human-AI team performance: A medical stakeholder-driven scoping review", "abstract": null, "year": 2024, "venue": "Artif. Intell. Medicine", "authors": [ "H. V. Subramanian", "Casey Canfield", "Daniel B. 
Shank" ], "externalIds": { "DBLP": "journals/artmed/SubramanianCS24", "DOI": "10.1016/j.artmed.2024.102780", "CorpusId": 267077640, "PubMed": "38462282" }, "url": "https://www.semanticscholar.org/paper/3b87653f0e16b41b0e79c86be9c04de0e4bbddfe", "referenceCount": 104, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Combating Misinformation in the Era of Generative AI Models", "abstract": "Misinformation has been a persistent and harmful phenomenon affecting our society in various ways, including individuals' physical health and economic stability. With the rise of short video platforms and related applications, the spread of multi-modal misinformation, encompassing images, texts, audios, and videos have exacerbated these concerns. The introduction of generative AI models like ChatGPT and Stable Diffusion has further complicated matters, giving rise to Artificial Intelligence Generated Content (AIGC) and presenting new challenges in detecting and mitigating misinformation. Consequently, traditional approaches to misinformation detection and intervention have become inadequate in this evolving landscape. This paper explores the challenges posed by AIGC in the context of misinformation. It examines the issue from psychological and societal perspectives, and explores the subtle manipulation traces found in AIGC at signal, perceptual, semantic, and human levels. By scrutinizing manipulation traces such as signal manipulation, semantic inconsistencies, logical incoherence, and psychological strategies, our objective is to tackle AI-generated misinformation and provide a conceptual design of systematic explainable solution. Ultimately, we aim for this paper to contribute valuable insights into combating misinformation, particularly in the era of AIGC.", "year": 2023, "venue": "ACM Multimedia", "authors": [ "Danni Xu", "Shaojing Fan", "Mohan S. Kankanhalli" ], "externalIds": { "DBLP": "conf/mm/XuFK23", "DOI": "10.1145/3581783.3612704", "CorpusId": 264492332 }, "url": "https://www.semanticscholar.org/paper/073e120d783e9b0b9fc6cbe60898bc5d4365a4c8", "referenceCount": 93, "citationCount": 22, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Towards Effective Human-AI Decision-Making: The Role of Human Learning in Appropriate Reliance on AI Advice", "abstract": "The true potential of human-AI collaboration lies in exploiting the complementary capabilities of humans and AI to achieve a joint performance superior to that of the individual AI or human, i.e., to achieve complementary team performance (CTP). To realize this complementarity potential, humans need to exercise discretion in following AI 's advice, i.e., appropriately relying on the AI's advice. While previous work has focused on building a mental model of the AI to assess AI recommendations, recent research has shown that the mental model alone cannot explain appropriate reliance. We hypothesize that, in addition to the mental model, human learning is a key mediator of appropriate reliance and, thus, CTP. In this study, we demonstrate the relationship between learning and appropriate reliance in an experiment with 100 participants. 
This work provides fundamental concepts for analyzing reliance and derives implications for the effective design of human-AI decision-making.", "year": 2023, "venue": "International Conference on Interaction Sciences", "authors": [ "Max Schemmer", "Andrea Bartos", "Philipp Spitzer", "Patrick Hemmer", "Niklas Kuhl", "Jonas Liebschner", "G. Satzger" ], "externalIds": { "DBLP": "conf/icis/SchemmerBSH0LS23", "ArXiv": "2310.02108", "DOI": "10.48550/arXiv.2310.02108", "CorpusId": 263608597 }, "url": "https://www.semanticscholar.org/paper/655da0ed37f335d5045a750b56ec9c2ad025b432", "referenceCount": 70, "citationCount": 5, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Impact of Imperfect XAI on Human-AI Decision-Making", "abstract": "Explainability techniques are rapidly being developed to improve human-AI decision-making across various cooperative work settings. Consequently, previous research has evaluated how decision-makers collaborate with imperfect AI by investigating appropriate reliance and task performance with the aim of designing more human-centered computer-supported collaborative tools. Several human-centered explainable AI (XAI) techniques have been proposed in hopes of improving decision-makers' collaboration with AI; however, these techniques are grounded in findings from previous studies that primarily focus on the impact of incorrect AI advice. Few studies acknowledge the possibility of the explanations being incorrect even if the AI advice is correct. Thus, it is crucial to understand how imperfect XAI affects human-AI decision-making. In this work, we contribute a robust, mixed-methods user study with 136 participants to evaluate how incorrect explanations influence humans' decision-making behavior in a bird species identification task, taking into account their level of expertise and an explanation's level of assertiveness. Our findings reveal the influence of imperfect XAI and humans' level of expertise on their reliance on AI and human-AI team performance. We also discuss how explanations can deceive decision-makers during human-AI collaboration. Hence, we shed light on the impacts of imperfect XAI in the field of computer-supported cooperative work and provide guidelines for designers of human-AI collaboration systems.", "year": 2023, "venue": "Proc. ACM Hum. Comput. Interact.", "authors": [ "Katelyn Morrison", "Philipp Spitzer", "Violet Turri", "Michelle C. Feng", "Niklas Kuhl", "Adam Perer" ], "externalIds": { "DBLP": "journals/corr/abs-2307-13566", "ArXiv": "2307.13566", "DOI": "10.1145/3641022", "CorpusId": 260154746 }, "url": "https://www.semanticscholar.org/paper/1f83050a22da19fe4c0e7b0ac04768c92ecd6338", "referenceCount": 101, "citationCount": 7, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Dealing with information overload: a comprehensive review", "abstract": "Information overload is a problem that is being exacerbated by the ongoing digitalization of the world of work and the growing use of information and communication technologies. Therefore, the aim of this systematic literature review is to provide an insight into existing measures for prevention and intervention related to information overload. The methodological approach of the systematic review is based on the PRISMA standards. 
A keyword search in three interdisciplinary scientific databases and other more practice-oriented databases resulted in the identification of 87 studies, field reports, and conceptual papers that were included in the review. The results show that a considerable number of papers have been published on interventions on the behavioral prevention level. At the level of structural prevention, there are also many proposals on how to design work to reduce information overload. A further distinction can be made between work design approaches at the level of information and communication technology and at the level of teamwork and organizational regulations. Although the identified studies cover a wide range of possible interventions and design approaches to address information overload, the strength of the evidence from these studies is mixed.", "year": 2023, "venue": "Frontiers in Psychology", "authors": [ "Miriam Arnold", "Mascha Goldschmitt", "T. Rigotti" ], "externalIds": { "PubMedCentral": "10322198", "DOI": "10.3389/fpsyg.2023.1122200", "CorpusId": 259352589, "PubMed": "37416535" }, "url": "https://www.semanticscholar.org/paper/a6f73783549ba3e6f145f5640113dc31c07dbfbe", "referenceCount": 109, "citationCount": 27, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "ML-Based Teaching Systems: A Conceptual Framework", "abstract": "As the shortage of skilled workers continues to be a pressing issue, exacerbated by demographic change, it is becoming a critical challenge for organizations to preserve the knowledge of retiring experts and pass it on to novices. While this knowledge transfer has traditionally occurred through personal interaction, it lacks scalability and requires significant resources and time. IT-based teaching systems have addressed this scalability issue, but their development is still tedious and time-consuming. In this work, we investigate the potential of machine learning (ML) models to facilitate knowledge transfer in an organizational context, leading to more cost-effective IT-based teaching systems. Through a systematic literature review, we examine key concepts, themes, and dimensions to understand better and design ML-based teaching systems. To do so, we capture and consolidate the capabilities of ML models in IT-based teaching systems, inductively analyze relevant concepts in this context, and determine their interrelationships. We present our findings in the form of a review of the key concepts, themes, and dimensions to understand and inform on ML-based teaching systems. Building on these results, our work contributes to research on computer-supported cooperative work by conceptualizing how ML-based teaching systems can preserve expert knowledge and facilitate its transfer from SMEs to human novices. In this way, we shed light on this emerging subfield of human-computer interaction and serve to build an interdisciplinary research agenda.", "year": 2023, "venue": "Proc. ACM Hum. Comput. Interact.", "authors": [ "Philipp Spitzer", "Niklas Kühl", "Daniel Heinz", "G. 
Satzger" ], "externalIds": { "ArXiv": "2305.07681", "DBLP": "journals/pacmhci/Spitzer0HS23", "DOI": "10.1145/3610197", "CorpusId": 258686755 }, "url": "https://www.semanticscholar.org/paper/97cd7cd211a0020e473bd67ba4ed29f0ec50dad9", "referenceCount": 116, "citationCount": 3, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Synthetic Lies: Understanding AI-Generated Misinformation and Evaluating Algorithmic and Human Solutions", "abstract": "Large language models have abilities in creating high-volume human-like texts and can be used to generate persuasive misinformation. However, the risks remain under-explored. To address the gap, this work first examined characteristics of AI-generated misinformation (AI-misinfo) compared with human creations, and then evaluated the applicability of existing solutions. We compiled human-created COVID-19 misinformation and abstracted it into narrative prompts for a language model to output AI-misinfo. We found significant linguistic differences within human-AI pairs, and patterns of AI-misinfo in enhancing details, communicating uncertainties, drawing conclusions, and simulating personal tones. While existing models remained capable of classifying AI-misinfo, a significant performance drop compared to human-misinfo was observed. Results suggested that existing information assessment guidelines had questionable applicability, as AI-misinfo tended to meet criteria in evidence credibility, source transparency, and limitation acknowledgment. We discuss implications for practitioners, researchers, and journalists, as AI can create new challenges to the societal problem of misinformation.", "year": 2023, "venue": "International Conference on Human Factors in Computing Systems", "authors": [ "Jiawei Zhou", "Yixuan Zhang", "Qianni Luo", "Andrea G. Parker", "M. de Choudhury" ], "externalIds": { "DBLP": "conf/chi/ZhouZLPC23", "DOI": "10.1145/3544548.3581318", "CorpusId": 257633591 }, "url": "https://www.semanticscholar.org/paper/fdb117c68332d23cb1bd57e3cc36b8c9cfbdcbf7", "referenceCount": 108, "citationCount": 135, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Impact Of Explainable AI On Cognitive Load: Insights From An Empirical Study", "abstract": "While the emerging research field of explainable artificial intelligence (XAI) claims to address the lack of explainability in high-performance machine learning models, in practice, XAI targets developers rather than actual end-users. Unsurprisingly, end-users are often unwilling to use XAI-based decision support systems. Similarly, there is limited interdisciplinary research on end-users' behavior during XAI explanations usage, rendering it unknown how explanations may impact cognitive load and further affect end-user performance. Therefore, we conducted an empirical study with 271 prospective physicians, measuring their cognitive load, task performance, and task time for distinct implementation-independent XAI explanation types using a COVID-19 use case. We found that these explanation types strongly influence end-users' cognitive load, task performance, and task time. Further, we contextualized a mental efficiency metric, ranking local XAI explanation types best, to provide recommendations for future applications and implications for sociotechnical XAI research.", "year": 2023, "venue": "European Conference on Information Systems", "authors": [ "L. 
Herm" ], "externalIds": { "DBLP": "conf/ecis/Herm23", "ArXiv": "2304.08861", "DOI": "10.48550/arXiv.2304.08861", "CorpusId": 258187065 }, "url": "https://www.semanticscholar.org/paper/b872482af407059ae02b9458522cc39bf74bf0f2", "referenceCount": 85, "citationCount": 11, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Understanding Uncertainty: How Lay Decision-makers Perceive and Interpret Uncertainty in Human-AI Decision Making", "abstract": "Decision Support Systems (DSS) based on Machine Learning (ML) often aim to assist lay decision-makers, who are not math-savvy, in making high-stakes decisions. However, existing ML-based DSS are not always transparent about the probabilistic nature of ML predictions and how uncertain each prediction is. This lack of transparency could give lay decision-makers a false sense of reliability. Growing calls for AI transparency have led to increasing efforts to quantify and communicate model uncertainty. However, there are still gaps in knowledge regarding how and why the decision-makers utilize ML uncertainty information in their decision process. Here, we conducted a qualitative, think-aloud user study with 17 lay decision-makers who interacted with three different DSS: 1) interactive visualization, 2) DSS based on an ML model that provides predictions without uncertainty information, and 3) the same DSS with uncertainty information. Our qualitative analysis found that communicating uncertainty about ML predictions forced participants to slow down and think analytically about their decisions. This in turn made participants more vigilant, resulting in reduction in over-reliance on ML-based DSS. Our work contributes empirical knowledge on how lay decision-makers perceive, interpret, and make use of uncertainty information when interacting with DSS. Such foundational knowledge informs the design of future ML-based DSS that embrace transparent uncertainty communication.", "year": 2023, "venue": "International Conference on Intelligent User Interfaces", "authors": [ "Snehal Prabhudesai", "Leyao Yang", "Sumit Asthana", "Xun Huan", "Q. Liao", "Nikola Banovic" ], "externalIds": { "DBLP": "conf/iui/PrabhudesaiYAHL23", "DOI": "10.1145/3581641.3584033", "CorpusId": 257767905 }, "url": "https://www.semanticscholar.org/paper/03321964d1fb73a41558401df1fed0a483f8f0d5", "referenceCount": 113, "citationCount": 22, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Human-AI Collaboration: The Effect of AI Delegation on Human Task Performance and Task Satisfaction", "abstract": "Recent work has proposed artificial intelligence (AI) models that can learn to decide whether to make a prediction for an instance of a task or to delegate it to a human by considering both parties’ capabilities. In simulations with synthetically generated or context-independent human predictions, delegation can help improve the performance of human-AI teams—compared to humans or the AI model completing the task alone. However, so far, it remains unclear how humans perform and how they perceive the task when they are aware that an AI model delegated task instances to them. In an experimental study with 196 participants, we show that task performance and task satisfaction improve through AI delegation, regardless of whether humans are aware of the delegation. 
Additionally, we identify humans’ increased levels of self-efficacy as the underlying mechanism for these improvements in performance and satisfaction. Our findings provide initial evidence that allowing AI models to take over more management responsibilities can be an effective form of human-AI collaboration in workplaces.", "year": 2023, "venue": "International Conference on Intelligent User Interfaces", "authors": [ "Patrick Hemmer", "Monika Westphal", "Max Schemmer", "S. Vetter", "Michael Vossing", "G. Satzger" ], "externalIds": { "DBLP": "journals/corr/abs-2303-09224", "ArXiv": "2303.09224", "DOI": "10.1145/3581641.3584052", "CorpusId": 257557813 }, "url": "https://www.semanticscholar.org/paper/8287c93cf5382ecbceedec788e24f28511396c16", "referenceCount": 80, "citationCount": 19, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Effects of AI and Logic-Style Explanations on Users’ Decisions Under Different Levels of Uncertainty", "abstract": "Existing eXplainable Artificial Intelligence (XAI) techniques support people in interpreting AI advice. However, although previous work evaluates the users’ understanding of explanations, factors influencing the decision support are largely overlooked in the literature. This article addresses this gap by studying the impact of user uncertainty, AI correctness, and the interaction between AI uncertainty and explanation logic-styles for classification tasks. We conducted two separate studies: one requesting participants to recognize handwritten digits and one to classify the sentiment of reviews. To assess the decision making, we analyzed the task performance, agreement with the AI suggestion, and the user’s reliance on the XAI interface elements. Participants make their decision relying on three pieces of information in the XAI interface (image or text instance, AI prediction, and explanation). Participants were shown one explanation style (between-participants design) according to three styles of logical reasoning (inductive, deductive, and abductive). This allowed us to study how different levels of AI uncertainty influence the effectiveness of different explanation styles. The results show that user uncertainty and AI correctness on predictions significantly affected users’ classification decisions considering the analyzed metrics. In both domains (images and text), users relied mainly on the instance to decide. Users were usually overconfident about their choices, and this evidence was more pronounced for text. Furthermore, the inductive style explanations led to overreliance on the AI advice in both domains—it was the most persuasive, even when the AI was incorrect. The abductive and deductive styles have complex effects depending on the domain and the AI uncertainty levels.", "year": 2023, "venue": "ACM Trans. Interact. Intell. Syst.", "authors": [ "Federico Maria Cau", "H. Hauptmann", "L. D. Spano", "N. 
Tintarev" ], "externalIds": { "DBLP": "journals/tiis/CauHST23", "DOI": "10.1145/3588320", "CorpusId": 257535487 }, "url": "https://www.semanticscholar.org/paper/5222c6b40b1acc8f839c6b19c3877031ab3f2a2f", "referenceCount": 90, "citationCount": 6, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Generation Probabilities Are Not Enough: Exploring the Effectiveness of Uncertainty Highlighting in AI-Powered Code Completions", "abstract": "Large-scale generative models enabled the development of AI-powered code completion tools to assist programmers in writing code. However, much like other AI-powered tools, AI-powered code completions are not always accurate, potentially introducing bugs or even security vulnerabilities into code if not properly detected and corrected by a human programmer. One technique that has been proposed and implemented to help programmers identify potential errors is to highlight uncertain tokens. However, there have been no empirical studies exploring the effectiveness of this technique-- nor investigating the different and not-yet-agreed-upon notions of uncertainty in the context of generative models. We explore the question of whether conveying information about uncertainty enables programmers to more quickly and accurately produce code when collaborating with an AI-powered code completion tool, and if so, what measure of uncertainty best fits programmers' needs. Through a mixed-methods study with 30 programmers, we compare three conditions: providing the AI system's code completion alone, highlighting tokens with the lowest likelihood of being generated by the underlying generative model, and highlighting tokens with the highest predicted likelihood of being edited by a programmer. We find that highlighting tokens with the highest predicted likelihood of being edited leads to faster task completion and more targeted edits, and is subjectively preferred by study participants. In contrast, highlighting tokens according to their probability of being generated does not provide any benefit over the baseline with no highlighting. We further explore the design space of how to convey uncertainty in AI-powered code completion tools, and find that programmers prefer highlights that are granular, informative, interpretable, and not overwhelming.", "year": 2023, "venue": "arXiv.org", "authors": [ "Helena Vasconcelos", "Gagan Bansal", "Adam Fourney", "Q. Liao", "Jennifer Wortman Vaughan" ], "externalIds": { "DBLP": "journals/corr/abs-2302-07248", "ArXiv": "2302.07248", "DOI": "10.48550/arXiv.2302.07248", "CorpusId": 256846746 }, "url": "https://www.semanticscholar.org/paper/7c817fa57edd087820d3b4d16e4d1f40d4cb5200", "referenceCount": 61, "citationCount": 26, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Appropriate Reliance on AI Advice: Conceptualization and the Effect of Explanations", "abstract": "AI advice is becoming increasingly popular, e.g., in investment and medical treatment decisions. As this advice is typically imperfect, decision-makers have to exert discretion as to whether actually follow that advice: they have to “appropriately” rely on correct and turn down incorrect advice. However, current research on appropriate reliance still lacks a common definition as well as an operational measurement concept. Additionally, no in-depth behavioral experiments have been conducted that help understand the factors influencing this behavior. 
In this paper, we propose Appropriateness of Reliance (AoR) as an underlying, quantifiable two-dimensional measurement concept. We develop a research model that analyzes the effect of providing explanations for AI advice. In an experiment with 200 participants, we demonstrate how these explanations influence the AoR, and, thus, the effectiveness of AI advice. Our work contributes fundamental concepts for the analysis of reliance behavior and the purposeful design of AI advisors.", "year": 2023, "venue": "International Conference on Intelligent User Interfaces", "authors": [ "Max Schemmer", "Niklas Kühl", "Carina Benz", "Andrea Bartos", "G. Satzger" ], "externalIds": { "ArXiv": "2302.02187", "DBLP": "conf/iui/SchemmerKBBS23", "DOI": "10.1145/3581641.3584066", "CorpusId": 256616418 }, "url": "https://www.semanticscholar.org/paper/95f751e8535a6d7f8fe733ab4729f738ef0566c4", "referenceCount": 79, "citationCount": 43, "influentialCitationCount": 6, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Rams, hounds and white boxes: Investigating human-AI collaboration protocols in medical diagnosis", "abstract": null, "year": 2023, "venue": "Artif. Intell. Medicine", "authors": [ "F. Cabitza", "Andrea Campagner", "Luca Ronzio", "M. Cameli", "G. Mandoli", "M. C. Pastore", "L. Sconfienza", "Duarte Folgado", "M. Barandas", "Hugo Gamboa" ], "externalIds": { "DBLP": "journals/artmed/CabitzaCRCMPSFB23", "DOI": "10.1016/j.artmed.2023.102506", "CorpusId": 256724279, "PubMed": "36990586" }, "url": "https://www.semanticscholar.org/paper/756b3334fb2b865894dfe9d37d348ba6140751ae", "referenceCount": 58, "citationCount": 26, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Decision control and explanations in human-AI collaboration: Improving user perceptions and compliance", "abstract": null, "year": 2023, "venue": "Computers in Human Behavior", "authors": [ "Monika Westphal", "Michael Vössing", "G. Satzger", "G. Yom-Tov", "A. Rafaeli" ], "externalIds": { "DBLP": "journals/chb/WestphalVSYR23", "DOI": "10.1016/j.chb.2023.107714", "CorpusId": 256966039 }, "url": "https://www.semanticscholar.org/paper/4c264051587938fcb0843e71d9081d4920e8ddf8", "referenceCount": 83, "citationCount": 14, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Understanding the Role of Human Intuition on Reliance in Human-AI Decision-Making with Explanations", "abstract": "AI explanations are often mentioned as a way to improve human-AI decision-making, but empirical studies have not found consistent evidence of explanations' effectiveness and, on the contrary, suggest that they can increase overreliance when the AI system is wrong. While many factors may affect reliance on AI support, one important factor is how decision-makers reconcile their own intuition---beliefs or heuristics, based on prior knowledge, experience, or pattern recognition, used to make judgments---with the information provided by the AI system to determine when to override AI predictions. We conduct a think-aloud, mixed-methods study with two explanation types (feature- and example-based) for two prediction tasks to explore how decision-makers' intuition affects their use of AI predictions and explanations, and ultimately their choice of when to rely on AI. 
Our results identify three types of intuition involved in reasoning about AI predictions and explanations: intuition about the task outcome, features, and AI limitations. Building on these, we summarize three observed pathways for decision-makers to apply their own intuition and override AI predictions. We use these pathways to explain why (1) the feature-based explanations we used did not improve participants' decision outcomes and increased their overreliance on AI, and (2) the example-based explanations we used improved decision-makers' performance over feature-based explanations and helped achieve complementary human-AI performance. Overall, our work identifies directions for further development of AI decision-support systems and explanation methods that help decision-makers effectively apply their intuition to achieve appropriate reliance on AI.", "year": 2023, "venue": "Proc. ACM Hum. Comput. Interact.", "authors": [ "Valerie Chen", "Q. Liao", "Jennifer Wortman Vaughan", "Gagan Bansal" ], "externalIds": { "DBLP": "journals/corr/abs-2301-07255", "ArXiv": "2301.07255", "DOI": "10.1145/3610219", "CorpusId": 255998499 }, "url": "https://www.semanticscholar.org/paper/e632d642dc115a7f4cb7d881481ed61e70200af8", "referenceCount": 95, "citationCount": 57, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Explanations, Fairness, and Appropriate Reliance in Human-AI Decision-Making", "abstract": "In this work, we study the effects of feature-based explanations on distributive fairness of AI-assisted decisions, specifically focusing on the task of predicting occupations from short textual bios. We also investigate how any effects are mediated by humans’ fairness perceptions and their reliance on AI recommendations. Our findings show that explanations influence fairness perceptions, which, in turn, relate to humans’ tendency to adhere to AI recommendations. However, we see that such explanations do not enable humans to discern correct and incorrect AI recommendations. Instead, we show that they may affect reliance irrespective of the correctness of AI recommendations. Depending on which features an explanation highlights, this can foster or hinder distributive fairness: when explanations highlight features that are task-irrelevant and evidently associated with the sensitive attribute, this prompts overrides that counter AI recommendations that align with gender stereotypes. Meanwhile, if explanations appear task-relevant, this induces reliance behavior that reinforces stereotype-aligned errors. These results imply that feature-based explanations are not a reliable mechanism to improve distributive fairness.", "year": 2022, "venue": "International Conference on Human Factors in Computing Systems", "authors": [ "Jakob Schoeffer", "Maria De-Arteaga", "N. Kuehl" ], "externalIds": { "DBLP": "conf/chi/SchoefferD024", "ArXiv": "2209.11812", "DOI": "10.1145/3613904.3642621", "CorpusId": 252531872 }, "url": "https://www.semanticscholar.org/paper/c0bb739a0f5c68d4115c1b73a16070a27ec1039b", "referenceCount": 136, "citationCount": 19, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Advancing Human-AI Complementarity: The Impact of User Expertise and Algorithmic Tuning on Joint Decision Making", "abstract": "Human-AI collaboration for decision-making strives to achieve team performance that exceeds the performance of humans or AI alone. 
However, many factors can impact success of Human-AI teams, including a user’s domain expertise, mental models of an AI system, trust in recommendations, and more. This article reports on a study that examines users’ interactions with three simulated algorithmic models, all with equivalent accuracy rates but each tuned differently in terms of true positive and true negative rates. Our study examined user performance in a non-trivial blood vessel labeling task where participants indicated whether a given blood vessel was flowing or stalled. Users completed 140 trials across multiple stages, first without an AI and then with recommendations from an AI-Assistant. Although all users had prior experience with the task, their levels of proficiency varied widely. Our results demonstrated that while recommendations from an AI-Assistant can aid in users’ decision making, several underlying factors, including user base expertise and complementary human-AI tuning, significantly impact the overall team performance. First, users’ base performance matters, particularly in comparison to the performance level of the AI. Novice users improved, but not to the accuracy level of the AI. Highly proficient users were generally able to discern when they should follow the AI recommendation and typically maintained or improved their performance. Mid-performers, who had a similar level of accuracy to the AI, were most variable in terms of whether the AI recommendations helped or hurt their performance. Second, tuning an AI algorithm to complement users’ strengths and weaknesses also significantly impacted users’ performance. For example, users in our study were better at detecting flowing blood vessels, so when the AI was tuned to reduce false negatives (at the expense of increasing false positives), users were able to reject those recommendations more easily and improve in accuracy. Finally, users’ perception of the AI’s performance relative to their own performance had an impact on whether users’ accuracy improved when given recommendations from the AI. Overall, this work reveals important insights on the complex interplay of factors influencing Human-AI collaboration and provides recommendations on how to design and tune AI algorithms to complement users in decision-making tasks.", "year": 2022, "venue": "ACM Trans. Comput. Hum. Interact.", "authors": [ "K. Inkpen", "Shreya Chappidi", "Keri Mallari", "Besmira Nushi", "Divya Ramesh", "Pietro Michelucci", "Vani Mandava", "Libuvse Hannah Vepvrek", "Gabrielle Quinn" ], "externalIds": { "DBLP": "journals/corr/abs-2208-07960", "ArXiv": "2208.07960", "DOI": "10.1145/3534561", "CorpusId": 251622535 }, "url": "https://www.semanticscholar.org/paper/80f1fd898ee1428a342d08377c7cf0c23b17c933", "referenceCount": 66, "citationCount": 24, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Grounded Copilot: How Programmers Interact with Code-Generating Models", "abstract": "Powered by recent advances in code-generating models, AI assistants like Github Copilot promise to change the face of programming forever. But what is this new face of programming? We present the first grounded theory analysis of how programmers interact with Copilot, based on observing 20 participants—with a range of prior experience using the assistant—as they solve diverse programming tasks across four languages. 
Our main finding is that interactions with programming assistants are bimodal: in acceleration mode, the programmer knows what to do next and uses Copilot to get there faster; in exploration mode, the programmer is unsure how to proceed and uses Copilot to explore their options. Based on our theory, we provide recommendations for improving the usability of future AI programming assistants.", "year": 2022, "venue": "Proc. ACM Program. Lang.", "authors": [ "Shraddha Barke", "M. James", "N. Polikarpova" ], "externalIds": { "DBLP": "journals/pacmpl/BarkeJP23", "ArXiv": "2206.15000", "DOI": "10.1145/3586030", "CorpusId": 250144196 }, "url": "https://www.semanticscholar.org/paper/8dd412cd31592ba633b5dac8b2e7b4c679ec1c0a", "referenceCount": 67, "citationCount": 212, "influentialCitationCount": 24, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "GitHub Copilot AI pair programmer: Asset or Liability?", "abstract": "Automatic program synthesis is a long-lasting dream in software engineering. Recently, a promising Deep Learning (DL) based solution, called Copilot, has been proposed by OpenAI and Microsoft as an industrial product. Although some studies evaluate the correctness of Copilot solutions and report its issues, more empirical evaluations are necessary to understand how developers can benefit from it effectively. In this paper, we study the capabilities of Copilot in two different programming tasks: (i) generating (and reproducing) correct and efficient solutions for fundamental algorithmic problems, and (ii) comparing Copilot's proposed solutions with those of human programmers on a set of programming tasks. For the former, we assess the performance and functionality of Copilot in solving selected fundamental problems in computer science, like sorting and implementing data structures. In the latter, a dataset of programming problems with human-provided solutions is used. The results show that Copilot is capable of providing solutions for almost all fundamental algorithmic problems, however, some solutions are buggy and non-reproducible. Moreover, Copilot has some difficulties in combining multiple methods to generate a solution. Comparing Copilot to humans, our results show that the correct ratio of humans' solutions is greater than Copilot's suggestions, while the buggy solutions generated by Copilot require less effort to be repaired.", "year": 2022, "venue": "Journal of Systems and Software", "authors": [ "Arghavan Moradi Dakhel", "Vahid Majdinasab", "Amin Nikanjam", "Foutse Khomh", "M. Desmarais", "Z. Jiang" ], "externalIds": { "ArXiv": "2206.15331", "DBLP": "journals/jss/DakhelMNKDJ23", "DOI": "10.48550/arXiv.2206.15331", "CorpusId": 250144223 }, "url": "https://www.semanticscholar.org/paper/d6954c43aa1ca197319c45d3988bc8fcec3de976", "referenceCount": 66, "citationCount": 213, "influentialCitationCount": 7, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Trust in Human-AI Interaction: Scoping Out Models, Measures, and Methods", "abstract": "Trust has emerged as a key factor in people's interactions with AI-infused systems. Yet, little is known about what models of trust have been used and for what systems: robots, virtual characters, smart vehicles, decision aids, or others. Moreover, there is yet no known standard approach to measuring trust in AI. This scoping review maps out the state of affairs on trust in human-AI interaction (HAII) from the perspectives of models, measures, and methods. 
Findings suggest that trust is an important and multi-faceted topic of study within HAII contexts. However, most work is under-theorized and under-reported, generally not using established trust models and missing details about methods, especially Wizard of Oz. We offer several targets for systematic review work as well as a research agenda for combining the strengths and addressing the weaknesses of the current literature.", "year": 2022, "venue": "CHI Extended Abstracts", "authors": [ "Takane Ueno", "Yuto Sawa", "Yeongdae Kim", "Jacqueline Urakami", "Hiroki Oura", "Katie Seaborn" ], "externalIds": { "ArXiv": "2205.00189", "DBLP": "conf/chi/UenoSKUOS22", "DOI": "10.1145/3491101.3519772", "CorpusId": 248420019 }, "url": "https://www.semanticscholar.org/paper/0f6263af43bd629313164c819b1d1d3a9d97632d", "referenceCount": 60, "citationCount": 33, "influentialCitationCount": 3, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On the Influence of Explainable AI on Automation Bias", "abstract": "Artificial intelligence (AI) is gaining momentum, and its importance for the future of work in many areas, such as medicine and banking, is continuously rising. However, insights on the effective collaboration of humans and AI are still rare. Typically, AI supports humans in decision-making by addressing human limitations. However, it may also evoke human bias, especially in the form of automation bias as an over-reliance on AI advice. We aim to shed light on the potential to influence automation bias by explainable AI (XAI). In this pre-test, we derive a research model and describe our study design. Subsequentially, we conduct an online experiment with regard to hotel review classifications and discuss first results. We expect our research to contribute to the design and development of safe hybrid intelligence systems.", "year": 2022, "venue": "European Conference on Information Systems", "authors": [ "Max Schemmer", "Niklas Kühl", "Carina Benz", "G. Satzger" ], "externalIds": { "DBLP": "journals/corr/abs-2204-08859", "ArXiv": "2204.08859", "DOI": "10.48550/arXiv.2204.08859", "CorpusId": 248239725 }, "url": "https://www.semanticscholar.org/paper/3eb7a8b2b674a5b747c1b5fcd017480c62f79bbd", "referenceCount": 53, "citationCount": 22, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Explainability pitfalls: Beyond dark patterns in explainable AI", "abstract": null, "year": 2021, "venue": "Patterns", "authors": [ "Upol Ehsan", "Mark O. Riedl" ], "externalIds": { "DBLP": "journals/patterns/EhsanR24a", "PubMedCentral": "11240172", "ArXiv": "2109.12480", "DOI": "10.1016/j.patter.2024.100971", "CorpusId": 237940863, "PubMed": "39005480" }, "url": "https://www.semanticscholar.org/paper/4abb90edf2ec4045ae62cf6e25725043209bf57b", "referenceCount": 76, "citationCount": 40, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "On the Opportunities and Risks of Foundation Models", "abstract": "AI is undergoing a paradigm shift with the rise of models (e.g., BERT, DALL-E, GPT-3) that are trained on broad data at scale and are adaptable to a wide range of downstream tasks. We call these models foundation models to underscore their critically central yet incomplete character. 
This report provides a thorough account of the opportunities and risks of foundation models, ranging from their capabilities (e.g., language, vision, robotics, reasoning, human interaction) and technical principles(e.g., model architectures, training procedures, data, systems, security, evaluation, theory) to their applications (e.g., law, healthcare, education) and societal impact (e.g., inequity, misuse, economic and environmental impact, legal and ethical considerations). Though foundation models are based on standard deep learning and transfer learning, their scale results in new emergent capabilities,and their effectiveness across so many tasks incentivizes homogenization. Homogenization provides powerful leverage but demands caution, as the defects of the foundation model are inherited by all the adapted models downstream. Despite the impending widespread deployment of foundation models, we currently lack a clear understanding of how they work, when they fail, and what they are even capable of due to their emergent properties. To tackle these questions, we believe much of the critical research on foundation models will require deep interdisciplinary collaboration commensurate with their fundamentally sociotechnical nature.", "year": 2021, "venue": "arXiv.org", "authors": [ "Rishi Bommasani", "Drew A. Hudson", "E. Adeli", "R. Altman", "Simran Arora", "Sydney von Arx", "Michael S. Bernstein", "J. Bohg", "Antoine Bosselut", "E. Brunskill", "Erik Brynjolfsson", "S. Buch", "Dallas Card", "Rodrigo Castellon", "Niladri S. Chatterji", "Annie S. Chen", "Kathleen A. Creel", "Jared Davis", "Dora Demszky", "Chris Donahue", "M. Doumbouya", "Esin Durmus", "Stefano Ermon", "J. Etchemendy", "Kawin Ethayarajh", "L. Fei-Fei", "Chelsea Finn", "Trevor Gale", "Lauren Gillespie", "Karan Goel", "Noah D. Goodman", "S. Grossman", "Neel Guha", "Tatsunori Hashimoto", "Peter Henderson", "John Hewitt", "Daniel E. Ho", "Jenny Hong", "Kyle Hsu", "Jing Huang", "Thomas F. Icard", "Saahil Jain", "Dan Jurafsky", "Pratyusha Kalluri", "Siddharth Karamcheti", "G. Keeling", "Fereshte Khani", "O. Khattab", "Pang Wei Koh", "M. Krass", "Ranjay Krishna", "Rohith Kuditipudi", "Ananya Kumar", "Faisal Ladhak", "Mina Lee", "Tony Lee", "J. Leskovec", "Isabelle Levent", "Xiang Lisa Li", "Xuechen Li", "Tengyu Ma", "Ali Malik", "Christopher D. Manning", "Suvir Mirchandani", "E. Mitchell", "Zanele Munyikwa", "Suraj Nair", "A. Narayan", "D. Narayanan", "Benjamin Newman", "Allen Nie", "Juan Carlos Niebles", "H. Nilforoshan", "J. Nyarko", "Giray Ogut", "Laurel J. Orr", "Isabel Papadimitriou", "J. Park", "C. Piech", "Eva Portelance", "Christopher Potts", "Aditi Raghunathan", "Robert Reich", "Hongyu Ren", "Frieda Rong", "Yusuf Roohani", "Camilo Ruiz", "Jack Ryan", "Christopher R'e", "Dorsa Sadigh", "Shiori Sagawa", "Keshav Santhanam", "Andy Shih", "K. Srinivasan", "Alex Tamkin", "Rohan Taori", "A. Thomas", "Florian Tramèr", "Rose E. Wang", "William Wang", "Bohan Wu", "Jiajun Wu", "Yuhuai Wu", "Sang Michael Xie", "Michihiro Yasunaga", "Jiaxuan You", "M. 
Zaharia", "Michael Zhang", "Tianyi Zhang", "Xikun Zhang", "Yuhui Zhang", "Lucia Zheng", "Kaitlyn Zhou", "Percy Liang" ], "externalIds": { "ArXiv": "2108.07258", "DBLP": "journals/corr/abs-2108-07258", "CorpusId": 237091588 }, "url": "https://www.semanticscholar.org/paper/76e9e2ec3de437ffb30d8b7b629f7fe3e61de5c2", "referenceCount": 0, "citationCount": 3226, "influentialCitationCount": 144, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Interpretable Machine Learning: Fundamental Principles and 10 Grand Challenges", "abstract": "Interpretability in machine learning (ML) is crucial for high stakes decisions and troubleshooting. In this work, we provide fundamental principles for interpretable ML, and dispel common misunderstandings that dilute the importance of this crucial topic. We also identify 10 technical challenge areas in interpretable machine learning and provide history and background on each problem. Some of these problems are classically important, and some are recent problems that have arisen in the last few years. These problems are: (1) Optimizing sparse logical models such as decision trees; (2) Optimization of scoring systems; (3) Placing constraints into generalized additive models to encourage sparsity and better interpretability; (4) Modern case-based reasoning, including neural networks and matching for causal inference; (5) Complete supervised disentanglement of neural networks; (6) Complete or even partial unsupervised disentanglement of neural networks; (7) Dimensionality reduction for data visualization; (8) Machine learning models that can incorporate physics and other generative or causal constraints; (9) Characterization of the\"Rashomon set\"of good models; and (10) Interpretable reinforcement learning. This survey is suitable as a starting point for statisticians and computer scientists interested in working in interpretable machine learning.", "year": 2021, "venue": "Statistics Survey", "authors": [ "C. Rudin", "Chaofan Chen", "Zhi Chen", "Haiyang Huang", "Lesia Semenova", "Chudi Zhong" ], "externalIds": { "ArXiv": "2103.11251", "DBLP": "journals/corr/abs-2103-11251", "DOI": "10.1214/21-ss133", "CorpusId": 232307437 }, "url": "https://www.semanticscholar.org/paper/256db9dba1978f004a67c86ffc321563b1aee79a", "referenceCount": 347, "citationCount": 510, "influentialCitationCount": 27, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Human-Centric AI: The Symbiosis of Human and Artificial Intelligence", "abstract": "Well-evidenced advances of data-driven complex machine learning approaches emerging within the so-called second wave of artificial intelligence (AI) fostered the exploration of possible AI applications in various domains and aspects of human life, practices, and society [...].", "year": 2021, "venue": "Entropy", "authors": [ "D. Horvatić", "T. 
Lipić" ], "externalIds": { "DBLP": "journals/entropy/HorvaticL21", "PubMedCentral": "7998306", "DOI": "10.3390/e23030332", "CorpusId": 232384263, "PubMed": "33799841" }, "url": "https://www.semanticscholar.org/paper/eec7a6b0094a890cdc7e9e8cea8e1a9de745cd9d", "referenceCount": 13, "citationCount": 14, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "Expanding Explainability: Towards Social Transparency in AI systems", "abstract": "As AI-powered systems increasingly mediate consequential decision-making, their explainability is critical for end-users to take informed and accountable actions. Explanations in human-human interactions are socially-situated. AI systems are often socio-organizationally embedded. However, Explainable AI (XAI) approaches have been predominantly algorithm-centered. We take a developmental step towards socially-situated XAI by introducing and exploring Social Transparency (ST), a sociotechnically informed perspective that incorporates the socio-organizational context into explaining AI-mediated decision-making. To explore ST conceptually, we conducted interviews with 29 AI users and practitioners grounded in a speculative design scenario. We suggested constitutive design elements of ST and developed a conceptual framework to unpack ST’s effect and implications at the technical, decision-making, and organizational level. The framework showcases how ST can potentially calibrate trust in AI, improve decision-making, facilitate organizational collective actions, and cultivate holistic explainability. Our work contributes to the discourse of Human-Centered XAI by expanding the design space of XAI.", "year": 2021, "venue": "International Conference on Human Factors in Computing Systems", "authors": [ "Upol Ehsan", "Q. Liao", "Michael J. Muller", "Mark O. Riedl", "Justin D. Weisz" ], "externalIds": { "DBLP": "conf/chi/EhsanLMRW21", "ArXiv": "2101.04719", "DOI": "10.1145/3411764.3445188", "CorpusId": 231592467 }, "url": "https://www.semanticscholar.org/paper/c8965761083d80ff762ce76c08df92d66e01f37d", "referenceCount": 119, "citationCount": 275, "influentialCitationCount": 21, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Sequential Explanations with Mental Model-Based Policies", "abstract": "The act of explaining across two parties is a feedback loop, where one provides information on what needs to be explained and the other provides an explanation relevant to this information. We apply a reinforcement learning framework which emulates this format by providing explanations based on the explainee's current mental model. We conduct novel online human experiments where explanations generated by various explanation methods are selected and presented to participants, using policies which observe participants' mental models, in order to optimize an interpretability proxy. Our results suggest that mental model-based policies (anchored in our proposed state representation) may increase interpretability over multiple sequential explanations, when compared to a random selection baseline. This work provides insight into how to select explanations which increase relevant information for users, and into conducting human-grounded experimentation to understand interpretability.", "year": 2020, "venue": "arXiv.org", "authors": [ "A. Yeung", "Shalmali Joshi", "J. 
Williams", "Frank Rudzicz" ], "externalIds": { "ArXiv": "2007.09028", "DBLP": "journals/corr/abs-2007-09028", "MAG": "3042234123", "CorpusId": 220633388 }, "url": "https://www.semanticscholar.org/paper/c26be717c6af5709eb9668bd4c597f48b1ef65a4", "referenceCount": 36, "citationCount": 15, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Does the Whole Exceed its Parts? The Effect of AI Explanations on Complementary Team Performance", "abstract": "Many researchers motivate explainable AI with studies showing that human-AI team performance on decision-making tasks improves when the AI explains its recommendations. However, prior studies observed improvements from explanations only when the AI, alone, outperformed both the human and the best team. Can explanations help lead to complementary performance, where team accuracy is higher than either the human or the AI working solo? We conduct mixed-method user studies on three datasets, where an AI with accuracy comparable to humans helps participants solve a task (explaining itself in some conditions). While we observed complementary improvements from AI augmentation, they were not increased by explanations. Rather, explanations increased the chance that humans will accept the AI’s recommendation, regardless of its correctness. Our result poses new challenges for human-centered AI: Can we develop explanatory approaches that encourage appropriate trust in AI, and therefore help generate (or improve) complementary performance?", "year": 2020, "venue": "International Conference on Human Factors in Computing Systems", "authors": [ "Gagan Bansal", "Tongshuang Sherry Wu", "Joyce Zhou", "Raymond Fok", "Besmira Nushi", "Ece Kamar", "Marco Tulio Ribeiro", "Daniel S. Weld" ], "externalIds": { "DBLP": "conf/chi/BansalWZFNKRW21", "ArXiv": "2006.14779", "MAG": "3037634279", "DOI": "10.1145/3411764.3445717", "CorpusId": 220128138 }, "url": "https://www.semanticscholar.org/paper/ebcbbb8fe297940d79b17aeb6d46bedff9db7fec", "referenceCount": 101, "citationCount": 441, "influentialCitationCount": 69, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Does Explainable Artificial Intelligence Improve Human Decision-Making?", "abstract": "Explainable AI provides insights to users into the why formodel predictions, offering potential for users to better un-derstand and trust a model, and to recognize and correct AIpredictions that are incorrect. Prior research on human andexplainable AI interactions has typically focused on measuressuch as interpretability, trust, and usability of the explanation.There are mixed findings whether explainable AI can improveactual human decision-making and the ability to identify theproblems with the underlying model. Using real datasets, wecompare objective human decision accuracy without AI (con-trol), with an AI prediction (no explanation), and AI predic-tion with explanation. We find providing any kind of AI pre-diction tends to improve user decision accuracy, but no con-clusive evidence that explainable AI has a meaningful impact.Moreover, we observed the strongest predictor for human de-cision accuracy was AI accuracy and that users were some-what able to detect when the AI was correct vs. incorrect, butthis was not significantly affected by including an explana-tion. 
Our results indicate that, at least in some situations, the why information provided in explainable AI may not enhance user decision-making, and further research may be needed to understand how to integrate explainable AI into real systems.", "year": 2020, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Y. Alufaisan", "L. Marusich", "J. Bakdash", "Yan Zhou", "Murat Kantarcioglu" ], "externalIds": { "MAG": "3036047738", "DBLP": "journals/corr/abs-2006-11194", "ArXiv": "2006.11194", "DOI": "10.31234/osf.io/d4r9t", "CorpusId": 219956102 }, "url": "https://www.semanticscholar.org/paper/e7411483b88a977ff046f444800d808135535f65", "referenceCount": 45, "citationCount": 71, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "The Clever Hans Effect in Anomaly Detection", "abstract": "The 'Clever Hans' effect occurs when the learned model produces correct predictions based on the 'wrong' features. This effect which undermines the generalization capability of an ML model and goes undetected by standard validation techniques has been frequently observed for supervised learning where the training algorithm leverages spurious correlations in the data. The question whether Clever Hans also occurs in unsupervised learning, and in which form, has received so far almost no attention. Therefore, this paper will contribute an explainable AI (XAI) procedure that can highlight the relevant features used by popular anomaly detection models of different type. Our analysis reveals that the Clever Hans effect is widespread in anomaly detection and occurs in many (unexpected) forms. Interestingly, the observed Clever Hans effects are in this case not so much due to the data, but due to the anomaly detection models themselves whose structure makes them unable to detect the truly relevant features, even though vast amounts of data points are available. Overall, our work contributes a warning against an unrestrained use of existing anomaly detection models in practical applications, but it also points at a possible way out of the Clever Hans dilemma, specifically, by allowing multiple anomaly models to mutually cancel their individual structural weaknesses to jointly produce a better and more trustworthy anomaly detector.", "year": 2020, "venue": "arXiv.org", "authors": [ "Jacob R. Kauffmann", "Lukas Ruff", "G. Montavon", "Klaus-Robert Muller" ], "externalIds": { "MAG": "3035937807", "ArXiv": "2006.10609", "DBLP": "journals/corr/abs-2006-10609", "CorpusId": 219792725 }, "url": "https://www.semanticscholar.org/paper/d2e6ad4e474666d3d71b92d0892339ffc1c7b972", "referenceCount": 36, "citationCount": 28, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Evaluating Explainable AI: Which Algorithmic Explanations Help Users Predict Model Behavior?", "abstract": "Algorithmic approaches to interpreting machine learning models have proliferated in recent years. We carry out human subject tests that are the first of their kind to isolate the effect of algorithmic explanations on a key aspect of model interpretability, simulatability, while avoiding important confounding experimental factors. A model is simulatable when a person can predict its behavior on new inputs.
Through two kinds of simulation tests involving text and tabular data, we evaluate five explanations methods: (1) LIME, (2) Anchor, (3) Decision Boundary, (4) a Prototype model, and (5) a Composite approach that combines explanations from each method. Clear evidence of method effectiveness is found in very few cases: LIME improves simulatability in tabular classification, and our Prototype method is effective in counterfactual simulation tests. We also collect subjective ratings of explanations, but we do not find that ratings are predictive of how helpful explanations are. Our results provide the first reliable and comprehensive estimates of how explanations influence simulatability across a variety of explanation methods and data domains. We show that (1) we need to be careful about the metrics we use to evaluate explanation methods, and (2) there is significant room for improvement in current methods.", "year": 2020, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Peter Hase", "Mohit Bansal" ], "externalIds": { "MAG": "3035371891", "ACL": "2020.acl-main.491", "ArXiv": "2005.01831", "DBLP": "conf/acl/HaseB20", "DOI": "10.18653/v1/2020.acl-main.491", "CorpusId": 218502350 }, "url": "https://www.semanticscholar.org/paper/cffd8f947ba03644f62baea31c64c8920b06288e", "referenceCount": 34, "citationCount": 254, "influentialCitationCount": 23, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "COGAM: Measuring and Moderating Cognitive Load in Machine Learning Model Explanations", "abstract": "Interpretable machine learning models trade -off accuracy for simplicity to make explanations more readable and easier to comprehend. Drawing from cognitive psychology theories in graph comprehension, we formalize readability as visual cognitive chunks to measure and moderate the cognitive load in explanation visualizations. We present Cognitive-GAM (COGAM) to generate explanations with desired cognitive load and accuracy by combining the expressive nonlinear generalized additive models (GAM) with simpler sparse linear models. We calibrated visual cognitive chunks with reading time in a user study, characterized the trade-off between cognitive load and accuracy for four datasets in simulation studies, and evaluated COGAM against baselines with users. We found that COGAM can decrease cognitive load without decreasing accuracy and/or increase accuracy without increasing cognitive load. Our framework and empirical measurement instruments for cognitive load will enable more rigorous assessment of the human interpretability of explainable AI.", "year": 2020, "venue": "International Conference on Human Factors in Computing Systems", "authors": [ "Ashraf Abdul", "C. von der Weth", "Mohan S. Kankanhalli", "Brian Y. Lim" ], "externalIds": { "MAG": "3028689275", "DBLP": "conf/chi/AbdulWKL20", "DOI": "10.1145/3313831.3376615", "CorpusId": 218470679 }, "url": "https://www.semanticscholar.org/paper/06f653975801cb286b93cf237a6ac9284fa3d7e6", "referenceCount": 78, "citationCount": 99, "influentialCitationCount": 7, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Machines as teammates: A research agenda on AI in team collaboration", "abstract": null, "year": 2020, "venue": "Information Manager (The)", "authors": [ "Isabella Seeber", "E. Bittner", "R. Briggs", "Triparna de Vreede", "G. Vreede", "Aaron C. Elkins", "R. Maier", "A. B. Merz", "Sarah Oeste-Reiss", "Nils L. Randrup", "G. 
Schwabe", "Matthias Söllner" ], "externalIds": { "DBLP": "journals/iam/SeeberBBVVEMMOR20", "MAG": "2955853369", "DOI": "10.1016/J.IM.2019.103174", "CorpusId": 198484097 }, "url": "https://www.semanticscholar.org/paper/f779529aeee1b0b7ad9c0f1845523ff7352ba775", "referenceCount": 115, "citationCount": 331, "influentialCitationCount": 21, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Human-Centered Artificial Intelligence: Reliable, Safe & Trustworthy", "abstract": "ABSTRACT Well-designed technologies that offer high levels of human control and high levels of computer automation can increase human performance, leading to wider adoption. The Human-Centered Artificial Intelligence (HCAI) framework clarifies how to (1) design for high levels of human control and high levels of computer automation so as to increase human performance, (2) understand the situations in which full human control or full computer control are necessary, and (3) avoid the dangers of excessive human control or excessive computer control. The methods of HCAI are more likely to produce designs that are Reliable, Safe & Trustworthy (RST). Achieving these goals will dramatically increase human performance, while supporting human self-efficacy, mastery, creativity, and responsibility.", "year": 2020, "venue": "International journal of human computer interactions", "authors": [ "B. Shneiderman" ], "externalIds": { "MAG": "3008620093", "DBLP": "journals/ijhci/Shneiderman20", "ArXiv": "2002.04087", "DOI": "10.1080/10447318.2020.1741118", "CorpusId": 211259461 }, "url": "https://www.semanticscholar.org/paper/e49f67fa5c946ad24afcf59699a9cacf1ca53924", "referenceCount": 74, "citationCount": 511, "influentialCitationCount": 32, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Making deep neural networks right for the right scientific reasons by interacting with their explanations", "abstract": null, "year": 2020, "venue": "Nature Machine Intelligence", "authors": [ "P. Schramowski", "Wolfgang Stammer", "Stefano Teso", "Anna Brugger", "Xiaoting Shao", "Hans-Georg Luigs", "Anne-Katrin Mahlein", "K. Kersting" ], "externalIds": { "DBLP": "journals/natmi/SchramowskiSTBH20", "MAG": "3035989815", "ArXiv": "2001.05371", "DOI": "10.1038/s42256-020-0212-3", "CorpusId": 219955775 }, "url": "https://www.semanticscholar.org/paper/907f087e34fbc16067a2e47bed8c55c4c26946f3", "referenceCount": 78, "citationCount": 183, "influentialCitationCount": 16, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Effect of confidence and explanation on accuracy and trust calibration in AI-assisted decision making", "abstract": "Today, AI is being increasingly used to help human experts make decisions in high-stakes scenarios. In these scenarios, full automation is often undesirable, not only due to the significance of the outcome, but also because human experts can draw on their domain knowledge complementary to the model's to ensure task success. We refer to these scenarios as AI-assisted decision making, where the individual strengths of the human and the AI come together to optimize the joint decision outcome. A key to their success is to appropriately calibrate human trust in the AI on a case-by-case basis; knowing when to trust or distrust the AI allows the human expert to appropriately apply their knowledge, improving decision outcomes in cases where the model is likely to perform poorly. 
This research conducts a case study of AI-assisted decision making in which humans and AI have comparable performance alone, and explores whether features that reveal case-specific model information can calibrate trust and improve the joint performance of the human and AI. Specifically, we study the effect of showing confidence score and local explanation for a particular prediction. Through two human experiments, we show that confidence score can help calibrate people's trust in an AI model, but trust calibration alone is not sufficient to improve AI-assisted decision making, which may also depend on whether the human can bring in enough unique knowledge to complement the AI's errors. We also highlight the problems in using local explanation for AI-assisted decision making scenarios and invite the research community to explore new approaches to explainability for calibrating human trust in AI.", "year": 2020, "venue": "FAT*", "authors": [ "Yunfeng Zhang", "Q. Liao", "R. Bellamy" ], "externalIds": { "DBLP": "journals/corr/abs-2001-02114", "ArXiv": "2001.02114", "MAG": "2999637955", "DOI": "10.1145/3351095.3372852", "CorpusId": 210023849 }, "url": "https://www.semanticscholar.org/paper/5cc4100a67fd6f2ce3c760655ba7a12f358c7950", "referenceCount": 33, "citationCount": 530, "influentialCitationCount": 52, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "\"How do I fool you?\": Manipulating User Trust via Misleading Black Box Explanations", "abstract": "As machine learning black boxes are increasingly being deployed in critical domains such as healthcare and criminal justice, there has been a growing emphasis on developing techniques for explaining these black boxes in a human interpretable manner. There has been recent concern that a high-fidelity explanation of a black box ML model may not accurately reflect the biases in the black box. As a consequence, explanations have the potential to mislead human users into trusting a problematic black box. In this work, we rigorously explore the notion of misleading explanations and how they influence user trust in black box models. Specifically, we propose a novel theoretical framework for understanding and generating misleading explanations, and carry out a user study with domain experts to demonstrate how these explanations can be used to mislead users. Our work is the first to empirically establish how user trust in black box models can be manipulated via misleading explanations.", "year": 2019, "venue": "AAAI/ACM Conference on AI, Ethics, and Society", "authors": [ "Himabindu Lakkaraju", "O. Bastani" ], "externalIds": { "MAG": "3005073185", "DBLP": "journals/corr/abs-1911-06473", "ArXiv": "1911.06473", "DOI": "10.1145/3375627.3375833", "CorpusId": 208077044 }, "url": "https://www.semanticscholar.org/paper/e656b0376ca11f533ea01097c70f98c0ff655c00", "referenceCount": 22, "citationCount": 225, "influentialCitationCount": 11, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Beyond Accuracy: The Role of Mental Models in Human-AI Team Performance", "abstract": "Decisions made by human-AI teams (e.g., AI-advised humans) are increasingly common in high-stakes domains such as healthcare, criminal justice, and finance. Achieving high team performance depends on more than just the accuracy of the AI system: Since the human and the AI may have different expertise, the highest team performance is often reached when they both know how and when to complement one another. 
We focus on a factor that is crucial to supporting such complementarity: the human’s mental model of the AI capabilities, specifically the AI system’s error boundary (i.e. knowing “When does the AI err?”). Awareness of this lets the human decide when to accept or override the AI’s recommendation. We highlight two key properties of an AI’s error boundary, parsimony and stochasticity, and a property of the task, dimensionality. We show experimentally how these properties affect humans’ mental models of AI capabilities and the resulting team performance. We connect our evaluations to related work and propose goals, beyond accuracy, that merit consideration during model selection and optimization to improve overall human-AI team performance.", "year": 2019, "venue": "AAAI Conference on Human Computation & Crowdsourcing", "authors": [ "Gagan Bansal", "Besmira Nushi", "Ece Kamar", "Walter S. Lasecki", "Daniel S. Weld", "E. Horvitz" ], "externalIds": { "DBLP": "conf/hcomp/BansalNKLWH19", "MAG": "2984353433", "DOI": "10.1609/hcomp.v7i1.5285", "CorpusId": 201685074 }, "url": "https://www.semanticscholar.org/paper/5688b8077117b3aafd54c2e71d959284f4d5c8b9", "referenceCount": 50, "citationCount": 319, "influentialCitationCount": 20, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Explainable Artificial Intelligence (XAI): Concepts, Taxonomies, Opportunities and Challenges toward Responsible AI", "abstract": null, "year": 2019, "venue": "Information Fusion", "authors": [ "Alejandro Barredo Arrieta", "Natalia Díaz Rodríguez", "J. Ser", "Adrien Bennetot", "S. Tabik", "A. Barbado", "S. García", "S. Gil-Lopez", "D. Molina", "Richard Benjamins", "Raja Chatila", "Francisco Herrera" ], "externalIds": { "ArXiv": "1910.10045", "MAG": "2997428643", "DBLP": "journals/inffus/ArrietaRSBTBGGM20", "DOI": "10.1016/j.inffus.2019.12.012", "CorpusId": 204824113 }, "url": "https://www.semanticscholar.org/paper/530a059cb48477ad1e3d4f8f4b153274c8997332", "referenceCount": 430, "citationCount": 4971, "influentialCitationCount": 288, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "How model accuracy and explanation fidelity influence user trust", "abstract": "Machine learning systems have become popular in fields such as marketing, financing, or data mining. While they are highly accurate, complex machine learning systems pose challenges for engineers and users. Their inherent complexity makes it impossible to easily judge their fairness and the correctness of statistically learned relations between variables and classes. Explainable AI aims to solve this challenge by modelling explanations alongside with the classifiers, potentially improving user trust and acceptance. However, users should not be fooled by persuasive, yet untruthful explanations. We therefore conduct a user study in which we investigate the effects of model accuracy and explanation fidelity, i.e. how truthfully the explanation represents the underlying model, on user trust. Our findings show that accuracy is more important for user trust than explainability. Adding an explanation for a classification result can potentially harm trust, e.g. when adding nonsensical explanations. We also found that users cannot be tricked by high-fidelity explanations into having trust for a bad classifier. Furthermore, we found a mismatch between observed (implicit) and self-reported (explicit) trust.", "year": 2019, "venue": "International Joint Conference on Artificial Intelligence", "authors": [ "A. Papenmeier", "G.
Englebienne", "C. Seifert" ], "externalIds": { "DBLP": "journals/corr/abs-1907-12652", "MAG": "2969521705", "ArXiv": "1907.12652", "CorpusId": 198985791 }, "url": "https://www.semanticscholar.org/paper/8b869862c71924517fb1b24d98bc95e8143bac06", "referenceCount": 31, "citationCount": 97, "influentialCitationCount": 5, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Understanding the Effect of Accuracy on Trust in Machine Learning Models", "abstract": "We address a relatively under-explored aspect of human-computer interaction: people's abilities to understand the relationship between a machine learning model's stated performance on held-out data and its expected performance post deployment. We conduct large-scale, randomized human-subject experiments to examine whether laypeople's trust in a model, measured in terms of both the frequency with which they revise their predictions to match those of the model and their self-reported levels of trust in the model, varies depending on the model's stated accuracy on held-out data and on its observed accuracy in practice. We find that people's trust in a model is affected by both its stated accuracy and its observed accuracy, and that the effect of stated accuracy can change depending on the observed accuracy. Our work relates to recent research on interpretable machine learning, but moves beyond the typical focus on model internals, exploring a different component of the machine learning pipeline.", "year": 2019, "venue": "International Conference on Human Factors in Computing Systems", "authors": [ "Ming Yin", "Jennifer Wortman Vaughan", "Hanna M. Wallach" ], "externalIds": { "DBLP": "conf/chi/YinVW19", "MAG": "2942157335", "DOI": "10.1145/3290605.3300509", "CorpusId": 109927933 }, "url": "https://www.semanticscholar.org/paper/b4b1cbd74029f46ef9b462290a46111217552761", "referenceCount": 33, "citationCount": 381, "influentialCitationCount": 22, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Will You Accept an Imperfect AI?: Exploring Designs for Adjusting End-user Expectations of AI Systems", "abstract": "AI technologies have been incorporated into many end-user applications. However, expectations of the capabilities of such systems vary among people. Furthermore, bloated expectations have been identified as negatively affecting perception and acceptance of such systems. Although the intelligibility of ML algorithms has been well studied, there has been little work on methods for setting appropriate expectations before the initial use of an AI-based system. In this work, we use a Scheduling Assistant - an AI system for automated meeting request detection in free-text email - to study the impact of several methods of expectation setting. We explore two versions of this system with the same 50% level of accuracy of the AI component but each designed with a different focus on the types of errors to avoid (avoiding False Positives vs. False Negatives). We show that such different focus can lead to vastly different subjective perceptions of accuracy and acceptance. Further, we design expectation adjustment techniques that prepare users for AI imperfections and result in a significant increase in acceptance.", "year": 2019, "venue": "International Conference on Human Factors in Computing Systems", "authors": [ "Rafal Kocielnik", "Saleema Amershi", "Paul N. 
Bennett" ], "externalIds": { "DBLP": "conf/chi/KocielnikAB19", "MAG": "2918497321", "DOI": "10.1145/3290605.3300641", "CorpusId": 86389282 }, "url": "https://www.semanticscholar.org/paper/5619b00592912299690b8ae5d0d098af9815f606", "referenceCount": 54, "citationCount": 227, "influentialCitationCount": 11, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Designing Theory-Driven User-Centric Explainable AI", "abstract": "From healthcare to criminal justice, artificial intelligence (AI) is increasingly supporting high-consequence human decisions. This has spurred the field of explainable AI (XAI). This paper seeks to strengthen empirical application-specific investigations of XAI by exploring theoretical underpinnings of human decision making, drawing from the fields of philosophy and psychology. In this paper, we propose a conceptual framework for building human-centered, decision-theory-driven XAI based on an extensive review across these fields. Drawing on this framework, we identify pathways along which human cognitive patterns drives needs for building XAI and how XAI can mitigate common cognitive biases. We then put this framework into practice by designing and implementing an explainable clinical diagnostic tool for intensive care phenotyping and conducting a co-design exercise with clinicians. Thereafter, we draw insights into how this framework bridges algorithm-generated explanations and human decision-making theories. Finally, we discuss implications for XAI design and development.", "year": 2019, "venue": "International Conference on Human Factors in Computing Systems", "authors": [ "Danding Wang", "Qian Yang", "Ashraf Abdul", "Brian Y. Lim" ], "externalIds": { "DBLP": "conf/chi/WangYAL19", "MAG": "2942444880", "DOI": "10.1145/3290605.3300831", "CorpusId": 113404661 }, "url": "https://www.semanticscholar.org/paper/f738a685cd06dbc6a59a511b53ef5b0c65d03b47", "referenceCount": 120, "citationCount": 640, "influentialCitationCount": 49, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Psychology" ] }, { "title": "Guidelines for Human-AI Interaction", "abstract": "Advances in artificial intelligence (AI) frame opportunities and challenges for user interface design. Principles for human-AI interaction have been discussed in the human-computer interaction community for over two decades, but more study and innovation are needed in light of advances in AI and the growing uses of AI technologies in human-facing applications. We propose 18 generally applicable design guidelines for human-AI interaction. These guidelines are validated through multiple rounds of evaluation including a user study with 49 design practitioners who tested the guidelines against 20 popular AI-infused products. The results verify the relevance of the guidelines over a spectrum of interaction scenarios and reveal gaps in our knowledge, highlighting opportunities for further research. Based on the evaluations, we believe the set of design guidelines can serve as a resource to practitioners working on the design of applications and features that harness AI technologies, and to researchers interested in the further development of human-AI interaction design principles.", "year": 2019, "venue": "International Conference on Human Factors in Computing Systems", "authors": [ "Saleema Amershi", "Daniel S. Weld", "Mihaela Vorvoreanu", "Adam Fourney", "Besmira Nushi", "Penny Collisson", "Jina Suh", "Shamsi T. Iqbal", "Paul N. Bennett", "K. Quinn", "J. Teevan", "Ruth Kikin-Gil", "E. 
Horvitz" ], "externalIds": { "DBLP": "conf/chi/AmershiWVFNCSIB19", "MAG": "2916904544", "DOI": "10.1145/3290605.3300233", "CorpusId": 86866942 }, "url": "https://www.semanticscholar.org/paper/ad3cf68bae32d21f25ac142287d4a556155619d2", "referenceCount": 45, "citationCount": 1060, "influentialCitationCount": 96, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Counterfactual Visual Explanations", "abstract": "In this work, we develop a technique to produce counterfactual visual explanations. Given a 'query' image $I$ for which a vision system predicts class $c$, a counterfactual visual explanation identifies how $I$ could change such that the system would output a different specified class $c'$. To do this, we select a 'distractor' image $I'$ that the system predicts as class $c'$ and identify spatial regions in $I$ and $I'$ such that replacing the identified region in $I$ with the identified region in $I'$ would push the system towards classifying $I$ as $c'$. We apply our approach to multiple image classification datasets generating qualitative results showcasing the interpretability and discriminativeness of our counterfactual explanations. To explore the effectiveness of our explanations in teaching humans, we present machine teaching experiments for the task of fine-grained bird classification. We find that users trained to distinguish bird species fare better when given access to counterfactual explanations in addition to training examples.", "year": 2019, "venue": "International Conference on Machine Learning", "authors": [ "Yash Goyal", "Ziyan Wu", "Jan Ernst", "Dhruv Batra", "Devi Parikh", "Stefan Lee" ], "externalIds": { "ArXiv": "1904.07451", "MAG": "2950932789", "DBLP": "journals/corr/abs-1904-07451", "CorpusId": 119309561 }, "url": "https://www.semanticscholar.org/paper/70dbe3e740a5e7927ccce00fd615365b08a6eaae", "referenceCount": 31, "citationCount": 459, "influentialCitationCount": 41, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Automated rationale generation: a technique for explainable AI and its effects on human perceptions", "abstract": "Automated rationale generation is an approach for real-time explanation generation whereby a computational model learns to translate an autonomous agent's internal state and action data representations into natural language. Training on human explanation data can enable agents to learn to generate human-like explanations for their behavior. In this paper, using the context of an agent that plays Frogger, we describe (a) how to collect a corpus of explanations, (b) how to train a neural rationale generator to produce different styles of rationales, and (c) how people perceive these rationales. We conducted two user studies. The first study establishes the plausibility of each type of generated rationale and situates their user perceptions along the dimensions of confidence, humanlike-ness, adequate justification, and understandability. The second study further explores user preferences between the generated rationales with regard to confidence in the autonomous agent, communicating failure and unexpected behavior. Overall, we find alignment between the intended differences in features of the generated rationales and the perceived differences by users. 
Moreover, context permitting, participants preferred detailed rationales to form a stable mental model of the agent's behavior.", "year": 2019, "venue": "International Conference on Intelligent User Interfaces", "authors": [ "Upol Ehsan", "Pradyumna Tambwekar", "Larry Chan", "Brent Harrison", "Mark O. Riedl" ], "externalIds": { "DBLP": "journals/corr/abs-1901-03729", "MAG": "2909596867", "ArXiv": "1901.03729", "DOI": "10.1145/3301275.3302316", "CorpusId": 58004583 }, "url": "https://www.semanticscholar.org/paper/ef7df5eae54107c013885231eb7af4431f2e6158", "referenceCount": 48, "citationCount": 213, "influentialCitationCount": 14, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "High-performance medicine: the convergence of human and artificial intelligence", "abstract": null, "year": 2019, "venue": "Nature Network Boston", "authors": [ "E. Topol" ], "externalIds": { "MAG": "2908201961", "DOI": "10.1038/s41591-018-0300-7", "CorpusId": 57574615, "PubMed": "30617339" }, "url": "https://www.semanticscholar.org/paper/f134abeaf9bfd41f29b97aec675ec31895bf541d", "referenceCount": 227, "citationCount": 3368, "influentialCitationCount": 92, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "LEAFAGE: Example-based and Feature importance-based Explanations for Black-box ML models", "abstract": "Explainable Artificial Intelligence (XAI) is an emergent research field which tries to cope with the lack of transparency of AI systems, by providing human understandable explanations for the underlying Machine Learning models. This work presents a new explanation extraction method called LEAFAGE. Explanations are provided both in terms of feature importance and of similar classification examples. The latter is a well known strategy for problem solving and justification in social science. LEAFAGE leverages on the fact that the reasoning behind a single decision/prediction for a single data point is generally simpler to understand than the complete model; it produces explanations by generating simpler yet locally accurate approximations of the original model. LEAFAGE performs overall better than the current state of the art in terms of fidelity of the model approximation, in particular when Machine Learning models with non-linear decision boundaries are analysed. LEAFAGE was also tested in terms of usefulness for the user, an aspect still largely overlooked in the scientific literature. Results show interesting and partly counter-intuitive findings, such as the fact that providing no explanation is sometimes better than providing certain kinds of explanation.", "year": 2018, "venue": "IEEE International Conference on Fuzzy Systems", "authors": [ "Ajaya Adhikari", "D. Tax", "R. Satta", "M. 
Faeth" ], "externalIds": { "MAG": "2967009565", "DBLP": "conf/fuzzIEEE/AdhikariTSF19", "DOI": "10.1109/FUZZ-IEEE.2019.8858846", "CorpusId": 201058546 }, "url": "https://www.semanticscholar.org/paper/2c2fd1a5920b768f50d11ef3e48b0ffee59a6392", "referenceCount": 25, "citationCount": 24, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Multidisciplinary Survey and Framework for Design and Evaluation of Explainable AI Systems", "abstract": "\n The need for interpretable and accountable intelligent systems grows along with the prevalence of\n artificial intelligence\n (\n AI\n ) applications used in everyday life.\n Explainable AI\n (\n XAI\n ) systems are intended to self-explain the reasoning behind system decisions and predictions. Researchers from different disciplines work together to define, design, and evaluate explainable systems. However, scholars from different disciplines focus on different objectives and fairly independent topics of XAI research, which poses challenges for identifying appropriate design and evaluation methodology and consolidating knowledge across efforts. To this end, this article presents a survey and framework intended to share knowledge and experiences of XAI design and evaluation methods across multiple disciplines. Aiming to support diverse design goals and evaluation methods in XAI research, after a thorough review of XAI related papers in the fields of machine learning, visualization, and human-computer interaction, we present a categorization of XAI design goals and evaluation methods. Our categorization presents the mapping between design goals for different XAI user groups and their evaluation methods. From our findings, we develop a framework with step-by-step design guidelines paired with evaluation methods to close the iterative design and evaluation cycles in multidisciplinary XAI teams. Further, we provide summarized ready-to-use tables of evaluation methods and recommendations for different goals in XAI research.\n", "year": 2018, "venue": "ACM Trans. Interact. Intell. Syst.", "authors": [ "Sina Mohseni", "Niloofar Zarei", "E. Ragan" ], "externalIds": { "DBLP": "journals/tiis/MohseniZR21", "MAG": "2992923261", "DOI": "10.1145/3387166", "CorpusId": 208910731 }, "url": "https://www.semanticscholar.org/paper/df5d893638a668dda246e6bad9a5387f86f74bb6", "referenceCount": 236, "citationCount": 486, "influentialCitationCount": 40, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead", "abstract": null, "year": 2018, "venue": "Nature Machine Intelligence", "authors": [ "C. Rudin" ], "externalIds": { "MAG": "2974440810", "DBLP": "journals/natmi/Rudin19", "DOI": "10.1038/s42256-019-0048-x", "CorpusId": 182656421, "PubMed": "35603010" }, "url": "https://www.semanticscholar.org/paper/bc00ff34ec7772080c7039b17f7069a2f7df0889", "referenceCount": 86, "citationCount": 4952, "influentialCitationCount": 298, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "On Human Predictions with Explanations and Predictions of Machine Learning Models: A Case Study on Deception Detection", "abstract": "Humans are the final decision makers in critical tasks that involve ethical and legal concerns, ranging from recidivism prediction, to medical diagnosis, to fighting against fake news. 
Although machine learning models can sometimes achieve impressive performance in these tasks, these tasks are not amenable to full automation. To realize the potential of machine learning for improving human decisions, it is important to understand how assistance from machine learning models affects human performance and human agency. In this paper, we use deception detection as a testbed and investigate how we can harness explanations and predictions of machine learning models to improve human performance while retaining human agency. We propose a spectrum between full human agency and full automation, and develop varying levels of machine assistance along the spectrum that gradually increase the influence of machine predictions. We find that without showing predicted labels, explanations alone slightly improve human performance in the end task. In comparison, human performance is greatly improved by showing predicted labels (>20% relative improvement) and can be further improved by explicitly suggesting strong machine performance. Interestingly, when predicted labels are shown, explanations of machine predictions induce a similar level of accuracy as an explicit statement of strong machine performance. Our results demonstrate a tradeoff between human performance and human agency and show that explanations of machine predictions can moderate this tradeoff.", "year": 2018, "venue": "FAT", "authors": [ "Vivian Lai", "Chenhao Tan" ], "externalIds": { "DBLP": "journals/corr/abs-1811-07901", "MAG": "2901895173", "ArXiv": "1811.07901", "DOI": "10.1145/3287560.3287590", "CorpusId": 53774958 }, "url": "https://www.semanticscholar.org/paper/127c1cb96b73399f429de553d315561504cc7cd4", "referenceCount": 85, "citationCount": 316, "influentialCitationCount": 32, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Physics", "Mathematics" ] }, { "title": "Anchors: High-Precision Model-Agnostic Explanations", "abstract": "\n \n We introduce a novel model-agnostic system that explains the behavior of complex models with high-precision rules called anchors, representing local, \"sufficient\" conditions for predictions. We propose an algorithm to efficiently compute these explanations for any black-box model with high-probability guarantees. We demonstrate the flexibility of anchors by explaining a myriad of different models for different domains and tasks. In a user study, we show that anchors enable users to predict how a model would behave on unseen instances with less effort and higher precision, as compared to existing linear explanations or no explanations.\n \n", "year": 2018, "venue": "AAAI Conference on Artificial Intelligence", "authors": [ "Marco Tulio Ribeiro", "Sameer Singh", "Carlos Guestrin" ], "externalIds": { "MAG": "2788403449", "DBLP": "conf/aaai/Ribeiro0G18", "DOI": "10.1609/aaai.v32i1.11491", "CorpusId": 3366554 }, "url": "https://www.semanticscholar.org/paper/1d8f4f76ac6534627ef8a1c24b9937d8ab2a5c5f", "referenceCount": 30, "citationCount": 1782, "influentialCitationCount": 199, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Bringing Transparency Design into Practice", "abstract": "Intelligent systems, which are on their way to becoming mainstream in everyday products, make recommendations and decisions for users based on complex computations. Researchers and policy makers increasingly raise concerns regarding the lack of transparency and comprehensibility of these computations from the user perspective. 
Our aim is to advance existing UI guidelines for more transparency in complex real-world design scenarios involving multiple stakeholders. To this end, we contribute a stage-based participatory process for designing transparent interfaces incorporating perspectives of users, designers, and providers, which we developed and validated with a commercial intelligent fitness coach. With our work, we hope to provide guidance to practitioners and to pave the way for a pragmatic approach to transparency in intelligent systems.", "year": 2018, "venue": "International Conference on Intelligent User Interfaces", "authors": [ "Malin Eiband", "H. Schneider", "Mark Bilandzic", "Julian Fazekas-Con", "Mareike Haug", "H. Hussmann" ], "externalIds": { "DBLP": "conf/iui/EibandSBFHH18", "MAG": "2790505917", "DOI": "10.1145/3172944.3172961", "CorpusId": 3788634 }, "url": "https://www.semanticscholar.org/paper/1c367aadbfbeb54b4a00d3f03fa54dea17a9baec", "referenceCount": 59, "citationCount": 158, "influentialCitationCount": 18, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Fairness in Machine Learning: Lessons from Political Philosophy", "abstract": "What does it mean for a machine learning model to be `fair', in terms which can be operationalised? Should fairness consist of ensuring everyone has an equal probability of obtaining some benefit, or should we aim instead to minimise the harms to the least advantaged? Can the relevant ideal be determined by reference to some alternative state of affairs in which a particular social pattern of discrimination does not exist? Various definitions proposed in recent literature make different assumptions about what terms like discrimination and fairness mean and how they can be defined in mathematical terms. Questions of discrimination, egalitarianism and justice are of significant interest to moral and political philosophers, who have expended significant efforts in formalising and defending these central concepts. It is therefore unsurprising that attempts to formalise `fairness' in machine learning contain echoes of these old philosophical debates. This paper draws on existing work in moral and political philosophy in order to elucidate emerging debates about fair machine learning.", "year": 2017, "venue": "FAT", "authors": [ "Reuben Binns" ], "externalIds": { "MAG": "2952507238", "DBLP": "journals/corr/abs-1712-03586", "ArXiv": "1712.03586", "CorpusId": 3315224 }, "url": "https://www.semanticscholar.org/paper/2a944564c2466883ec14a6f6ef461f0e34d21b38", "referenceCount": 42, "citationCount": 404, "influentialCitationCount": 26, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Sociology" ] }, { "title": "Prolific.ac—A subject pool for online experiments", "abstract": null, "year": 2017, "venue": "", "authors": [ "Stefan Palan", "Christian Schitter" ], "externalIds": { "MAG": "2779206865", "DOI": "10.1016/J.JBEF.2017.12.004", "CorpusId": 196127175 }, "url": "https://www.semanticscholar.org/paper/3f9e866a004563ba3b3330f40d0c1ff344ccf2a1", "referenceCount": 31, "citationCount": 2002, "influentialCitationCount": 121, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Counterfactual Explanations Without Opening the Black Box: Automated Decisions and the GDPR", "abstract": "There has been much discussion of the right to explanation in the EU General Data Protection Regulation, and its existence, merits, and disadvantages. 
Implementing a right to explanation that opens the black box of algorithmic decision-making faces major legal and technical barriers. Explaining the functionality of complex algorithmic decision-making systems and their rationale in specific cases is a technically challenging problem. Some explanations may offer little meaningful information to data subjects, raising questions around their value. Explanations of automated decisions need not hinge on the general public understanding how algorithmic systems function. Even though such interpretability is of great importance and should be pursued, explanations can, in principle, be offered without opening the black box. Looking at explanations as a means to help a data subject act rather than merely understand, one could gauge the scope and content of explanations according to the specific goal or action they are intended to support. From the perspective of individuals affected by automated decision-making, we propose three aims for explanations: (1) to inform and help the individual understand why a particular decision was reached, (2) to provide grounds to contest the decision if the outcome is undesired, and (3) to understand what would need to change in order to receive a desired result in the future, based on the current decision-making model. We assess how each of these goals finds support in the GDPR. We suggest data controllers should offer a particular type of explanation, unconditional counterfactual explanations, to support these three aims. These counterfactual explanations describe the smallest change to the world that can be made to obtain a desirable outcome, or to arrive at the closest possible world, without needing to explain the internal logic of the system.", "year": 2017, "venue": "arXiv.org", "authors": [ "Sandra Wachter", "B. Mittelstadt", "Chris Russell" ], "externalIds": { "MAG": "2765204106", "ArXiv": "1711.00399", "DBLP": "journals/corr/abs-1711-00399", "DOI": "10.2139/ssrn.3063289", "CorpusId": 3995299 }, "url": "https://www.semanticscholar.org/paper/4f309712e705210df5695240a5d5fb53ea1f8641", "referenceCount": 21, "citationCount": 2019, "influentialCitationCount": 279, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Attention by design: Using attention checks to detect inattentive respondents and improve data quality", "abstract": null, "year": 2017, "venue": "", "authors": [ "James D. Abbey", "Margaret G. 
Meloy" ], "externalIds": { "MAG": "2726093832", "DOI": "10.1016/J.JOM.2017.06.001", "CorpusId": 149261130 }, "url": "https://www.semanticscholar.org/paper/05816c7392d72ba3182161342b36e6e6cb89d182", "referenceCount": 70, "citationCount": 293, "influentialCitationCount": 12, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Explanation in Artificial Intelligence: Insights from the Social Sciences", "abstract": null, "year": 2017, "venue": "Artificial Intelligence", "authors": [ "Tim Miller" ], "externalIds": { "ArXiv": "1706.07269", "MAG": "2953283116", "DBLP": "journals/ai/Miller19", "DOI": "10.1016/J.ARTINT.2018.07.007", "CorpusId": 36024272 }, "url": "https://www.semanticscholar.org/paper/e89dfa306723e8ef031765e9c44e5f6f94fd8fda", "referenceCount": 200, "citationCount": 3692, "influentialCitationCount": 408, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Rationalization: A Neural Machine Translation Approach to Generating Natural Language Explanations", "abstract": "We introduce \\em AI rationalization, an approach for generating explanations of autonomous system behavior as if a human had performed the behavior. We describe a rationalization technique that uses neural machine translation to translate internal state-action representations of an autonomous agent into natural language. We evaluate our technique in the Frogger game environment, training an autonomous game playing agent to rationalize its action choices using natural language. A natural language training corpus is collected from human players thinking out loud as they play the game. We motivate the use of rationalization as an approach to explanation generation and show the results of two experiments evaluating the effectiveness of rationalization. Results of these evaluations show that neural machine translation is able to accurately generate rationalizations that describe agent behavior, and that rationalizations are more satisfying to humans than other alternative methods of explanation.", "year": 2017, "venue": "AAAI/ACM Conference on AI, Ethics, and Society", "authors": [ "Brent Harrison", "Upol Ehsan", "Mark O. Riedl" ], "externalIds": { "MAG": "2594164664", "DBLP": "journals/corr/HarrisonER17", "ArXiv": "1702.07826", "DOI": "10.1145/3278721.3278736", "CorpusId": 236152083 }, "url": "https://www.semanticscholar.org/paper/acbea6c0a445fc61963c8587c6021dd5d80c95e3", "referenceCount": 29, "citationCount": 210, "influentialCitationCount": 12, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Beyond the Turk: Alternative Platforms for Crowdsourcing Behavioral Research", "abstract": "Abstract The success of Amazon Mechanical Turk (MTurk) as an online research platform has come at a price: MTurk has suffered from slowing rates of population replenishment, and growing participant non-naivety. Recently, a number of alternative platforms have emerged, offering capabilities similar to MTurk but providing access to new and more naive populations. After surveying several options, we empirically examined two such platforms, CrowdFlower (CF) and Prolific Academic (ProA). In two studies, we found that participants on both platforms were more naive and less dishonest compared to MTurk participants. Across the three platforms, CF provided the best response rate, but CF participants failed more attention-check questions and did not reproduce known effects replicated on ProA and MTurk. 
Moreover, ProA participants produced data quality that was higher than CF's and comparable to MTurk's. ProA and CF participants were also much more diverse than participants from MTurk.", "year": 2016, "venue": "", "authors": [ "Eyal Péer", "S. Samat", "L. Brandimarte", "Alessandro Acquisti" ], "externalIds": { "MAG": "2585292421", "DOI": "10.2139/ssrn.2594183", "CorpusId": 120097026 }, "url": "https://www.semanticscholar.org/paper/ca9f3a04a1b6524105f51676abdd198b118a7d18", "referenceCount": 28, "citationCount": 1965, "influentialCitationCount": 86, "isOpenAccess": true, "fieldsOfStudy": [ "Psychology" ] }, { "title": "“Why Should I Trust You?”: Explaining the Predictions of Any Classifier", "abstract": "Despite widespread adoption, machine learning models remain mostly black boxes. Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks). We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.", "year": 2016, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Marco Tulio Ribeiro", "Sameer Singh", "Carlos Guestrin" ], "externalIds": { "ACL": "N16-3020", "MAG": "2951501516", "DBLP": "journals/corr/RibeiroSG16", "ArXiv": "1602.04938", "DOI": "10.1145/2939672.2939778", "CorpusId": 13029170 }, "url": "https://www.semanticscholar.org/paper/c0883f5930a232a9c1ad601c978caede29155979", "referenceCount": 41, "citationCount": 14452, "influentialCitationCount": 1693, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Architectural Style Classification Using Multinomial Latent Logistic Regression", "abstract": null, "year": 2014, "venue": "European Conference on Computer Vision", "authors": [ "Zhe Xu", "D. Tao", "Ya Zhang", "Junjie Wu", "A. Tsoi" ], "externalIds": { "MAG": "50236904", "DBLP": "conf/eccv/XuTZWT14", "DOI": "10.1007/978-3-319-10590-1_39", "CorpusId": 6570946 }, "url": "https://www.semanticscholar.org/paper/bf6fd53680c5ec7b998c60bd75243d5b7cf7f93f", "referenceCount": 20, "citationCount": 91, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Updating during reading comprehension: why causality matters.", "abstract": "The present set of 7 experiments systematically examined the effectiveness of adding causal explanations to simple refutations in reducing or eliminating the impact of outdated information on subsequent comprehension. 
The addition of a single causal-explanation sentence to a refutation was sufficient to eliminate any measurable disruption in comprehension caused by the outdated information (Experiment 1) but was not sufficient to eliminate its reactivation (Experiment 2). However, a 3 sentence causal-explanation addition to a refutation eliminated both any measurable disruption in comprehension (Experiment 3) and the reactivation of the outdated information (Experiment 4). A direct comparison between the 1 and 3 causal-explanation conditions provided converging evidence for these findings (Experiment 5). Furthermore, a comparison of the 3 sentence causal-explanation condition with a 3 sentence qualified-elaboration condition demonstrated that even though both conditions were sufficient to eliminate any measurable disruption in comprehension (Experiment 6), only the causal-explanation condition was sufficient to eliminate the reactivation of the outdated information (Experiment 7). These results establish a boundary condition under which outdated information will influence comprehension; they also have broader implications for both the updating process and knowledge revision in general.", "year": 2013, "venue": "Journal of Experimental Psychology. Learning, Memory and Cognition", "authors": [ "Panayiota Kendeou", "Emily R. Smith", "E. J. O'Brien" ], "externalIds": { "MAG": "2313422041", "DOI": "10.1037/a0029468", "CorpusId": 12280888, "PubMed": "22845069" }, "url": "https://www.semanticscholar.org/paper/6cec4be7f015f8998b31cc0ab6f34853afdbfb64", "referenceCount": 47, "citationCount": 145, "influentialCitationCount": 9, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Psychology" ] }, { "title": "Correcting false information in memory: Manipulating the strength of misinformation encoding and its retraction", "abstract": null, "year": 2011, "venue": "Psychonomic Bulletin & Review", "authors": [ "Ullrich K. H. Ecker", "S. Lewandowsky", "Briony Swire", "Darren Chang" ], "externalIds": { "MAG": "2128155832", "DOI": "10.3758/s13423-011-0065-1", "CorpusId": 1664205, "PubMed": "21359617" }, "url": "https://www.semanticscholar.org/paper/3124629de999af7765b49c1bd8df4a95f0c4d3f2", "referenceCount": 37, "citationCount": 261, "influentialCitationCount": 13, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Psychology" ] }, { "title": "Taking advice from intelligent systems: the double-edged sword of explanations", "abstract": "Research on intelligent systems has emphasized the benefits of providing explanations along with recommendations. But can explanations lead users to make incorrect decisions? We explored this question in a controlled experimental study with 18 professional network security analysts doing an incident classification task using a prototype cybersecurity system. The system provided three recommendations on each trial. The recommendations were displayed with explanations (called \"justifications\") or without. On half the trials, one of the recommendations was correct; in the other half none of the recommendations was correct. Users were more accurate with correct recommendations. Although there was no benefit overall of explanation, we found that a segment of the analysts were more accurate with explanations when a correct choice was available but were less accurate with explanations in the absence of a correct choice. 
We discuss implications of these results for the design of intelligent systems.", "year": 2011, "venue": "International Conference on Intelligent User Interfaces", "authors": [ "Kate Ehrlich", "Susanna E. Kirk", "J. Patterson", "Jamie C. Rasmussen", "Steven I. Ross", "D. Gruen" ], "externalIds": { "DBLP": "conf/iui/EhrlichKPRRG11", "MAG": "2037306653", "DOI": "10.1145/1943403.1943424", "CorpusId": 18311315 }, "url": "https://www.semanticscholar.org/paper/8ba0aa6f04272922c957e36c87c5d2fe50fa0a44", "referenceCount": 27, "citationCount": 37, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The critical role of retrieval practice in long-term retention", "abstract": null, "year": 2011, "venue": "Trends in Cognitive Sciences", "authors": [ "H. Roediger", "A. C. Butler" ], "externalIds": { "MAG": "2111804672", "DOI": "10.1016/j.tics.2010.09.003", "CorpusId": 11014168, "PubMed": "20951630" }, "url": "https://www.semanticscholar.org/paper/5fe7bb49dca35e9b06c3b37bf60fe48ef779903c", "referenceCount": 100, "citationCount": 1335, "influentialCitationCount": 48, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Psychology" ] }, { "title": "Prospering in Dynamically-Competitive Environments: Organizational Capability as Knowledge Integration", "abstract": null, "year": 2009, "venue": "", "authors": [ "M. Zack" ], "externalIds": { "MAG": "3112041062", "DOI": "10.4324/9780080509778-14", "CorpusId": 233500689 }, "url": "https://www.semanticscholar.org/paper/22bfec997428eab8710ca39f5cabcae4be1152b7", "referenceCount": 0, "citationCount": 2114, "influentialCitationCount": 394, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The role of trust in automation reliance", "abstract": null, "year": 2003, "venue": "Int. J. Hum. Comput. Stud.", "authors": [ "Mary T. Dzindolet", "S. Peterson", "Regina A. Pomranky", "L. Pierce", "Hall P. Beck" ], "externalIds": { "DBLP": "journals/ijmms/DzindoletPPPB03", "MAG": "2010158189", "DOI": "10.1016/S1071-5819(03)00038-7", "CorpusId": 29006669 }, "url": "https://www.semanticscholar.org/paper/b0e89da3eb67767b63ca5c23c233238f70263ae7", "referenceCount": 37, "citationCount": 1031, "influentialCitationCount": 78, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Working Knowledge: How Organizations Manage What They Know", "abstract": null, "year": 2000, "venue": "", "authors": [ "M. Bradley" ], "externalIds": { "MAG": "1585075410", "DOI": "10.1108/NLW.2000.101.6.282.4", "CorpusId": 54132309 }, "url": "https://www.semanticscholar.org/paper/79482062c125e00455a7a71b8976b20f6025a463", "referenceCount": 0, "citationCount": 2376, "influentialCitationCount": 255, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Organizing knowledge in the knowledge development cycle", "abstract": "The main aim of the paper is to examine some of the strategies that can be matched to increase the effectiveness of the knowledge development cycle. In manufacturing and operational works, the effectiveness of different organizing strategies to enhance the quality of manufacturing processes and products is well established. In knowledge works, however, we lack such frameworks. Unlike manufacturing and operational processes, knowledge development processes are often chaotic, unstructured, and unsystematic, resulting in intangible products. Therefore, the principles of manufacturing strategies cannot be applied in the knowledge development cycle. 
In knowledge works, organizing strategies should be defined and initiated based on knowledge development phases (e.g. knowledge creation, knowledge adoption, knowledge distribution, and knowledge review and revision). Each phase, in the knowledge development cycle, needs to be evaluated in context of its characteristics on repetition, standardization, reliability, and specifications.", "year": 2000, "venue": "Journal of Knowledge Management", "authors": [ "G. Bhatt" ], "externalIds": { "MAG": "2030823974", "DBLP": "journals/jkm/Bhatt00", "DOI": "10.1108/13673270010315371", "CorpusId": 35210091 }, "url": "https://www.semanticscholar.org/paper/c27c42b245aab7cfe6f870622d8dfa863f51bbff", "referenceCount": 43, "citationCount": 375, "influentialCitationCount": 27, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A theoretical review of the misinformation effect: Predictions from an activation-based memory model", "abstract": null, "year": 1998, "venue": "", "authors": [ "Michael S. Ayers", "L. Reder" ], "externalIds": { "MAG": "2135614762", "DOI": "10.3758/BF03209454", "CorpusId": 145622629 }, "url": "https://www.semanticscholar.org/paper/7b354da543d1160aa9145a58886fc3d914e90b6a", "referenceCount": 97, "citationCount": 282, "influentialCitationCount": 12, "isOpenAccess": true, "fieldsOfStudy": [ "Psychology" ] }, { "title": "Cognitive Load During Problem Solving: Effects on Learning", "abstract": "Considerable evidence indicates that domain specific knowledge in the form of schemas is the primary factor distinguishing experts from novices in problem-solving skill. Evidence that conventional problem-solving activity is not effective in schema acquisition is also accumulating. It is suggested that a major reason for the ineffectiveness of problem solving as a learning device, is that the cognitive processes required by the two activities overlap insufficiently, and that conventional problem solving in the form of means-ends analysis requires a relatively large amount of cognitive processing capacity which is consequently unavailable for schema acquisition. A computational model and experimental evidence provide support for this contention. Theoretical and practical implications are discussed.", "year": 1988, "venue": "Cognitive Sciences", "authors": [ "J. Sweller" ], "externalIds": { "DBLP": "journals/cogsci/Sweller88", "MAG": "2130736456", "DOI": "10.1207/S15516709COG1202_4", "CorpusId": 9585835 }, "url": "https://www.semanticscholar.org/paper/d88c481743db95687bf9d2861c16cd006f67a0a1", "referenceCount": 36, "citationCount": 6587, "influentialCitationCount": 588, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Psychology" ] }, { "title": "Reconstruction of automobile destruction: An example of the interaction between language and memory", "abstract": null, "year": 1974, "venue": "", "authors": [ "E. Loftus", "J. 
Palmer" ], "externalIds": { "MAG": "2018290856", "DOI": "10.1016/S0022-5371(74)80011-3", "CorpusId": 143526400 }, "url": "https://www.semanticscholar.org/paper/6cba61868b7cc3689aca04c9541cb884ac02f7ee", "referenceCount": 12, "citationCount": 1561, "influentialCitationCount": 44, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology" ] }, { "title": "Human-AI Complementarity in Hybrid Intelligence Systems: A Structured Literature Review", "abstract": null, "year": 2021, "venue": "Pacific Asia Conference on Information Systems", "authors": [ "Patrick Hemmer", "Max Schemmer", "Michael Vössing", "Niklas Kühl" ], "externalIds": { "DBLP": "conf/pacis/HemmerSVK21", "CorpusId": 237357256 }, "url": "https://www.semanticscholar.org/paper/acb0c1fbf5bee3a3b87b376b2dfed51cda41f17d", "referenceCount": 0, "citationCount": 52, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Towards Design Principles for User-Centric Explainable AI in Fraud Detection", "abstract": null, "year": 2021, "venue": "Interacción", "authors": [ "Douglas Cirqueira", "M. Helfert", "Marija Bezbradica" ], "externalIds": { "DBLP": "conf/hci/CirqueiraHB21", "DOI": "10.1007/978-3-030-77772-2_2", "CorpusId": 236150494 }, "url": "https://www.semanticscholar.org/paper/4bfc5c7f6c79f031a2fc9fcb9cf1dc0a3e1949d2", "referenceCount": 69, "citationCount": 11, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Explainable Artificial Intelligence (XAI): How the Visualization of AI Predictions Affects User Cognitive Load and Confidence", "abstract": null, "year": 2021, "venue": "Information Systems and Neuroscience", "authors": [ "Antoine Hudon", "Théophile Demazure", "A. Karran", "Pierre-Majorique Léger", "S. 
Sénécal" ], "externalIds": { "DOI": "10.1007/978-3-030-88900-5_27", "CorpusId": 240199397 }, "url": "https://www.semanticscholar.org/paper/6fcdd600a53fe05c83d5c92352a8cef067a4ecfd", "referenceCount": 21, "citationCount": 17, "influentialCitationCount": 2, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "EvaluatingXAI:Acomparisonofrule-basedandexample-based explanations", "abstract": null, "year": 2021, "venue": "Artificial intelligence", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "To trust or to think: cognitive forcing functions can reduce overreliance on AI in AI-assisted decision-making", "abstract": null, "year": 2021, "venue": "Proceedings of the ACM on Human-computer Interaction", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "UnremarkableAI:Fittingintelligentdecisionsupportintocritical,clinicaldecision-making processes", "abstract": null, "year": 2019, "venue": "Proceedings of the 2019 CHI conference on human factors in computing systems", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Fake news, false information and more: Countering human biases", "abstract": null, "year": 2018, "venue": "Institute of Policy Studies (IPS) Working Papers", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "TheICAPframework:Linkingcognitiveengagementtoactivelearningoutcomes", "abstract": null, "year": 2014, "venue": "Educationalpsychologist", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Human Reasoning and Cognitive Science", "abstract": "A new proposal for integrating the employment of formal and empirical methods in the study of human reasoning.", "year": 2008, "venue": "", "authors": [ "K. Stenning", "M. Lambalgen" ], "externalIds": { "DBLP": "books/daglib/0035278", "MAG": "1595235134", "DOI": "10.7551/mitpress/7964.001.0001", "CorpusId": 14439847 }, "url": "https://www.semanticscholar.org/paper/1ce03bdf54afc8b04f9aa71f92f020877c6902fe", "referenceCount": 309, "citationCount": 493, "influentialCitationCount": 37, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Psychology" ] }, { "title": "The knowledge-creating company", "abstract": "How have Japanese companies become world leaders in the automotive and electronics industries, among others? What is the secret of their success? Two leading Japanese business experts, Ikujiro Nonaka and Hirotaka Takeuchi, are the first to tie the success of Japanese companies to their ability to create new knowledge and use it to produce successful products and technologies. In The Knowledge-Creating Company, Nonaka and Takeuchi provide an inside look at how Japanese companies go about creating this new knowledge organizationally. 
The authors point out that there are two types of knowledge: explicit knowledge, contained in manuals and procedures, and tacit knowledge, learned only by experience, and communicated only indirectly, through metaphor and analogy. U.S. managers focus on explicit knowledge. The Japanese, on the other hand, focus on tacit knowledge. And this, the authors argue, is the key to their success--the Japanese have learned how to transform tacit into explicit knowledge. To explain how this is done--and illuminate Japanese business practices as they do so--the authors range from Greek philosophy to Zen Buddhism, from classical economists to modern management gurus, illustrating the theory of organizational knowledge creation with case studies drawn from such firms as Honda, Canon, Matsushita, NEC, Nissan, 3M, GE, and even the U.S. Marines. For instance, using Matsushita's development of the Home Bakery (the world's first fully automated bread-baking machine for home use), they show how tacit knowledge can be converted to explicit knowledge: when the designers couldn't perfect the dough kneading mechanism, a software programmer apprenticed herself with the master baker at Osaka International Hotel, gained a tacit understanding of kneading, and then conveyed this information to the engineers. In addition, the authors show that, to create knowledge, the best management style is neither top-down nor bottom-up, but rather what they call \"middle-up-down,\" in which the middle managers form a bridge between the ideals of top management and the chaotic realities of the frontline. As we make the turn into the 21st century, a new society is emerging. Peter Drucker calls it the \"knowledge society,\" one that is drastically different from the \"industrial society,\" and one in which acquiring and applying knowledge will become key competitive factors. Nonaka and Takeuchi go a step further, arguing that creating knowledge will become the key to sustaining a competitive advantage in the future. Because the competitive environment and customer preferences change constantly, knowledge perishes quickly. With The Knowledge-Creating Company, managers have at their fingertips years of insight from Japanese firms that reveal how to create knowledge continuously, and how to exploit it to make successful new products, services, and systems.", "year": 2008, "venue": "", "authors": [ "野中 郁次郎" ], "externalIds": { "MAG": "658088948", "CorpusId": 106944911 }, "url": "https://www.semanticscholar.org/paper/26abbbc8c25cc25c207026347e1ed991e498c07f", "referenceCount": 0, "citationCount": 6387, "influentialCitationCount": 662, "isOpenAccess": false, "fieldsOfStudy": [ "Engineering" ] }, { "title": "Misinformation", "abstract": null, "year": 2006, "venue": "British Dental Journal", "authors": [ "M. Heliotis" ], "externalIds": { "DOI": "10.1038/sj.bdj.4813381", "CorpusId": 29724391, "PubMed": "16568034" }, "url": "https://www.semanticscholar.org/paper/c14781d7043f314cfb8a56d65a014f995d2a26f5", "referenceCount": 0, "citationCount": 47, "influentialCitationCount": 4, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Sources of the continued influence effect: When misinformation in memory affects later inferences.", "abstract": "Several lines of research have found that information previously encoded into memory can influence inferences and judgments, even when more recent information discredits it. 
Previous theories have attributed this to difficulties in editing memory—failing to successfully trace out and alter inferences or explanations generated before a correction. However, in Experiments 1A and 1B, Ss who had received an immediate correction made as many inferences based on misinformation as Ss who had received the correction later in the account (and presumably had made more inferences requiring editing). In a 2nd experiment, the availability (Tversky & Kahneman, 1973) of the misinformation within the comprehension context was tested. The results showed that Ss continued to make inferences involving discredited information when it afforded causal structure, but not when only incidentally mentioned or primed during an intervening task. Experiments 3A and 3B found that providing a plausible causal alternative, rather than simply negating misinformation, mitigated the effect. The findings suggest that misinformation can still influence inferences one generates after a correction has occurred; however, providing an alternative that replaces the causal structure it affords can reduce the effects of misinformation.", "year": 1994, "venue": "", "authors": [ "Hollyn M. Johnson", "Colleen Seifert" ], "externalIds": { "MAG": "2012975501", "DOI": "10.1037/0278-7393.20.6.1420", "CorpusId": 143733932 }, "url": "https://www.semanticscholar.org/paper/17936961f3d3cf4487a6c7d37f704e74394891fc", "referenceCount": 42, "citationCount": 457, "influentialCitationCount": 49, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology" ] }, { "title": "Self-Explanations: How Students Study and Use Examples in Learning to Solve Problems", "abstract": "The present paper analyzes the self-generated explanations (from talk-aloud protocols) that “Good” and “Poor” students produce while studying worked-out examples of mechanics problems, and their subsequent reliance on examples during problem solving. We find that “Good” students learn with understanding: They generate many explanations which refine and expand the conditions for the action parts of the example solutions, and relate these actions to principles in the text. These self-explanations are guided by accurate monitoring of their own understanding and misunderstanding. Such learning results in example-independent knowledge and in a better understanding of the principles presented in the text. “Poor” students do not generate sufficient self-explanations, monitor their learning inaccurately, and subsequently rely heavily on examples. We then discuss the role of self-explanations in facilitating problem solving, as well as the adequacy of current AI models of explanation-based learning to account for these psychological findings.", "year": 1989, "venue": "Cognitive Sciences", "authors": [ "M. Chi", "M. Bassok", "Matthew W. Lewis", "P. Reimann", "R. Glaser" ], "externalIds": { "DBLP": "journals/cogsci/ChiBLRG89", "DOI": "10.1207/s15516709cog1302_1", "CorpusId": 2078404 }, "url": "https://www.semanticscholar.org/paper/9f4beefc9b2a0eb8b92b4b79a878d39163c0d428", "referenceCount": 30, "citationCount": 2272, "influentialCitationCount": 121, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Simple Sequentially Rejective Multiple Test Procedure", "abstract": "This paper presents a simple and widely applicable multiple test procedure of the sequentially rejective type, i.e. hypotheses are rejected one at a time until no further rejections can be done. 
It is shown that the test has a prescribed level of significance protection against error of the first kind for any combination of true hypotheses. The power properties of the test and a number of possible applications are also discussed.", "year": 1979, "venue": "", "authors": [ "S. Holm" ], "externalIds": { "MAG": "2121044470", "DOI": "10.2307/4615733", "CorpusId": 122415379 }, "url": "https://www.semanticscholar.org/paper/b0ebbcf713b3ddf3f94325bc58dc39ff76fdc412", "referenceCount": 6, "citationCount": 22206, "influentialCitationCount": 1971, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Semantic integration of verbal information into a visual memory.", "abstract": "A total of 1,242 subjects, in five experiments plus a pilot study, saw a series of slides depicting a single auto-pedestrian accident. The purpose of these experiments was to investigate how information supplied after an event influences a witness's memory for that event. Subjects were exposed to either consistent, misleading, or irrelevant information after the accident event. Misleading information produced less accurate responding on both a yes-no and a two-alternative forced-choice recognition test. Further, misleading information had a larger impact if introduced just prior to a final test rather than immediately after the initial event. The effects of misleading information cannot be accounted for by a simple demand-characteristics explanation. Overall, the results suggest that information to which a witness is exposed after an event, whether that information is consistent or misleading, is integrated into the witness's memory of the event.", "year": 1978, "venue": "Journal of Experimental Psychology Human Learning & Memory", "authors": [ "E. Loftus", "David G. Miller", "Helen J. Burns" ], "externalIds": { "MAG": "2103430676", "DOI": "10.1037/0278-7393.4.1.19", "CorpusId": 9734949, "PubMed": "621467" }, "url": "https://www.semanticscholar.org/paper/3ec201417a7d607678a524dd1e9bbe2278ca81bd", "referenceCount": 9, "citationCount": 1458, "influentialCitationCount": 55, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology", "Medicine", "Materials Science" ] }, { "title": "2024. Explanations considered harmful: The Impact of misleading Explanations on Accuracy in hybrid human-AI decision making", "abstract": null, "year": null, "venue": "World Conference on Explainable Artificial Intelligence", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Explainable artificial intelligence: Evaluating the objective and subjective impacts of xai on human-agent interaction", "abstract": null, "year": null, "venue": "International Journal of Human–Computer Interaction", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. Opinion Paper:“So what if ChatGPT wrote it?” Multidisciplinary perspectives on opportunities, challenges and implications of generative conversational AI for research, practice and policy", "abstract": null, "year": null, "venue": "International Journal of Information Management", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. 
Towards meaningful anomaly detection: The effect of counterfactual explanations on the investigation of anomalies in multivariate time series", "abstract": null, "year": null, "venue": "arXiv preprint", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. On the Effect of Contextual Information on Human Delegation Behavior in Human-AI collaboration", "abstract": null, "year": null, "venue": "arXiv", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2023. The Landscape of User-centered Misinformation Interventions-A Systematic Literature Review", "abstract": null, "year": null, "venue": "Comput. Surveys", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. Explaining the Unexplainable: The Impact of Misleading Explanations on Trust in Unreliable Predictions for Hardly Assessable Tasks", "abstract": null, "year": null, "venue": "Proceedings of the 32nd ACM Conference on User Modeling, Adaptation and Personalization", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Towards Interpretable End-Stage Renal Disease (ESRD) Prediction: Utilizing Administrative Claims Data with Explainable AI Techniques": { "paper_title": "Towards Interpretable End-Stage Renal Disease (ESRD) Prediction: Utilizing Administrative Claims Data with Explainable AI Techniques", "arxiv_id": "2409.12087v1", "keyword": "explainable ai", "authors": [ "Yubo Li", "Saba Al-Sayouri", "Rema Padman" ], "references": [ { "title": "A Predictive Model for Progression of Chronic Kidney Disease to Kidney Failure Using a Large Administrative Claims Database", "abstract": "Background To create an appropriate chronic kidney disease (CKD) management program, we developed a predictive model to identify patients in a large administrative claims database with CKD stages 3 or 4 who were at high risk for progression to kidney failure. Methods The predictive model was developed and validated utilizing a subset of patients with CKD stages 3 or 4 derived from a large Aetna claims database. The study spanned 36 months, comprised of a 12-month (2015) baseline period and a 24-month (2016–2017) prediction period. All patients were ≥18 years of age and continuously enrolled for 36 months. Multivariate logistic regression was used to develop models. Prediction model performance measures included area under the receiver operating characteristic curve (AUROC), calibration, and gain and lift charts. Results Of the 74,114 patients identified as having CKD stages 3 or 4 during the baseline period, 2476 (3.3%) had incident kidney failure during the prediction period. The predictive model included the effect of numerous variables, including age, gender, CKD stage, hypertension (HTN), diabetes mellitus (DM), congestive heart failure, peripheral vascular disease, anemia, hyperkalemia (HK), prospective episode risk group score, and poor adherence to renin-angiotensin-aldosterone system inhibitors. The strongest predictors of progression to kidney failure were CKD stage (4 vs 3), HTN, DM, and HK. 
The ROC and calibration analyses in the validation sample demonstrated good predictive accuracy (AUROC=0.844) and calibration. The top two prediction deciles identified 70.8% of patients who progressed to kidney failure during the prediction period. Conclusion This novel predictive model had good accuracy for identifying, from a large national database, patients with CKD who were at high risk of progressing to kidney failure within 2 years. Early identification using this model could potentially lead to improved health outcomes and reduced healthcare expenditures in this at-risk population.", "year": 2021, "venue": "ClinicoEconomics and Outcomes Research", "authors": [ "D. Dai", "Paula J. Alvarez", "Steven D. Woods" ], "externalIds": { "PubMedCentral": "8186939", "DOI": "10.2147/CEOR.S313857", "CorpusId": 235391977, "PubMed": "34113139" }, "url": "https://www.semanticscholar.org/paper/d8a5d0fa699316615172a6b859c45f6506a884f3", "referenceCount": 42, "citationCount": 32, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Chronic kidney disease progression among patients with type 2 diabetes identified in US administrative claims: a population cohort study", "abstract": "ABSTRACT Background Chronic kidney disease (CKD), one of the most common complications of type 2 diabetes (T2D), is associated with poor health outcomes and high healthcare expenditures. As the CKD population increases, a better understanding of the prevalence and progression of CKD is critical. However, few contemporary studies have explored the progression of CKD relative to its onset in T2D patients using established markers derived from real-world care settings. Methods This retrospective, population-based cohort study assessed CKD progression among adults with T2D and with newly recognized CKD identified from US administrative claims data between 1 January 2008 and 30 September 2018. Included were patients with T2D and laboratory evidence of CKD as indicated by the established estimated glomerular filtration rate (eGFR) and urine albumin:creatinine ratio (UACR) criteria. Disease progression was described as transitions across the eGFR- and UACR-based stages. Results A total of 65 731 and 23 035 patients with T2D contributed to the analysis of eGFR- and UACR-based CKD stage progression, respectively. CKD worsening was observed in approximately 10–17% of patients over a median follow-up of 2 years. Approximately one-third of patients experienced an increase in eGFR values or a decrease in UACR values during follow-up. Conclusions A relatively high proportion of patients were observed with disease progression over a short period of time, highlighting the need for better identification of patients at risk of rapidly progressive CKD. Future studies are needed to determine the clinical characteristics of these patients to inform earlier diagnostic and therapeutic interventions aimed at slowing disease progression.", "year": 2020, "venue": "Clinical Kidney Journal", "authors": [ "C. Kovesdy", "D. Isaman", "N. Petruski-Ivleva", "L. Fried", "M. Blankenburg", "A. Gay", "P. Velentgas", "K. 
Folkerts" ], "externalIds": { "PubMedCentral": "8162850", "MAG": "3115036011", "DOI": "10.1093/ckj/sfaa200", "CorpusId": 234437746, "PubMed": "34084461" }, "url": "https://www.semanticscholar.org/paper/860a768d4461e0e8dbd8a8a97e5c47cacc43233c", "referenceCount": 29, "citationCount": 16, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Mortality in the United States, 2019.", "abstract": "This report presents final 2019 U.S. mortality data on deaths and death rates by demographic and medical characteristics. These data provide information on mortality patterns among U.S. residents by variables such as sex, age, race and Hispanic origin, and cause of death. Life expectancy estimates, agespecific death rates, 10 leading causes of death, and 10 leading causes of infant death were analyzed by comparing 2019 and 2018 final data (1).", "year": 2020, "venue": "NCHS data brief", "authors": [ "K. Kochanek", "Jiaquan Xu", "E. Arias" ], "externalIds": { "CorpusId": 230658956, "PubMed": "33395387" }, "url": "https://www.semanticscholar.org/paper/c3231613e940ff0cf0cfb1f122d8ff5e521ed592", "referenceCount": 6, "citationCount": 157, "influentialCitationCount": 12, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "A Model to Predict Risk of Hyperkalemia in Patients with Chronic Kidney Disease Using a Large Administrative Claims Database", "abstract": "Background Chronic kidney disease (CKD) is responsible for substantial clinical and economic burden. Drugs that inhibit the renin-angiotensin-aldosterone system inhibitors (RAASi) slow CKD progression in many common clinical scenarios. Guideline-directed medical therapy requires maximal recommended doses of RAASi, which clinicians are often reluctant to prescribe because of the associated risk of hyperkalemia (HK). Objective This study aims to develop and validate a model to identify individuals with CKD at elevated risk for developing HK over a 12-month period on the basis of lab, medical, and pharmacy claims. Methods Using claims from a large US healthcare payer, we developed a model to predict the probability of individuals identified with CKD but not HK in 2016 (baseline year [BY]) who developed HK in 2017 (prediction year [PY]). The study population was comprised of members continuously enrolled with medical and pharmacy benefits and CKD (BY). Members were excluded from the analysis if they had HK (by lab results or diagnosis code) or dialysis (BY). Prediction model performance measures included area under the receiver operating characteristic curve (AUROC), calibration, and gain and lift charts. Results Of 435,512 members identified with CKD but not HK (BY), 6235 (1.43%) showed incident HK (PY). Compared with individuals without incident HK (PY), these members had a higher comorbidity burden, use of RAASi, and healthcare utilization. The AUROC and calibration analyses showed good predictive accuracy (area under the curve [AUC]=0.843 and calibration). The top 2 HK-prediction deciles identified 75.94% of members who went on to develop HK (PY). Conclusion Guideline-recommended doses of RAASi therapy can be limited by the risk of HK. Novel potassium binders may permit more patients at risk to benefit from these maximal RAASi doses. This predictive model successfully identified the risk of developing HK up to 1 year in advance.", "year": 2020, "venue": "ClinicoEconomics and Outcomes Research", "authors": [ "Ajay Sharma", "Paula J. Alvarez", "Steven D. Woods", "D. 
Dai" ], "externalIds": { "PubMedCentral": "7665575", "MAG": "3106001390", "DOI": "10.2147/CEOR.S267063", "CorpusId": 226965049, "PubMed": "33204127" }, "url": "https://www.semanticscholar.org/paper/1dc78ebd08dd68f0906f2dac7cfb93315f38462c", "referenceCount": 47, "citationCount": 8, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Agreement Between Administrative Database and Medical Chart Review for the Prediction of Chronic Kidney Disease G category", "abstract": "Background: Chronic kidney disease (CKD) is a major health issue and cardiovascular risk factor. Validity assessment of administrative data for the detection of CKD in research for drug benefit and risk using real-world data is important. Existing algorithms have limitations and we need to develop new algorithms using administrative data, giving the importance of drug benefit/risk ratio in real world. Objective: The aim of this study was to validate a predictive algorithm for CKD GFR category 4-5 (eGFR < 30 mL/min/1.73 m2 but not receiving dialysis or CKD G4-5ND) using the administrative databases of the province of Quebec relative to estimated glomerular filtration rate (eGFR) as a reference standard. Design: This is a retrospective cohort study using chart collection and administrative databases. Setting: The study was conducted in a community outpatient medical clinic and pre-dialysis outpatient clinic in downtown Montreal and rural area. Patients: Patient medical files with at least 2 serum creatinine measures (up to 1 year apart) between September 1, 2013, and June 30, 2015, were reviewed consecutively (going back in time from the day we started the study). We excluded patients with end-stage renal disease on dialysis. The study was started in September 2013. Measurement: Glomerular filtration rate was estimated using the CKD Epidemiological Collaboration (CKD-EPI) from each patient’s file. Several algorithms were developed using 3 administrative databases with different combinations of physician claims (diagnostics and number of visits) and hospital discharge data in the 5 years prior to the cohort entry, as well as specific drug use and medical intervention in preparation for dialysis in the 2 years prior to the cohort entry. Methods: Chart data were used to assess eGFR. The validity of various algorithms for detection of CKD groups was assessed with sensitivity, specificity, positive predictive value (PPV), and negative predictive value (NPV). Results: A total of 434 medical files were reviewed; mean age of patients was 74.2 ± 10.6 years, and 83% were older than 65 years. Sensitivity of algorithm #3 (diagnosis within 2-5 years and/or specific drug use within 2 years and nephrologist visit ≥4 within 2-5 years) in identification of CKD G4-5ND ranged from 82.5% to 89.0%, specificity from 97.1% to 98.9% with PPV and NPV ranging from 94.5% to 97.7% and 91.1% to 94.2%, respectively. The subsequent subgroup analysis (diabetes, hypertension, and <65 and ≥65 years) and also the comparisons of predicted prevalence in a cohort of older adults relative to published data emphasized the accuracy of our algorithm for patients with severe CKD (CKD G4-5ND). Limitations: Our cohort comprised mostly older adults, and results may not be generalizable to all adults. Participants with CKD without 2 serum creatinine measurements up to 1 year apart were excluded. 
Conclusions: The case definition of severe CKD G4-5ND derived from an algorithm using diagnosis code, drug use, and nephrologist visits from administrative databases is a valid algorithm compared with medical chart reviews in older adults.", "year": 2020, "venue": "Canadian Journal of Kidney Health and Disease", "authors": [ "L. Roy", "M. Zappitelli", "B. White-Guay", "J. Lafrance", "M. Dorais", "S. Perreault" ], "externalIds": { "MAG": "3092569311", "PubMedCentral": "7549183", "DOI": "10.1177/2054358120959908", "CorpusId": 225040140, "PubMed": "33101698" }, "url": "https://www.semanticscholar.org/paper/396bce29f65fa6098fd4dfa7d74f92881e71fb88", "referenceCount": 64, "citationCount": 19, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Machine‐learning–based early prediction of end‐stage renal disease in patients with diabetic kidney disease using clinical trials data", "abstract": "To predict end‐stage renal disease (ESRD) in patients with type 2 diabetes by using machine‐learning models with multiple baseline demographic and clinical characteristics.", "year": 2020, "venue": "Diabetes, obesity and metabolism", "authors": [ "S. Belur Nagaraj", "M. Pena", "W. Ju", "Hiddo J. L. Heerspink" ], "externalIds": { "PubMedCentral": "7756814", "MAG": "3081103314", "DOI": "10.1111/dom.14178", "CorpusId": 221328160, "PubMed": "32844582" }, "url": "https://www.semanticscholar.org/paper/62936faff758332176a701410b06e3bd30cc79ca", "referenceCount": 23, "citationCount": 34, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Health Care Resource Use and Cost Burden of Chronic Kidney Disease in Patients With Chronic Liver Disease: A Real‐World Claims Analysis", "abstract": "Chronic Liver Disease (CLD) is associated with an increased risk of chronic kidney disease (CKD). However, the health care burden of CKD in the CLD spectrum is unknown. We aimed to evaluate the health care use and cost burdens associated with CKD in patients with CLD in the United States by using real‐world claims data. We analyzed data from the Truven Health MarketScan Commercial Claims database from 2010 to 2015. A total of 19,664 patients with CLD with or without comorbid CKD were identified using International Classification of Diseases, Ninth Revision, codes and matched 1:1 by sociodemographic characteristics and comorbidities using propensity scores. Total and service‐specific unadjusted and adjusted health care parameters were analyzed for the 12 months following an index date selected at random to capture whole disease burdens. In CLD, comorbid CKD was associated with a higher annual number of claims per person (CKD vs. no CKD, 69 vs. 55) and higher total annual median health care costs (CKD vs. no CKD, $21,397 vs. $16,995). A subanalysis stratified by CKD category showed that health care use and cost burden in CLD increased with disease stage, with a peak 12‐month median cost difference of $77,859 in patients on dialysis. The adjusted per person annual health care cost was higher for CKD cases compared to controls ($35,793 vs. $24,048, respectively; P < 0.0001). Stratified by the type of CLD, the highest between‐group adjusted cost differences were for cirrhosis, viral hepatitis, hemochromatosis, and nonalcoholic fatty liver disease. Conclusion: CKD is a cost multiplier in CLD. The CKD health care burden in liver disease differs by the type of CLD. 
Improved CKD screening and proactive treatment interventions for at‐risk patients can limit the excess burden associated with CKD in patients with CLD.", "year": 2020, "venue": "Hepatology Communications", "authors": [ "V. Rustgi", "You Li", "T. John", "C. Catalano", "Mohamed I. Elsaid" ], "externalIds": { "MAG": "3046585025", "PubMedCentral": "7527762", "DOI": "10.1002/hep4.1573", "CorpusId": 222072220, "PubMed": "33024912" }, "url": "https://www.semanticscholar.org/paper/5caea0dfdd2bf0f972440132ef2012f395bf16e5", "referenceCount": 37, "citationCount": 11, "influentialCitationCount": 2, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Machine Learning Prediction Models for Chronic Kidney Disease Using National Health Insurance Claim Data in Taiwan", "abstract": "Background and Objective: Chronic kidney disease (CKD) represent a heavy burden on the healthcare system because of the increasing number of patients, high risk of progression to end-stage renal disease, and poor prognosis of morbidity and mortality. The aim of this study is to develop a machine-learning model that uses the comorbidity and medication data, obtained from Taiwan's National Health Insurance Research Database, to forecast whether an individual will develop CKD within the next 6 or 12 months, and thus forecast the prevalence in the population. Methods: A total of 18,000 people with CKD and 72,000 people without CKD diagnosis along with the past two years of medication and comorbidity data matched by propensity score were used to build a predicting model. A series of approaches were tested, including Convoluted Neural Networks (CNN). 5-fold cross-validation was used to assess the performance metrics of the algorithms. Results: Both for the 6 month and 12-month models, the CNN approach performed best, with the AUROC of 0.957 and 0.954, respectively. The most prominent features in the tree-based models were identified, including diabetes mellitus, age, gout, and medications such as sulfonamides, angiotensins which had an impact on the progression of CKD. Conclusions: The model proposed in this study can be a useful tool for the policy-makers helping them in predicting the trends of CKD in the population in the next 6 to 12 months. Information provided by this model can allow closely monitoring the people with risk, early detection of CKD, better allocation of resources, and patient-centric management.", "year": 2020, "venue": "medRxiv", "authors": [ "S. Krishnamurthy", "Kapeleshh Ks", "E. Dovgan", "M. Luštrek", "Barbara Gradišek Piletič", "Kathiravan Srinivasan", "Y. Li", "A. Gradišek", "S. 
Syed-Abdul" ], "externalIds": { "MAG": "3037478678", "PubMedCentral": "8151834", "DOI": "10.3390/healthcare9050546", "CorpusId": 220069587, "PubMed": "34067129" }, "url": "https://www.semanticscholar.org/paper/7d0b4c325afe803ac35fb8b3fc66411f24c7eacb", "referenceCount": 28, "citationCount": 62, "influentialCitationCount": 5, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine" ] }, { "title": "Machine learning distilled metabolite biomarkers for early stage renal injury", "abstract": null, "year": 2019, "venue": "Metabolomics", "authors": [ "Yan Guo", "Hui Yu", "Danqian Chen", "Ying-yong Zhao" ], "externalIds": { "MAG": "2993999592", "DOI": "10.1007/s11306-019-1624-0", "CorpusId": 208650241, "PubMed": "31807893" }, "url": "https://www.semanticscholar.org/paper/68f25c3ea3958fb4646f09c42a9e56032f3035bf", "referenceCount": 44, "citationCount": 19, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine" ] }, { "title": "An Empirical Evaluation of Generic Convolutional and Recurrent Networks for Sequence Modeling", "abstract": "For most deep learning practitioners, sequence modeling is synonymous with recurrent networks. Yet recent results indicate that convolutional architectures can outperform recurrent networks on tasks such as audio synthesis and machine translation. Given a new sequence modeling task or dataset, which architecture should one use? We conduct a systematic evaluation of generic convolutional and recurrent architectures for sequence modeling. The models are evaluated across a broad range of standard tasks that are commonly used to benchmark recurrent networks. Our results indicate that a simple convolutional architecture outperforms canonical recurrent networks such as LSTMs across a diverse range of tasks and datasets, while demonstrating longer effective memory. We conclude that the common association between sequence modeling and recurrent networks should be reconsidered, and convolutional networks should be regarded as a natural starting point for sequence modeling tasks. To assist related work, we have made code available at this http URL .", "year": 2018, "venue": "arXiv.org", "authors": [ "Shaojie Bai", "J. Z. Kolter", "V. Koltun" ], "externalIds": { "MAG": "2792764867", "DBLP": "journals/corr/abs-1803-01271", "ArXiv": "1803.01271", "CorpusId": 4747877 }, "url": "https://www.semanticscholar.org/paper/921196c32213a229245a9705ee4768bc941e7a26", "referenceCount": 84, "citationCount": 4012, "influentialCitationCount": 572, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Exploratory review on class imbalance problem: An overview", "abstract": "Nothing is as important as human lives. There are so many cases where we need to predict the things, the causes that lead to the destruction of mankind. Such things occur occasionally but can be destructive. Therefore their prediction is very important so as to solve them at the early and safe stage. Such data is said to be imbalanced where negative cases outnumber the positive cases by huge proportions and the prediction of these rare occurring positive cases is very important. So far all the machine learners are biased towards the majority class. In this overview we are exploring all the techniques that have been used to mine the imbalanced data sets. Techniques at pre-processing level, algorithmic level are being discussed in this review. Also ensemble and hybrid techniques are being reviewed. 
In this paper, techniques of two types of imbalanced data sets are being reviewed viz binary class imbalanced data and multi class imbalanced data.", "year": 2017, "venue": "International Conference on Computing Communication and Networking Technologies", "authors": [ "Fatima Shakeel", "A. Sai Sabhitha", "Seema Sharma" ], "externalIds": { "MAG": "2775751058", "DOI": "10.1109/ICCCNT.2017.8204150", "CorpusId": 32093723 }, "url": "https://www.semanticscholar.org/paper/4ab637357f69911131f1f8102ee8948a55cf71fa", "referenceCount": 31, "citationCount": 20, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Dipole: Diagnosis Prediction in Healthcare via Attention-based Bidirectional Recurrent Neural Networks", "abstract": "Predicting the future health information of patients from the historical Electronic Health Records (EHR) is a core research task in the development of personalized healthcare. Patient EHR data consist of sequences of visits over time, where each visit contains multiple medical codes, including diagnosis, medication, and procedure codes. The most important challenges for this task are to model the temporality and high dimensionality of sequential EHR data and to interpret the prediction results. Existing work solves this problem by employing recurrent neural networks (RNNs) to model EHR data and utilizing simple attention mechanism to interpret the results. However, RNN-based approaches suffer from the problem that the performance of RNNs drops when the length of sequences is large, and the relationships between subsequent visits are ignored by current RNN-based approaches. To address these issues, we propose Dipole, an end-to-end, simple and robust model for predicting patients' future health information. Dipole employs bidirectional recurrent neural networks to remember all the information of both the past visits and the future visits, and it introduces three attention mechanisms to measure the relationships of different visits for the prediction. With the attention mechanisms, Dipole can interpret the prediction results effectively. Dipole also allows us to interpret the learned medical code representations which are confirmed positively by medical experts. Experimental results on two real world EHR datasets show that the proposed Dipole can significantly improve the prediction accuracy compared with the state-of-the-art diagnosis prediction approaches and provide clinically meaningful interpretation.", "year": 2017, "venue": "Knowledge Discovery and Data Mining", "authors": [ "Fenglong Ma", "Radha Chitta", "Jing Zhou", "Quanzeng You", "Tong Sun", "Jing Gao" ], "externalIds": { "MAG": "3099136959", "ArXiv": "1706.05764", "DBLP": "conf/kdd/MaCZYSG17", "DOI": "10.1145/3097983.3098088", "CorpusId": 10361189 }, "url": "https://www.semanticscholar.org/paper/cfc2d39011c1ae0e9f0bdf64a13616142e155de5", "referenceCount": 33, "citationCount": 509, "influentialCitationCount": 55, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Unified Approach to Interpreting Model Predictions", "abstract": "Understanding why a model makes a certain prediction can be as crucial as the prediction's accuracy in many applications. However, the highest accuracy for large modern datasets is often achieved by complex models that even experts struggle to interpret, such as ensemble or deep learning models, creating a tension between accuracy and interpretability. 
In response, various methods have recently been proposed to help users interpret the predictions of complex models, but it is often unclear how these methods are related and when one method is preferable over another. To address this problem, we present a unified framework for interpreting predictions, SHAP (SHapley Additive exPlanations). SHAP assigns each feature an importance value for a particular prediction. Its novel components include: (1) the identification of a new class of additive feature importance measures, and (2) theoretical results showing there is a unique solution in this class with a set of desirable properties. The new class unifies six existing methods, notable because several recent methods in the class lack the proposed desirable properties. Based on insights from this unification, we present new methods that show improved computational performance and/or better consistency with human intuition than previous approaches.", "year": 2017, "venue": "Neural Information Processing Systems", "authors": [ "Scott M. Lundberg", "Su-In Lee" ], "externalIds": { "MAG": "2618851150", "DBLP": "journals/corr/LundbergL17", "ArXiv": "1705.07874", "CorpusId": 21889700 }, "url": "https://www.semanticscholar.org/paper/442e10a3c6640ded9408622005e3c2a8906ce4c2", "referenceCount": 10, "citationCount": 16601, "influentialCitationCount": 1905, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "RETAIN: An Interpretable Predictive Model for Healthcare using Reverse Time Attention Mechanism", "abstract": "Accuracy and interpretability are two dominant features of successful predictive models. Typically, a choice must be made in favor of complex black box models such as recurrent neural networks (RNN) for accuracy versus less accurate but more interpretable traditional models such as logistic regression. This tradeoff poses challenges in medicine where both accuracy and interpretability are important. We addressed this challenge by developing the REverse Time AttentIoN model (RETAIN) for application to Electronic Health Records (EHR) data. RETAIN achieves high accuracy while remaining clinically interpretable and is based on a two-level neural attention model that detects influential past visits and significant clinical variables within those visits (e.g. key diagnoses). RETAIN mimics physician practice by attending the EHR data in a reverse time order so that recent clinical visits are likely to receive higher attention. RETAIN was tested on a large health system EHR dataset with 14 million visits completed by 263K patients over an 8 year period and demonstrated predictive accuracy and computational scalability comparable to state-of-the-art methods such as RNN, and ease of interpretability comparable to traditional models.", "year": 2016, "venue": "Neural Information Processing Systems", "authors": [ "E. Choi", "M. T. Bahadori", "Jimeng Sun", "Joshua A. Kulas", "A. Schuetz", "W. Stewart" ], "externalIds": { "DBLP": "conf/nips/ChoiBSKSS16", "MAG": "2517259736", "ArXiv": "1608.05745", "CorpusId": 948039 }, "url": "https://www.semanticscholar.org/paper/e8e9125704edbcf73999f2f452fe4a701163d6b6", "referenceCount": 41, "citationCount": 1112, "influentialCitationCount": 176, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "“Why Should I Trust You?”: Explaining the Predictions of Any Classifier", "abstract": "Despite widespread adoption, machine learning models remain mostly black boxes. 
Understanding the reasons behind predictions is, however, quite important in assessing trust, which is fundamental if one plans to take action based on a prediction, or when choosing whether to deploy a new model. Such understanding also provides insights into the model, which can be used to transform an untrustworthy model or prediction into a trustworthy one. In this work, we propose LIME, a novel explanation technique that explains the predictions of any classifier in an interpretable and faithful manner, by learning an interpretable model locally around the prediction. We also propose a method to explain models by presenting representative individual predictions and their explanations in a non-redundant way, framing the task as a submodular optimization problem. We demonstrate the flexibility of these methods by explaining different models for text (e.g. random forests) and image classification (e.g. neural networks). We show the utility of explanations via novel experiments, both simulated and with human subjects, on various scenarios that require trust: deciding if one should trust a prediction, choosing between models, improving an untrustworthy classifier, and identifying why a classifier should not be trusted.", "year": 2016, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Marco Tulio Ribeiro", "Sameer Singh", "Carlos Guestrin" ], "externalIds": { "ACL": "N16-3020", "MAG": "2951501516", "DBLP": "journals/corr/RibeiroSG16", "ArXiv": "1602.04938", "DOI": "10.1145/2939672.2939778", "CorpusId": 13029170 }, "url": "https://www.semanticscholar.org/paper/c0883f5930a232a9c1ad601c978caede29155979", "referenceCount": 41, "citationCount": 14452, "influentialCitationCount": 1693, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Learning Phrase Representations using RNN Encoder–Decoder for Statistical Machine Translation", "abstract": "In this paper, we propose a novel neural network model called RNN Encoder‐Decoder that consists of two recurrent neural networks (RNN). One RNN encodes a sequence of symbols into a fixed-length vector representation, and the other decodes the representation into another sequence of symbols. The encoder and decoder of the proposed model are jointly trained to maximize the conditional probability of a target sequence given a source sequence. The performance of a statistical machine translation system is empirically found to improve by using the conditional probabilities of phrase pairs computed by the RNN Encoder‐Decoder as an additional feature in the existing log-linear model. Qualitatively, we show that the proposed model learns a semantically and syntactically meaningful representation of linguistic phrases.", "year": 2014, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Kyunghyun Cho", "B. V. 
Merrienboer", "Çaglar Gülçehre", "Dzmitry Bahdanau", "Fethi Bougares", "Holger Schwenk", "Yoshua Bengio" ], "externalIds": { "MAG": "2950635152", "DBLP": "conf/emnlp/ChoMGBBSB14", "ACL": "D14-1179", "ArXiv": "1406.1078", "DOI": "10.3115/v1/D14-1179", "CorpusId": 5590763 }, "url": "https://www.semanticscholar.org/paper/0b544dfe355a5070b60986319a3f51fb45d1348e", "referenceCount": 33, "citationCount": 21611, "influentialCitationCount": 2934, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "ADASYN: Adaptive synthetic sampling approach for imbalanced learning", "abstract": "This paper presents a novel adaptive synthetic (ADASYN) sampling approach for learning from imbalanced data sets. The essential idea of ADASYN is to use a weighted distribution for different minority class examples according to their level of difficulty in learning, where more synthetic data is generated for minority class examples that are harder to learn compared to those minority examples that are easier to learn. As a result, the ADASYN approach improves learning with respect to the data distributions in two ways: (1) reducing the bias introduced by the class imbalance, and (2) adaptively shifting the classification decision boundary toward the difficult examples. Simulation analyses on several machine learning data sets show the effectiveness of this method across five evaluation metrics.", "year": 2008, "venue": "IEEE World Congress on Computational Intelligence", "authors": [ "Haibo He", "Yang Bai", "E. A. Garcia", "Shutao Li" ], "externalIds": { "DBLP": "conf/ijcnn/HeBGL08", "MAG": "2104933073", "DOI": "10.1109/IJCNN.2008.4633969", "CorpusId": 1438164 }, "url": "https://www.semanticscholar.org/paper/48234756b7cf798bfeb47328f7c5d597fd4838c2", "referenceCount": 38, "citationCount": 3455, "influentialCitationCount": 306, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A study of the behavior of several methods for balancing machine learning training data", "abstract": "There are several aspects that might influence the performance achieved by existing learning systems. It has been reported that one of these aspects is related to class imbalance in which examples in training data belonging to one class heavily outnumber the examples in the other class. In this situation, which is found in real world data describing an infrequent but important event, the learning system may have difficulties to learn the concept related to the minority class. In this work we perform a broad experimental evaluation involving ten methods, three of them proposed by the authors, to deal with the class imbalance problem in thirteen UCI data sets. Our experiments provide evidence that class imbalance does not systematically hinder the performance of learning systems. In fact, the problem seems to be related to learning with too few minority class examples in the presence of other complicating factors, such as class overlapping. Two of our proposed methods deal with these conditions directly, allying a known over-sampling method with data cleaning methods in order to produce better-defined class clusters. Our comparative experiments show that, in general, over-sampling methods provide more accurate results than under-sampling methods considering the area under the ROC curve (AUC). This result seems to contradict results previously published in the literature. 
Two of our proposed methods, Smote + Tomek and Smote + ENN, presented very good results for data sets with a small number of positive examples. Moreover, Random over-sampling, a very simple over-sampling method, is very competitive to more complex over-sampling methods. Since the over-sampling methods provided very good performance results, we also measured the syntactic complexity of the decision trees induced from over-sampled data. Our results show that these trees are usually more complex than the ones induced from original data. Random over-sampling usually produced the smallest increase in the mean number of induced rules and Smote + ENN the smallest increase in the mean number of conditions per rule, when compared among the investigated over-sampling methods.", "year": 2004, "venue": "SKDD", "authors": [ "Gustavo E. A. P. A. Batista", "R. Prati", "M. C. Monard" ], "externalIds": { "DBLP": "journals/sigkdd/BatistaPM04", "MAG": "1993220166", "DOI": "10.1145/1007730.1007735", "CorpusId": 207155015 }, "url": "https://www.semanticscholar.org/paper/6aae0dc122102693e8136856ffc8b72df7f78386", "referenceCount": 41, "citationCount": 3324, "influentialCitationCount": 249, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Long Short-Term Memory", "abstract": "Learning to store information over extended time intervals by recurrent backpropagation takes a very long time, mostly because of insufficient, decaying error backflow. We briefly review Hochreiter's (1991) analysis of this problem, then address it by introducing a novel, efficient, gradient based method called long short-term memory (LSTM). Truncating the gradient where this does not do harm, LSTM can learn to bridge minimal time lags in excess of 1000 discrete-time steps by enforcing constant error flow through constant error carousels within special units. Multiplicative gate units learn to open and close access to the constant error flow. LSTM is local in space and time; its computational complexity per time step and weight is O(1). Our experiments with artificial data involve local, distributed, real-valued, and noisy pattern representations. In comparisons with real-time recurrent learning, back propagation through time, recurrent cascade correlation, Elman nets, and neural sequence chunking, LSTM leads to many more successful runs, and learns much faster. LSTM also solves complex, artificial long-time-lag tasks that have never been solved by previous recurrent network algorithms.", "year": 1997, "venue": "Neural Computation", "authors": [ "Sepp Hochreiter", "J. Schmidhuber" ], "externalIds": { "MAG": "2064675550", "DBLP": "journals/neco/HochreiterS97", "DOI": "10.1162/neco.1997.9.8.1735", "CorpusId": 1915014, "PubMed": "9377276" }, "url": "https://www.semanticscholar.org/paper/2e9d221c206e9503ceb452302d68d10e293f2a10", "referenceCount": 48, "citationCount": 80986, "influentialCitationCount": 9250, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Learning representations by back-propagating errors", "abstract": null, "year": 1986, "venue": "Nature", "authors": [ "D. Rumelhart", "Geoffrey E. Hinton", "Ronald J. 
Williams" ], "externalIds": { "MAG": "1498436455", "DOI": "10.1038/323533a0", "CorpusId": 205001834 }, "url": "https://www.semanticscholar.org/paper/052b1d8ce63b07fec3de9dbb583772d860b7c769", "referenceCount": 2, "citationCount": 25673, "influentialCitationCount": 794, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Learning", "abstract": null, "year": 2016, "venue": "International Journal of Semantic Computing", "authors": [ "Xing Hao", "Guigang Zhang", "Shang Ma" ], "externalIds": { "DBLP": "journals/escri/HaoZ17", "DOI": "10.1142/S1793351X16500045", "CorpusId": 1779661 }, "url": "https://www.semanticscholar.org/paper/4f8d648c52edf74e41b0996128aa536e13cc7e82", "referenceCount": 2, "citationCount": 50533, "influentialCitationCount": 2842, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multi-Trajectory Models of Chronic Kidney Disease Progression", "abstract": "An ever increasing number of people are affected by chronic kidney disease (CKD). A better understanding of the progression ofCKD and its complications is needed to address what is becoming a major burden for health-care systems worldwide. Utilizing a rich data set consisting of the Electronic Health Records (EHRs) of more than 33,000 patients from a leading community nephrology practice in Western Pennsylvania, we applied group-based trajectory modeling (GBTM) in order to detect patient risk groups and uncover typical progressions of CKD and related comorbidities and complications. We have found distinct risk groups with differing trajectories and are able to classify new patients into these groups with high accuracy (up to ≈ 90%). Our results suggest that multitrajectory modeling via GBTM can shed light on the developmental course ofCKD and the interactions between related complications.", "year": 2016, "venue": "American Medical Informatics Association Annual Symposium", "authors": [ "Philipp Burckhardt", "D. Nagin", "R. Padman" ], "externalIds": { "MAG": "2621141933", "DBLP": "conf/amia/BurckhardtNP16", "CorpusId": 6864947, "PubMed": "28269932" }, "url": "https://www.semanticscholar.org/paper/656d649fa870b9c3f1c5fece6bad986f4af0e47a", "referenceCount": 22, "citationCount": 23, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Computer Science" ] }, { "title": "SMOTE: Synthetic Minority Over-sampling Technique", "abstract": "An approach to the construction of classifiers from imbalanced datasets is described. A dataset is imbalanced if the classification categories are not approximately equally represented. Often real-world data sets are predominately composed of \"normal\" examples with only a small percentage of \"abnormal\" or \"interesting\" examples. It is also the case that the cost of misclassifying an abnormal (interesting) example as a normal example is often much higher than the cost of the reverse error. Under-sampling of the majority (normal) class has been proposed as a good means of increasing the sensitivity of a classifier to the minority class. This paper shows that a combination of our method of oversampling the minority (abnormal)cla ss and under-sampling the majority (normal) class can achieve better classifier performance (in ROC space)tha n only under-sampling the majority class. 
This paper also shows that a combination of our method of over-sampling the minority class and under-sampling the majority class can achieve better classifier performance (in ROC space) than varying the loss ratios in Ripper or class priors in Naive Bayes. Our method of over-sampling the minority class involves creating synthetic minority class examples. Experiments are performed using C4.5, Ripper and a Naive Bayes classifier. The method is evaluated using the area under the Receiver Operating Characteristic curve (AUC) and the ROC convex hull strategy.", "year": 2002, "venue": "Journal of Artificial Intelligence Research", "authors": [ "N. Chawla", "K. Bowyer", "L. Hall", "W. Kegelmeyer" ], "externalIds": { "ArXiv": "1106.1813", "DBLP": "journals/corr/abs-1106-1813", "MAG": "3100785508", "DOI": "10.1613/jair.953", "CorpusId": 1554582 }, "url": "https://www.semanticscholar.org/paper/8cb44f06586f609a29d9b496cc752ec01475dffe", "referenceCount": 42, "citationCount": 22523, "influentialCitationCount": 2431, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Addressing the Curse of Imbalanced Training Sets: One-Sided Selection", "abstract": "Adding examples of the majority class to the training set can have a detrimental effect on the learner's behavior: noisy or otherwise unreliable examples from the majority class can overwhelm the minority class. The paper discusses criteria to evaluate the utility of classifiers induced from such imbalanced training sets, gives explanation of the poor behavior of some learners under these circumstances, and suggests as a solution a simple technique called one-sided selection of examples", "year": 1997, "venue": "International Conference on Machine Learning", "authors": [ "M. Kubát", "S. Matwin" ], "externalIds": { "MAG": "85350352", "DBLP": "conf/icml/KubatM97", "CorpusId": 18370956 }, "url": "https://www.semanticscholar.org/paper/ebc3914181d76c817f0e35f788b7c4c0f80abb07", "referenceCount": 13, "citationCount": 2504, "influentialCitationCount": 163, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "National Kidney Foundation", "abstract": null, "year": null, "venue": "Chronic Kidney Disease (CKD)", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] }, "Structure Learning via Mutual Information": { "paper_title": "Structure Learning via Mutual Information", "arxiv_id": "2409.14235v1", "keyword": "automated machine learning", "authors": [ "Jeremy Nixon" ], "references": [ { "title": "Multivariate Adaptive Regression Splines", "abstract": "This work considers multivariate adaptive regression splines. The method produces models that give a reasonably accurate approximation even when the relationships between the predictor and dependent variables are non-monotonic and difficult to approximate with parametric models. The dependence of the approximation error on model complexity is studied experimentally. Test data, ECG data, and data from financial mathematics are used to illustrate the method.", "year": 2019, "venue": "Hands-On Machine Learning with R", "authors": [ "Bradley C. Boehmke", "Brandon M. 
Greenwell" ], "externalIds": { "MAG": "2999663256", "DOI": "10.1002/9781118445112.stat07551", "CorpusId": 33779230 }, "url": "https://www.semanticscholar.org/paper/a85479fef62f87ab4f7ea03465db08c928cae5bb", "referenceCount": 7, "citationCount": 2596, "influentialCitationCount": 382, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Invariant Risk Minimization", "abstract": "We introduce Invariant Risk Minimization (IRM), a learning paradigm to estimate invariant correlations across multiple training distributions. To achieve this goal, IRM learns a data representation such that the optimal classifier, on top of that data representation, matches for all training distributions. Through theory and experiments, we show how the invariances learned by IRM relate to the causal structures governing the data and enable out-of-distribution generalization.", "year": 2019, "venue": "arXiv.org", "authors": [ "Martín Arjovsky", "L. Bottou", "Ishaan Gulrajani", "David Lopez-Paz" ], "externalIds": { "MAG": "2953494151", "DBLP": "journals/corr/abs-1907-02893", "ArXiv": "1907.02893", "CorpusId": 195820364 }, "url": "https://www.semanticscholar.org/paper/753b7a701adc1b6072378bd048cfa8567885d9c7", "referenceCount": 77, "citationCount": 1862, "influentialCitationCount": 469, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Reinforcement Learning and Control as Probabilistic Inference: Tutorial and Review", "abstract": "The framework of reinforcement learning or optimal control provides a mathematical formalization of intelligent decision making that is powerful and broadly applicable. While the general form of the reinforcement learning problem enables effective reasoning about uncertainty, the connection between reinforcement learning and inference in probabilistic models is not immediately obvious. However, such a connection has considerable value when it comes to algorithm design: formalizing a problem as probabilistic inference in principle allows us to bring to bear a wide array of approximate inference tools, extend the model in flexible and powerful ways, and reason about compositionality and partial observability. In this article, we will discuss how a generalization of the reinforcement learning or optimal control problem, which is sometimes termed maximum entropy reinforcement learning, is equivalent to exact probabilistic inference in the case of deterministic dynamics, and variational inference in the case of stochastic dynamics. We will present a detailed derivation of this framework, overview prior work that has drawn on this and related ideas to propose new reinforcement learning and control algorithms, and describe perspectives on future research.", "year": 2018, "venue": "arXiv.org", "authors": [ "S. Levine" ], "externalIds": { "DBLP": "journals/corr/abs-1805-00909", "MAG": "2799151646", "ArXiv": "1805.00909", "CorpusId": 19077536 }, "url": "https://www.semanticscholar.org/paper/6ecc4b1ab05f3ec12484a0ea36abfd6271c5c5ba", "referenceCount": 64, "citationCount": 595, "influentialCitationCount": 88, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "UMAP: Uniform Manifold Approximation and Projection for Dimension Reduction", "abstract": "UMAP (Uniform Manifold Approximation and Projection) is a novel manifold learning technique for dimension reduction. UMAP is constructed from a theoretical framework based in Riemannian geometry and algebraic topology. 
The result is a practical scalable algorithm that applies to real world data. The UMAP algorithm is competitive with t-SNE for visualization quality, and arguably preserves more of the global structure with superior run time performance. Furthermore, UMAP has no computational restrictions on embedding dimension, making it viable as a general purpose dimension reduction technique for machine learning.", "year": 2018, "venue": "arXiv.org", "authors": [ "Leland McInnes", "John Healy" ], "externalIds": { "DBLP": "journals/corr/abs-1802-03426", "MAG": "2786672974", "ArXiv": "1802.03426", "CorpusId": 3641284 }, "url": "https://www.semanticscholar.org/paper/3a288c63576fc385910cb5bc44eaea75b442e62e", "referenceCount": 64, "citationCount": 8026, "influentialCitationCount": 1177, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Mutual Information Neural Estimation", "abstract": "We argue that the estimation of mutual information between high dimensional continuous random variables can be achieved by gradient descent over neural networks. We present a Mutual Information Neural Estimator (MINE) that is linearly scalable in dimensionality as well as in sample size, trainable through back-prop, and strongly consistent. We present a handful of applications on which MINE can be used to minimize or maximize mutual information. We apply MINE to improve adversarially trained generative models. We also use MINE to implement Information Bottleneck, applying it to supervised classification; our results demonstrate substantial improvement in flexibility and performance in these settings.", "year": 2018, "venue": "International Conference on Machine Learning", "authors": [ "Mohamed Ishmael Belghazi", "A. Baratin", "Sai Rajeswar", "Sherjil Ozair", "Yoshua Bengio", "R. Devon Hjelm", "Aaron C. Courville" ], "externalIds": { "MAG": "2803832867", "DBLP": "conf/icml/BelghaziBROBHC18", "ArXiv": "1801.04062", "CorpusId": 44220142 }, "url": "https://www.semanticscholar.org/paper/6b73775f40467aed52784ff355b9bb7168e9078c", "referenceCount": 61, "citationCount": 1098, "influentialCitationCount": 194, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Elements of Causal Inference: Foundations and Learning Algorithms", "abstract": null, "year": 2017, "venue": "", "authors": [ "J. Peters", "D. Janzing", "Bernhard Schölkopf" ], "externalIds": { "MAG": "2801890059", "CorpusId": 86533208 }, "url": "https://www.semanticscholar.org/paper/46f6a90fcf0ecc4b60470a1f35cd95d65d5f8d9b", "referenceCount": 0, "citationCount": 1576, "influentialCitationCount": 252, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Emergence of Invariance and Disentanglement in Deep Representations", "abstract": "Using established principles from Information Theory and Statistics, we show that in a deep neural network invariance to nuisance factors is equivalent to information minimality of the learned representation, and that stacking layers and injecting noise during training naturally bias the network towards learning invariant representations. We then show that, in order to avoid memorization, we need to limit the quantity of information stored in the weights, which leads to a novel usage of the Information Bottleneck Lagrangian on the weights as a learning criterion. This also has an alternative interpretation as minimizing a PAC-Bayesian bound on the test error. 
Finally, we exploit a duality between weights and activations induced by the architecture, to show that the information in the weights bounds the minimality and Total Correlation of the layers, therefore showing that regularizing the weights explicitly or implicitly, using SGD, not only helps avoid overfitting, but also fosters invariance and disentangling of the learned representation. The theory also enables predicting sharp phase transitions between underfitting and overfitting random labels at precise information values, and sheds light on the relation between the geometry of the loss function, in particular so-called “flat minima,” and generalization.", "year": 2017, "venue": "Information Theory and Applications Workshop", "authors": [ "A. Achille", "Stefano Soatto" ], "externalIds": { "MAG": "2949669420", "ArXiv": "1706.01350", "DBLP": "journals/jmlr/AchilleS18", "DOI": "10.1109/ITA.2018.8503149", "CorpusId": 53082914 }, "url": "https://www.semanticscholar.org/paper/4d7574c0c4aca70e5811a8e33906f0106d6b76e6", "referenceCount": 46, "citationCount": 441, "influentialCitationCount": 35, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Model-Agnostic Meta-Learning for Fast Adaptation of Deep Networks", "abstract": "We propose an algorithm for meta-learning that is model-agnostic, in the sense that it is compatible with any model trained with gradient descent and applicable to a variety of different learning problems, including classification, regression, and reinforcement learning. The goal of meta-learning is to train a model on a variety of learning tasks, such that it can solve new learning tasks using only a small number of training samples. In our approach, the parameters of the model are explicitly trained such that a small number of gradient steps with a small amount of training data from a new task will produce good generalization performance on that task. In effect, our method trains the model to be easy to fine-tune. We demonstrate that this approach leads to state-of-the-art performance on two few-shot image classification benchmarks, produces good results on few-shot regression, and accelerates fine-tuning for policy gradient reinforcement learning with neural network policies.", "year": 2017, "venue": "International Conference on Machine Learning", "authors": [ "Chelsea Finn", "P. Abbeel", "S. Levine" ], "externalIds": { "MAG": "2604763608", "DBLP": "journals/corr/FinnAL17", "ArXiv": "1703.03400", "CorpusId": 6719686 }, "url": "https://www.semanticscholar.org/paper/c889d6f98e6d79b89c3a6adf8a921f88fa6ba518", "referenceCount": 52, "citationCount": 10456, "influentialCitationCount": 2326, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Towards A Rigorous Science of Interpretable Machine Learning", "abstract": "As machine learning systems become ubiquitous, there has been a surge of interest in interpretable machine learning: systems that provide explanation for their outputs. These explanations are often used to qualitatively assess other criteria such as safety or non-discrimination. However, despite the interest in interpretability, there is very little consensus on what interpretable machine learning is and how it should be measured. In this position paper, we first define interpretability and describe when interpretability is needed (and when it is not). 
Next, we suggest a taxonomy for rigorous evaluation and expose open questions towards a more rigorous science of interpretable machine learning.", "year": 2017, "venue": "", "authors": [ "F. Doshi-Velez", "Been Kim" ], "externalIds": { "MAG": "2594475271", "ArXiv": "1702.08608", "CorpusId": 11319376 }, "url": "https://www.semanticscholar.org/paper/5c39e37022661f81f79e481240ed9b175dec6513", "referenceCount": 57, "citationCount": 3217, "influentialCitationCount": 257, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Domain Adaptation for Visual Applications: A Comprehensive Survey", "abstract": "The aim of this paper is to give an overview of domain adaptation and transfer learning with a specific view on visual applications. After a general motivation, we first position domain adaptation in the larger transfer learning problem. Second, we try to address and analyze briefly the state-of-the-art methods for different types of scenarios, first describing the historical shallow methods, addressing both the homogeneous and the heterogeneous domain adaptation methods. Third, we discuss the effect of the success of deep convolutional architectures which led to new type of domain adaptation methods that integrate the adaptation within the deep architecture. Fourth, we overview the methods that go beyond image categorization, such as object detection or image segmentation, video analyses or learning visual attributes. Finally, we conclude the paper with a section where we relate domain adaptation to other machine learning solutions.", "year": 2017, "venue": "arXiv.org", "authors": [ "G. Csurka" ], "externalIds": { "ArXiv": "1702.05374", "DBLP": "journals/corr/Csurka17", "MAG": "2590953969", "CorpusId": 7559976 }, "url": "https://www.semanticscholar.org/paper/9e47c7cfecba80ba48713013546ecad542ebf4d9", "referenceCount": 289, "citationCount": 469, "influentialCitationCount": 18, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "beta-VAE: Learning Basic Visual Concepts with a Constrained Variational Framework", "abstract": "an", "year": 2016, "venue": "International Conference on Learning Representations", "authors": [ "I. Higgins", "L. Matthey", "Arka Pal", "Christopher P. Burgess", "Xavier Glorot", "M. Botvinick", "S. Mohamed", "Alexander Lerchner" ], "externalIds": { "MAG": "2753738274", "CorpusId": 46798026 }, "url": "https://www.semanticscholar.org/paper/a90226c41b79f8b06007609f39f82757073641e2", "referenceCount": 34, "citationCount": 4451, "influentialCitationCount": 549, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Functional Data Analysis", "abstract": "With the advance of modern technology, more and more data are being recorded continuously during a time interval or intermittently at several discrete time points. These are both examples of functional data, which has become a commonly encountered type of data. Functional data analysis (FDA) encompasses the statistical methodology for such data. Broadly interpreted, FDA deals with the analysis and theory of data that are in the form of functions. This paper provides an overview of FDA, starting with simple statistical notions such as mean and covariance functions, then covering some core techniques, the most popular of which is functional principal component analysis (FPCA). FPCA is an important dimension reduction tool, and in sparse data situations it can be used to impute functional data that are sparsely observed. 
Other dimension reduction approaches are also discussed. In addition, we review another core technique, functional linear regression, as well as clustering and classification of functional d...", "year": 2016, "venue": "", "authors": [ "Jane-ling Wang", "Jeng-Min Chiou", "H. Müller" ], "externalIds": { "MAG": "16794263", "DOI": "10.1146/ANNUREV-STATISTICS-041715-033624", "CorpusId": 13709250 }, "url": "https://www.semanticscholar.org/paper/b29e5be7987216cb1aaf46c6927ff216e048240a", "referenceCount": 209, "citationCount": 3864, "influentialCitationCount": 517, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Deep learning and the information bottleneck principle", "abstract": "Deep Neural Networks (DNNs) are analyzed via the theoretical framework of the information bottleneck (IB) principle. We first show that any DNN can be quantified by the mutual information between the layers and the input and output variables. Using this representation we can calculate the optimal information theoretic limits of the DNN and obtain finite sample generalization bounds. The advantage of getting closer to the theoretical limit is quantifiable both by the generalization bound and by the network's simplicity. We argue that both the optimal architecture, number of layers and features/connections at each layer, are related to the bifurcation points of the information bottleneck tradeoff, namely, relevant compression of the input layer with respect to the output layer. The hierarchical representations at the layered network naturally correspond to the structural phase transitions along the information curve. We believe that this new insight can lead to new optimality bounds and deep learning algorithms.", "year": 2015, "venue": "Information Theory Workshop", "authors": [ "Naftali Tishby", "Noga Zaslavsky" ], "externalIds": { "MAG": "2949385597", "ArXiv": "1503.02406", "DBLP": "conf/itw/TishbyZ15", "DOI": "10.1109/ITW.2015.7133169", "CorpusId": 5541663 }, "url": "https://www.semanticscholar.org/paper/415229903f91a1f3fc7404f5e5997fde025c221d", "referenceCount": 14, "citationCount": 1396, "influentialCitationCount": 100, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Big Data: New Tricks for Econometrics", "abstract": "Computers are now involved in many economic transactions and can capture data associated with these transactions, which can then be manipulated and analyzed. Conventional statistical and econometric techniques such as regression often work well, but there are issues unique to big datasets that may require different tools. First, the sheer size of the data involved may require more powerful data manipulation tools. Second, we may have more potential predictors than appropriate for estimation, so we need to do some kind of variable selection. Third, large datasets may allow for more flexible relationships than simple linear models. Machine learning techniques such as decision trees, support vector machines, neural nets, deep learning, and so on may allow for more effective ways to model complex relationships. In this essay, I will describe a few of these tools for manipulating and analyzing big data. I believe that these methods have a lot to offer and should be more widely known and used by economists.", "year": 2014, "venue": "", "authors": [ "H. 
Varian" ], "externalIds": { "MAG": "2155419203", "DOI": "10.1257/JEP.28.2.3", "CorpusId": 12146482 }, "url": "https://www.semanticscholar.org/paper/9b0dd87208a03e78105491e3727213b9b8ac0419", "referenceCount": 62, "citationCount": 1261, "influentialCitationCount": 73, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Exploiting compositionality to explore a large space of model structures", "abstract": "The recent proliferation of richly structured probabilistic models raises the question of how to automatically determine an appropriate model for a dataset. We investigate this question for a space of matrix decomposition models which can express a variety of widely used models from unsupervised learning. To enable model selection, we organize these models into a context-free grammar which generates a wide variety of structures through the compositional application of a few simple rules. We use our grammar to generically and efficiently infer latent components and estimate predictive likelihood for nearly 2500 structures using a small toolbox of reusable algorithms. Using a greedy search over our grammar, we automatically choose the decomposition structure from raw data by evaluating only a small fraction of all models. The proposed method typically finds the correct structure for synthetic data and backs off gracefully to simpler models under heavy noise. It learns sensible structures for datasets as diverse as image patches, motion capture, 20 Questions, and U.S. Senate votes, all using exactly the same code.", "year": 2012, "venue": "Conference on Uncertainty in Artificial Intelligence", "authors": [ "R. Grosse", "R. Salakhutdinov", "W. Freeman", "J. Tenenbaum" ], "externalIds": { "DBLP": "conf/uai/GrosseSFT12", "ArXiv": "1210.4856", "MAG": "1949894903", "CorpusId": 7197 }, "url": "https://www.semanticscholar.org/paper/11035f85ead2b4b386956f4480b3d38d2e9a5ff5", "referenceCount": 37, "citationCount": 105, "influentialCitationCount": 3, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Representation Learning: A Review and New Perspectives", "abstract": "The success of machine learning algorithms generally depends on data representation, and we hypothesize that this is because different representations can entangle and hide more or less the different explanatory factors of variation behind the data. Although specific domain knowledge can be used to help design representations, learning with generic priors can also be used, and the quest for AI is motivating the design of more powerful representation-learning algorithms implementing such priors. This paper reviews recent work in the area of unsupervised feature learning and deep learning, covering advances in probabilistic models, autoencoders, manifold learning, and deep networks. This motivates longer term unanswered questions about the appropriate objectives for learning good representations, for computing representations (i.e., inference), and the geometrical connections between representation learning, density estimation, and manifold learning.", "year": 2012, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Yoshua Bengio", "Aaron C. 
Courville", "Pascal Vincent" ], "externalIds": { "ArXiv": "1206.5538", "MAG": "2952111767", "DBLP": "journals/pami/BengioCV13", "DOI": "10.1109/TPAMI.2013.50", "CorpusId": 393948, "PubMed": "23787338" }, "url": "https://www.semanticscholar.org/paper/184ac0766262312ba76bbdece4e7ffad0aa8180b", "referenceCount": 264, "citationCount": 11612, "influentialCitationCount": 550, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Medicine", "Mathematics" ] }, { "title": "The thermodynamics of prediction", "abstract": "A system responding to a stochastic driving signal can be interpreted as computing, by means of its dynamics, an implicit model of the environmental variables. The system's state retains information about past environmental fluctuations, and a fraction of this information is predictive of future ones. The remaining nonpredictive information reflects model complexity that does not improve predictive power, and thus represents the ineffectiveness of the model. We expose the fundamental equivalence between this model inefficiency and thermodynamic inefficiency, measured by dissipation. Our results hold arbitrarily far from thermodynamic equilibrium and are applicable to a wide range of systems, including biomolecular machines. They highlight a profound connection between the effective use of information and efficient thermodynamic operation: any system constructed to keep memory about its environment and to operate with maximal energetic efficiency has to be predictive.", "year": 2012, "venue": "Physical Review Letters", "authors": [ "Susanne Still", "David A. Sivak", "A. J. Bell", "G. Crooks" ], "externalIds": { "MAG": "2963121067", "DBLP": "journals/corr/abs-1203-3271", "ArXiv": "1203.3271", "DOI": "10.1103/PhysRevLett.109.120604", "CorpusId": 7039367, "PubMed": "23005932" }, "url": "https://www.semanticscholar.org/paper/073dc236ad2712732891fc7fe847058c9d5f9bcf", "referenceCount": 56, "citationCount": 223, "influentialCitationCount": 12, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Physics", "Mathematics", "Biology", "Medicine" ] }, { "title": "Detecting Novel Associations in Large Data Sets", "abstract": "A statistical method reveals relationships among variables in complex data sets. Identifying interesting relationships between pairs of variables in large data sets is increasingly important. Here, we present a measure of dependence for two-variable relationships: the maximal information coefficient (MIC). MIC captures a wide range of associations both functional and not, and for functional relationships provides a score that roughly equals the coefficient of determination (R2) of the data relative to the regression function. MIC belongs to a larger class of maximal information-based nonparametric exploration (MINE) statistics for identifying and classifying relationships. We apply MIC and MINE to data sets in global health, gene expression, major-league baseball, and the human gut microbiota and identify known and novel relationships.", "year": 2011, "venue": "Science", "authors": [ "David N. Reshef", "Yakir A. Reshef", "H. Finucane", "S. Grossman", "G. McVean", "P. Turnbaugh", "E. Lander", "M. 
Mitzenmacher", "Pardis C Sabeti" ], "externalIds": { "MAG": "2165700458", "DOI": "10.1126/science.1205438", "CorpusId": 1437878, "PubMed": "22174245" }, "url": "https://www.semanticscholar.org/paper/e7b5f2a2f23e0d32972705b5ec857599253601bc", "referenceCount": 74, "citationCount": 2609, "influentialCitationCount": 304, "isOpenAccess": true, "fieldsOfStudy": [ "Medicine", "Computer Science", "Biology" ] }, { "title": "Group Invariant Scattering", "abstract": "This paper constructs translation‐invariant operators on $\\font\\open=msbm10 at 10pt\\def\\R{\\hbox{\\open R}}{\\bf L}^2({{{\\R}}}^d)$, which are Lipschitz‐continuous to the action of diffeomorphisms. A scattering propagator is a path‐ordered product of nonlinear and noncommuting operators, each of which computes the modulus of a wavelet transform. A local integration defines a windowed scattering transform, which is proved to be Lipschitz‐continuous to the action of C2 diffeomorphisms. As the window size increases, it converges to a wavelet scattering transform that is translation invariant. Scattering coefficients also provide representations of stationary processes. Expected values depend upon high‐order moments and can discriminate processes having the same power spectrum. Scattering operators are extended on L2(G), where G is a compact Lie group, and are invariant under the action of G. Combining a scattering on $\\font\\open=msbm10 at 10pt\\def\\R{\\hbox{\\open R}}{\\bf L}^2({{{\\R}}}^d)$ and on L2(SO(d)) defines a translation‐ and rotation‐invariant scattering on $\\font\\open=msbm10 at 10pt\\def\\R{\\hbox{\\open R}}{\\bf L}^2({{{\\R}}}^d)$. © 2012 Wiley Periodicals, Inc.", "year": 2011, "venue": "arXiv.org", "authors": [ "S. Mallat" ], "externalIds": { "MAG": "2951967965", "ArXiv": "1101.2286", "DBLP": "journals/corr/abs-1101-2286", "DOI": "10.1002/CPA.21413", "CorpusId": 5851469 }, "url": "https://www.semanticscholar.org/paper/53aea2e87e124eaa587ef20dae55094d868fd57e", "referenceCount": 37, "citationCount": 934, "influentialCitationCount": 145, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "A Survey on Transfer Learning", "abstract": "A major assumption in many machine learning and data mining algorithms is that the training and future data must be in the same feature space and have the same distribution. However, in many real-world applications, this assumption may not hold. For example, we sometimes have a classification task in one domain of interest, but we only have sufficient training data in another domain of interest, where the latter data may be in a different feature space or follow a different data distribution. In such cases, knowledge transfer, if done successfully, would greatly improve the performance of learning by avoiding much expensive data-labeling efforts. In recent years, transfer learning has emerged as a new learning framework to address this problem. This survey focuses on categorizing and reviewing the current progress on transfer learning for classification, regression, and clustering problems. In this survey, we discuss the relationship between transfer learning and other related machine learning techniques such as domain adaptation, multitask learning and sample selection bias, as well as covariate shift. 
We also explore some potential future issues in transfer learning research.", "year": 2010, "venue": "IEEE Transactions on Knowledge and Data Engineering", "authors": [ "Sinno Jialin Pan", "Qiang Yang" ], "externalIds": { "DBLP": "journals/tkde/PanY10", "MAG": "2165698076", "DOI": "10.1109/TKDE.2009.191", "CorpusId": 740063 }, "url": "https://www.semanticscholar.org/paper/a25fbcbbae1e8f79c4360d26aa11a3abf1a11972", "referenceCount": 98, "citationCount": 19529, "influentialCitationCount": 935, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Nonparametric Information Theoretic Clustering Algorithm", "abstract": "In this paper we propose a novel clustering algorithm based on maximizing the mutual information between data points and clusters. Unlike previous methods, we neither assume the data are given in terms of distributions nor impose any parametric model on the within-cluster distribution. Instead, we utilize a non-parametric estimation of the average cluster entropies and search for a clustering that maximizes the estimated mutual information between data points and clusters. The improved performance of the proposed algorithm is demonstrated on several standard datasets.", "year": 2010, "venue": "International Conference on Machine Learning", "authors": [ "Lev Faivishevsky", "J. Goldberger" ], "externalIds": { "MAG": "2171108520", "DBLP": "conf/icml/FaivishevskyG10", "CorpusId": 15578983 }, "url": "https://www.semanticscholar.org/paper/0ec7735becb0bfb952a73c46fe80fdb241f509ab", "referenceCount": 23, "citationCount": 66, "influentialCitationCount": 9, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Distilling Free-Form Natural Laws from Experimental Data", "abstract": "For centuries, scientists have attempted to identify and document analytical laws that underlie physical phenomena in nature. Despite the prevalence of computing power, the process of finding natural laws and their corresponding equations has resisted automation. A key challenge to finding analytic relations automatically is defining algorithmically what makes a correlation in observed data important and insightful. We propose a principle for the identification of nontriviality. We demonstrated this approach by automatically searching motion-tracking data captured from various physical systems, ranging from simple harmonic oscillators to chaotic double-pendula. Without any prior knowledge about physics, kinematics, or geometry, the algorithm discovered Hamiltonians, Lagrangians, and other laws of geometric and momentum conservation. The discovery rate accelerated as laws found for simpler systems were used to bootstrap explanations for more complex systems, gradually uncovering the “alphabet” used to describe those systems.", "year": 2009, "venue": "Science", "authors": [ "Michael D. Schmidt", "Hod Lipson" ], "externalIds": { "MAG": "1979769287", "DOI": "10.1126/science.1165893", "CorpusId": 7366016, "PubMed": "19342586" }, "url": "https://www.semanticscholar.org/paper/164157672985407454f5edfd92a1da287445445d", "referenceCount": 31, "citationCount": 2445, "influentialCitationCount": 117, "isOpenAccess": false, "fieldsOfStudy": [ "Medicine", "Physics" ] }, { "title": "Topology and data", "abstract": "An important feature of modern science and engineering is that data of various kinds is being produced at an unprecedented rate. 
This is so in part because of new experimental methods, and in part because of the increase in the availability of high powered computing technology. It is also clear that the nature of the data we are obtaining is significantly different. For example, it is now often the case that we are given data in the form of very long vectors, where all but a few of the coordinates turn out to be irrelevant to the questions of interest, and further that we don’t necessarily know which coordinates are the interesting ones. A related fact is that the data is often very high-dimensional, which severely restricts our ability to visualize it. The data obtained is also often much noisier than in the past and has more missing information (missing data). This is particularly so in the case of biological data, particularly high throughput data from microarray or other sources. Our ability to analyze this data, both in terms of quantity and the nature of the data, is clearly not keeping pace with the data being produced. In this paper, we will discuss how geometry and topology can be applied to make useful contributions to the analysis of various kinds of data. Geometry and topology are very natural tools to apply in this direction, since geometry can be regarded as the study of distance functions, and what one often works with are distance functions on large finite sets of data. The mathematical formalism which has been developed for incorporating geometric and topological techniques deals with point clouds, i.e. finite sets of points equipped with a distance function. It then adapts tools from the various branches of geometry to the study of point clouds. The point clouds are intended to be thought of as finite samples taken from a geometric object, perhaps with noise. Here are some of the key points which come up when applying these geometric methods to data analysis. • Qualitative information is needed: One important goal of data analysis is to allow the user to obtain knowledge about the data, i.e. to understand how it is organized on a large scale. For example, if we imagine that we are looking at a data set constructed somehow from diabetes patients, it would be important to develop the understanding that there are two types of the disease, namely the juvenile and adult onset forms. Once that is established, one of course wants to develop quantitative methods for distinguishing them, but the first insight about the distinct forms of the disease is key.", "year": 2009, "venue": "", "authors": [ "G. Carlsson" ], "externalIds": { "MAG": "1991566301", "DOI": "10.1090/S0273-0979-09-01249-X", "CorpusId": 1472609 }, "url": "https://www.semanticscholar.org/paper/a4b603ca6aaaa18968e08ac1b0ee093db8a99a6b", "referenceCount": 79, "citationCount": 2154, "influentialCitationCount": 188, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Pattern Recognition and Machine Learning", "abstract": null, "year": 2006, "venue": "Technometrics", "authors": [ "Radford M. 
Neal" ], "externalIds": { "MAG": "1663973292", "DBLP": "journals/jei/BishopN07", "DOI": "10.1007/978-0-387-45528-0", "CorpusId": 31993898 }, "url": "https://www.semanticscholar.org/paper/668b1277fbece28c4841eeab1c97e4ebd0079700", "referenceCount": 361, "citationCount": 36225, "influentialCitationCount": 4245, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Distinctive Image Features from Scale-Invariant Keypoints", "abstract": null, "year": 2004, "venue": "International Journal of Computer Vision", "authors": [ "D. Lowe" ], "externalIds": { "MAG": "2151103935", "DBLP": "journals/ijcv/Lowe04", "DOI": "10.1023/B:VISI.0000029664.99615.94", "CorpusId": 174065 }, "url": "https://www.semanticscholar.org/paper/8c04f169203f9e55056a6f7f956695babe622a38", "referenceCount": 50, "citationCount": 45881, "influentialCitationCount": 6596, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "The Elements of Statistical Learning: Data Mining, Inference, and Prediction", "abstract": "In the words of the authors, the goal of this book was to “bring together many of the important new ideas in learning, and explain them in a statistical framework.” The authors have been quite successful in achieving this objective, and their work is a welcome addition to the statistics and learning literatures. Statistics has always been interdisciplinary, borrowing ideas from diverse Ž elds and repaying the debt with contributions, both theoretical and practical, to the other intellectual disciplines. For statistical learning, this cross-fertilization is especially noticeable. This book is a valuable resource, both for the statistician needing an introduction to machine learning and related Ž elds and for the computer scientist wishing to learn more about statistics. Statisticians will especially appreciate that it is written in their own language. The level of the book is roughly that of a second-year doctoral student in statistics, and it will be useful as a textbook for such students. In a stimulating article, Breiman (2001) argued that statistics has been focused too much on a “data modeling culture,” where the model is paramount. Breiman argued instead for an “algorithmic modeling culture,” with emphasis on black-box types of prediction. Breiman’s article is controversial, and in his discussion, Efron objects that “prediction is certainly an interesting subject, but Leo’s paper overstates both its role and our profession’s lack of interest in it.” Although I mostly agree with Efron, I worry that the courses offered by most statistics departments include little, if any, treatment of statistical learning and prediction. (Stanford, where Efron and the authors of this book teach, is an exception.) Graduate students in statistics certainly need to know more than they do now about prediction, machine learning, statistical learning, and data mining (not disjoint subjects). I hope that graduate courses covering the topics of this book will become more common in statistics curricula. Most of the book is focused on supervised learning, where one has inputs and outputs from some system and wishes to predict unknown outputs corresponding to known inputs. 
The methods discussed for supervised learning include linear and logistic regression; basis expansion, such as splines and wavelets; kernel techniques, such as local regression, local likelihood, and radial basis functions; neural networks; additive models; decision trees based on recursive partitioning, such as CART; and support vector machines. There is a final chapter on unsupervised learning, including association rules, cluster analysis, self-organizing maps, principal components and curves, and independent component analysis. Many statisticians will be unfamiliar with at least some of these algorithms. Association rules are popular for mining commercial data in what is called “market basket analysis.” The aim is to discover types of products often purchased together. Such knowledge can be used to develop marketing strategies, such as store or catalog layouts. Self-organizing maps (SOMs) involve essentially constrained k-means clustering, where prototypes are mapped to a two-dimensional curved coordinate system. Independent components analysis is similar to principal components analysis and factor analysis, but it uses higher-order moments to achieve independence, not merely zero correlation between components. A strength of the book is the attempt to organize a plethora of methods into a coherent whole. The relationships among the methods are emphasized. I know of no other book that covers so much ground. Of course, with such broad coverage, it is not possible to cover any single topic in great depth, so this book will encourage further reading. Fortunately, each chapter includes bibliographic notes surveying the recent literature. These notes and the extensive references provide a good introduction to the learning literature, including much outside of statistics. The book might be more suitable as a textbook if less material were covered in greater depth; however, such a change would compromise the book’s usefulness as a reference, and so I am happier with the book as it was written.", "year": 2004, "venue": "", "authors": [ "D. Ruppert" ], "externalIds": { "MAG": "1984514442", "DOI": "10.1198/jasa.2004.s339", "CorpusId": 118901444 }, "url": "https://www.semanticscholar.org/paper/fa25610fb8586c2b50a3654edc5bb42fa7fc4729", "referenceCount": 1, "citationCount": 18958, "influentialCitationCount": 2100, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Feature selection based on mutual information criteria of max-dependency, max-relevance, and min-redundancy", "abstract": "Feature selection is an important problem for pattern classification systems. We study how to select good features according to the maximal statistical dependency criterion based on mutual information. Because of the difficulty in directly implementing the maximal dependency condition, we first derive an equivalent form, called minimal-redundancy-maximal-relevance criterion (mRMR), for first-order incremental feature selection. Then, we present a two-stage feature selection algorithm by combining mRMR and other more sophisticated feature selectors (e.g., wrappers). This allows us to select a compact set of superior features at very low cost. We perform extensive experimental comparison of our algorithm and other methods using three different classifiers (naive Bayes, support vector machine, and linear discriminant analysis) and four different data sets (handwritten digits, arrhythmia, NCI cancer cell lines, and lymphoma tissues).
The results confirm that mRMR leads to promising improvement on feature selection and classification accuracy.", "year": 2003, "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence", "authors": [ "Hanchuan Peng", "Fuhui Long", "C. Ding" ], "externalIds": { "MAG": "2154053567", "DBLP": "journals/pami/PengLD05", "DOI": "10.1109/TPAMI.2005.159", "CorpusId": 206764015, "PubMed": "16119262" }, "url": "https://www.semanticscholar.org/paper/ba969a4f3bc5f5a84f6025478e566c40661d85f3", "referenceCount": 39, "citationCount": 8760, "influentialCitationCount": 509, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Medicine" ] }, { "title": "Estimation of Entropy and Mutual Information", "abstract": "We present some new results on the nonparametric estimation of entropy and mutual information. First, we use an exact local expansion of the entropy function to prove almost sure consistency and central limit theorems for three of the most commonly used discretized information estimators. The setup is related to Grenander's method of sieves and places no assumptions on the underlying probability measure generating the data. Second, we prove a converse to these consistency theorems, demonstrating that a misapplication of the most common estimation techniques leads to an arbitrarily poor estimate of the true information, even given unlimited data. This inconsistency theorem leads to an analytical approximation of the bias, valid in surprisingly small sample regimes and more accurate than the usual formula of Miller and Madow over a large region of parameter space. The two most practical implications of these results are negative: (1) information estimates in a certain data regime are likely contaminated by bias, even if bias-corrected estimators are used, and (2) confidence intervals calculated by standard techniques drastically underestimate the error of the most common estimation methods. Finally, we note a very useful connection between the bias of entropy estimators and a certain polynomial approximation problem. By casting bias calculation problems in this approximation theory framework, we obtain the best possible generalization of known asymptotic bias results. More interesting, this framework leads to an estimator with some nice properties: the estimator comes equipped with rigorous bounds on the maximum error over all possible underlying probability distributions, and this maximum error turns out to be surprisingly small. We demonstrate the application of this new estimator on both real and simulated data.", "year": 2003, "venue": "Neural Computation", "authors": [ "L. Paninski" ], "externalIds": { "MAG": "347952836", "DBLP": "journals/neco/Paninski03", "DOI": "10.1162/089976603321780272", "CorpusId": 2034914 }, "url": "https://www.semanticscholar.org/paper/8277d67853a3b4dd3060a7aab1ae1c4af6c81211", "referenceCount": 67, "citationCount": 1461, "influentialCitationCount": 96, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science", "Chemistry", "Mathematics" ] }, { "title": "Feature Extraction by Non-Parametric Mutual Information Maximization", "abstract": "We present a method for learning discriminative feature transforms using as criterion the mutual information between class labels and transformed features. Instead of a commonly used mutual information measure based on Kullback-Leibler divergence, we use a quadratic divergence measure, which allows us to make an efficient non-parametric implementation and requires no prior assumptions about class densities. 
In addition to linear transforms, we also discuss nonlinear transforms that are implemented as radial basis function networks. Extensions to reduce the computational complexity are also presented, and a comparison to greedy feature selection is made.", "year": 2003, "venue": "Journal of machine learning research", "authors": [ "K. Torkkola" ], "externalIds": { "DBLP": "journals/jmlr/Torkkola03", "MAG": "2116801843", "CorpusId": 3181596 }, "url": "https://www.semanticscholar.org/paper/56ed0d2509339b6a107bfc4c60a8ddf3089de3a6", "referenceCount": 41, "citationCount": 700, "influentialCitationCount": 54, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "The information bottleneck method", "abstract": "We define the relevant information in a signal $x\\in X$ as being the information that this signal provides about another signal $y\\in Y$. Examples include the information that face images provide about the names of the people portrayed, or the information that speech sounds provide about the words spoken. Understanding the signal $x$ requires more than just predicting $y$, it also requires specifying which features of $X$ play a role in the prediction. We formalize this problem as that of finding a short code for $X$ that preserves the maximum information about $Y$. That is, we squeeze the information that $X$ provides about $Y$ through a `bottleneck' formed by a limited set of codewords $\\tilde{X}$. This constrained optimization problem can be seen as a generalization of rate distortion theory in which the distortion measure $d(x,\\tilde{x})$ emerges from the joint statistics of $X$ and $Y$. This approach yields an exact set of self consistent equations for the coding rules $X \\to \\tilde{X}$ and $\\tilde{X} \\to Y$. Solutions to these equations can be found by a convergent re-estimation method that generalizes the Blahut-Arimoto algorithm. Our variational principle provides a surprisingly rich framework for discussing a variety of problems in signal processing and learning, as will be described in detail elsewhere.", "year": 2000, "venue": "arXiv.org", "authors": [ "Naftali Tishby", "Fernando C Pereira", "W. Bialek" ], "externalIds": { "MAG": "1686946872", "ArXiv": "physics/0004057", "DBLP": "journals/corr/physics-0004057", "CorpusId": 8936496 }, "url": "https://www.semanticscholar.org/paper/4ef483f819e11873822416042a4b6dc4652e010c", "referenceCount": 8, "citationCount": 3318, "influentialCitationCount": 358, "isOpenAccess": false, "fieldsOfStudy": [ "Physics", "Computer Science", "Mathematics" ] }, { "title": "A Model of Inductive Bias Learning", "abstract": "A major problem in machine learning is that of inductive bias: how to choose a learner's hypothesis space so that it is large enough to contain a solution to the problem being learnt, yet small enough to ensure reliable generalization from reasonably-sized training sets. Typically such bias is supplied by hand through the skill and insights of experts. In this paper a model for automatically learning bias is investigated. The central assumption of the model is that the learner is embedded within an environment of related learning tasks. Within such an environment the learner can sample from multiple tasks, and hence it can search for a hypothesis space that contains good solutions to many of the problems in the environment.
Under certain restrictions on the set of all hypothesis spaces available to the learner, we show that a hypothesis space that performs well on a sufficiently large number of training tasks will also perform well when learning novel tasks in the same environment. Explicit bounds are also derived demonstrating that learning multiple tasks within an environment of related tasks can potentially give much better generalization than learning a single task.", "year": 2000, "venue": "Journal of Artificial Intelligence Research", "authors": [ "Jonathan Baxter" ], "externalIds": { "MAG": "2162888803", "DBLP": "journals/corr/abs-1106-0245", "ArXiv": "1106.0245", "DOI": "10.1613/jair.731", "CorpusId": 9803204 }, "url": "https://www.semanticscholar.org/paper/727e1e16ede6eaad241bad11c525da07b154c688", "referenceCount": 53, "citationCount": 1164, "influentialCitationCount": 111, "isOpenAccess": true, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Evolving artificial neural networks", "abstract": "Learning and evolution are two fundamental forms of adaptation. There has been a great interest in combining learning and evolution with artificial neural networks (ANNs) in recent years. This paper: 1) reviews different combinations between ANNs and evolutionary algorithms (EAs), including using EAs to evolve ANN connection weights, architectures, learning rules, and input features; 2) discusses different search operators which have been used in various EAs; and 3) points out possible future research directions. It is shown, through a considerably large literature review, that combinations between ANNs and EAs can lead to significantly better intelligent systems than relying on ANNs or EAs alone.", "year": 1999, "venue": "Proceedings of the IEEE", "authors": [ "X. Yao" ], "externalIds": { "MAG": "2124290836", "DBLP": "journals/pieee/Yao99", "DOI": "10.1109/5.784219", "CorpusId": 61519731 }, "url": "https://www.semanticscholar.org/paper/5ae89454a8a41015ebe0eeab9f51fe4eee459a25", "referenceCount": 316, "citationCount": 2064, "influentialCitationCount": 106, "isOpenAccess": true, "fieldsOfStudy": [ "Engineering", "Computer Science" ] }, { "title": "Locally Weighted Regression: An Approach to Regression Analysis by Local Fitting", "abstract": "Abstract Locally weighted regression, or loess, is a way of estimating a regression surface through a multivariate smoothing procedure, fitting a function of the independent variables locally and in a moving fashion analogous to how a moving average is computed for a time series. With local fitting we can estimate a much wider class of regression surfaces than with the usual classes of parametric functions, such as polynomials. The goal of this article is to show, through applications, how loess can be used for three purposes: data exploration, diagnostic checking of parametric models, and providing a nonparametric regression surface. Along the way, the following methodology is introduced: (a) a multivariate smoothing procedure that is an extension of univariate locally weighted regression; (b) statistical procedures that are analogous to those used in the least-squares fitting of parametric functions; (c) several graphical methods that are useful tools for understanding loess estimates and checking the a...", "year": 1988, "venue": "", "authors": [ "W. Cleveland", "S. J. 
Devlin" ], "externalIds": { "MAG": "2017977879", "DOI": "10.1080/01621459.1988.10478639", "CorpusId": 14960635 }, "url": "https://www.semanticscholar.org/paper/b81e61b920b986ff1495af426aad7437c9011d85", "referenceCount": 47, "citationCount": 5415, "influentialCitationCount": 342, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Self-organization in a perceptual network", "abstract": "The emergence of a feature-analyzing function from the development rules of simple, multilayered networks is explored. It is shown that even a single developing cell of a layered network exhibits a remarkable set of optimization properties that are closely related to issues in statistics, theoretical physics, adaptive signal processing, the formation of knowledge representation in artificial intelligence, and information theory. The network studied is based on the visual system. These results are used to infer an information-theoretic principle that can be applied to the network as a whole, rather than a single cell. The organizing principle proposed is that the network connections develop in such a way as to maximize the amount of information that is preserved when signals are transformed at each processing stage, subject to certain constraints. The operation of this principle is illustrated for some simple cases.<>", "year": 1988, "venue": "Computer", "authors": [ "R. Linsker" ], "externalIds": { "MAG": "2122925692", "DBLP": "journals/computer/Linsker88", "DOI": "10.1109/2.36", "CorpusId": 1527671 }, "url": "https://www.semanticscholar.org/paper/16d70e8af45ca0ae2c1bb73f3be6628518d40b8f", "referenceCount": 16, "citationCount": 1559, "influentialCitationCount": 72, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Probabilistic Symmetries and Invariant Neural Networks", "abstract": "Treating neural network inputs and outputs as random variables, we characterize the structure of neural networks that can be used to model data that are invariant or equivariant under the action of a compact group. Much recent research has been devoted to encoding invariance under symmetry transformations into neural network architectures, in an effort to improve the performance of deep neural networks in data-scarce, non-i.i.d., or unsupervised settings. By considering group invariance from the perspective of probabilistic symmetry, we establish a link between functional and probabilistic symmetry, and obtain generative functional representations of probability distributions that are invariant or equivariant under the action of a compact group. Our representations completely characterize the structure of neural networks that can be used to model such distributions and yield a general program for constructing invariant stochastic or deterministic neural networks. We demonstrate that examples from the recent literature are special cases, and develop the details of the general program for exchangeable sequences and arrays.", "year": 2020, "venue": "Journal of machine learning research", "authors": [ "Benjamin Bloem-Reddy", "Y. 
Teh" ], "externalIds": { "DBLP": "journals/jmlr/Bloem-ReddyT20", "MAG": "3038350744", "CorpusId": 58028957 }, "url": "https://www.semanticscholar.org/paper/37dca01f8cc191505e435cc8f019ff413bee761d", "referenceCount": 128, "citationCount": 143, "influentialCitationCount": 15, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Neural architecture search: A survey", "abstract": null, "year": 2019, "venue": "Journal of Machine Learning Research", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Automated Machine Learning: Meth-ods, Systems, Challenges", "abstract": null, "year": 2019, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "UCI Machine Learning Repository", "abstract": null, "year": 2019, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Deep Variational Information Bottleneck", "abstract": "We present a variational approximation to the information bottleneck of Tishby et al. (1999). This variational approach allows us to parameterize the information bottleneck model using a neural network and leverage the reparameterization trick for efficient training. We call this method \"Deep Variational Information Bottleneck\", or Deep VIB. We show that models trained with the VIB objective outperform those that are trained with other forms of regularization, in terms of generalization performance and robustness to adversarial attack.", "year": 2017, "venue": "International Conference on Learning Representations", "authors": [ "Alexander A. Alemi", "Ian Fischer", "Joshua V. Dillon" ], "externalIds": { "MAG": "2964160479", "DBLP": "conf/iclr/AlemiFD017", "ArXiv": "1612.00410", "CorpusId": 204922497 }, "url": "https://www.semanticscholar.org/paper/a181fb5a42ad8fe2cc27b5542fa40384e9a8d72c", "referenceCount": 49, "citationCount": 1487, "influentialCitationCount": 278, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Principal Component Analysis", "abstract": null, "year": 2017, "venue": "Encyclopedia of Machine Learning and Data Mining", "authors": [ "Y. 
Bing" ], "externalIds": { "DOI": "10.1007/978-1-4899-7687-1_665", "CorpusId": 4169234 }, "url": "https://www.semanticscholar.org/paper/2b77dafd65079d18d0c1a618974dec5debd840ca", "referenceCount": 0, "citationCount": 330, "influentialCitationCount": 35, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Deep Learning", "abstract": null, "year": 2016, "venue": "International Journal of Semantic Computing", "authors": [ "Xing Hao", "Guigang Zhang", "Shang Ma" ], "externalIds": { "DBLP": "journals/escri/HaoZ17", "DOI": "10.1142/S1793351X16500045", "CorpusId": 1779661 }, "url": "https://www.semanticscholar.org/paper/4f8d648c52edf74e41b0996128aa536e13cc7e82", "referenceCount": 2, "citationCount": 50533, "influentialCitationCount": 2842, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Conditional Likelihood Maximisation: A Unifying Framework for Information Theoretic Feature Selection", "abstract": "We present a unifying framework for information theoretic feature selection, bringing almost two decades of research on heuristic filter criteria under a single theoretical interpretation. This is in response to the question: \"what are the implicit statistical assumptions of feature selection criteria based on mutual information?\". To answer this, we adopt a different strategy than is usual in the feature selection literature--instead of trying to define a criterion, we derive one, directly from a clearly specified objective function: the conditional likelihood of the training labels. While many hand-designed heuristic criteria try to optimize a definition of feature 'relevancy' and 'redundancy', our approach leads to a probabilistic framework which naturally incorporates these concepts. As a result we can unify the numerous criteria published over the last two decades, and show them to be low-order approximations to the exact (but intractable) optimisation problem. The primary contribution is to show that common heuristics for information based feature selection (including Markov Blanket algorithms as a special case) are approximate iterative maximisers of the conditional likelihood. A large empirical study provides strong evidence to favour certain classes of criteria, in particular those that balance the relative size of the relevancy/redundancy terms. Overall we conclude that the JMI criterion (Yang and Moody, 1999; Meyer et al., 2008) provides the best tradeoff in terms of accuracy, stability, and flexibility with small data samples.", "year": 2012, "venue": "Journal of machine learning research", "authors": [ "Gavin Brown", "Adam Craig Pocock", "Ming-Jie Zhao", "M. Luján" ], "externalIds": { "DBLP": "journals/jmlr/BrownPZL12", "MAG": "2156504490", "DOI": "10.5555/2503308.2188387", "CorpusId": 6621217 }, "url": "https://www.semanticscholar.org/paper/2bf64ef156ad46ea94c8b84bd106bc07ad11f7d6", "referenceCount": 45, "citationCount": 1137, "influentialCitationCount": 124, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science", "Mathematics" ] }, { "title": "Machine learning - a probabilistic perspective", "abstract": "All rights reserved. No part of this book may be reproduced in any form by any electronic or mechanical means (including photocopying, recording, or information storage and retrieval) without permission in writing from the publisher. Machine learning : a probabilistic perspective / Kevin P. Murphy. p. cm. — (Adaptive computation and machine learning series) Includes bibliographical references and index. 
Contents Preface xxvii 1 Introduction 1 1.1 Machine learning: what and why? 1 1.1.1 Types of machine learning 2 1.2 Supervised learning 3 1.2.1 Classification 3 1.2.2 Regression 8 1.3 Unsupervised learning 9 1.3.1 Discovering clusters 10 1.3.2 Discovering latent factors 11 1.3.3 Discovering graph structure 13 1.3.4 Matrix completion 14 1.4 Some basic concepts in machine learning 16 1.4.1 Parametric vs non-parametric models 16 1.4.2 A simple non-parametric classifier: K-nearest neighbors 16 1.4.3 The curse of dimensionality 18 1.4.4 Parametric models for classification and regression 19 1.4.5", "year": 2012, "venue": "Adaptive computation and machine learning series", "authors": [ "Kevin P. Murphy" ], "externalIds": { "DBLP": "books/lib/Murphy12", "CorpusId": 17793133 }, "url": "https://www.semanticscholar.org/paper/360ca02e6f5a5e1af3dce4866a257aafc2d6d6f5", "referenceCount": 0, "citationCount": 9092, "influentialCitationCount": 1130, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "A Mathematical Theory of Communication", "abstract": "This paper opened the new area the information theory. Before this paper, most people believed that the only way to make the error probability of transmission as small as desired is to reduce the data rate (such as a long repetition scheme). However, surprisingly this paper revealed that it does not need to reduce the data rate for achieving that much of small errors. It proved that we can get some positive data rate that has the same small error probability and also there is an upper bound of the data rate, which means we cannot achieve the data rate with any encoding scheme that has small enough error probability over the upper bound.", "year": 2006, "venue": "", "authors": [ "Jin Woo Shin", "Sang Joon Kim" ], "externalIds": { "CorpusId": 5747983 }, "url": "https://www.semanticscholar.org/paper/6d12a1d23b21a9b170118a56386552bc5d4727de", "referenceCount": 0, "citationCount": 62860, "influentialCitationCount": 5070, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "Cover and", "abstract": null, "year": 2006, "venue": "Elements of Information Theory", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Induction of decision trees", "abstract": null, "year": 2004, "venue": "Machine-mediated learning", "authors": [ "J. R. Quinlan" ], "externalIds": { "MAG": "2466432605", "DOI": "10.1007/BF00116251", "CorpusId": 13252401 }, "url": "https://www.semanticscholar.org/paper/6ccb34bf2122304af5cbecf54402ee3d970e43f2", "referenceCount": 36, "citationCount": 15592, "influentialCitationCount": 1146, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Cluster Ensembles --- A Knowledge Reuse Framework for Combining Multiple Partitions", "abstract": "This paper introduces the problem of combining multiple partitionings of a set of objects into a single consolidated clustering without accessing the features or algorithms that determined these partitionings. We first identify several application scenarios for the resultant ‘knowledge reuse’ framework that we call cluster ensembles . The cluster ensemble problem is then formalized as a combinatorial optimization problem in terms of shared mutual information. In addition to a direct maximization approach, we propose three effective and efficient techniques for obtaining high-quality combiners (consensus functions). 
The first combiner induces a similarity measure from the partitionings and then reclusters the objects. The second combiner is based on hypergraph partitioning. The third one collapses groups of clusters into meta-clusters which then compete for each object to determine the combined clustering. Due to the low computational costs of our techniques, it is quite feasible to use a supra-consensus function that evaluates all three approaches against the objective function and picks the best solution for a given situation. We evaluate the effectiveness of cluster ensembles in three qualitatively different application scenarios: (i) where the original clusters were formed based on non-identical sets of features, (ii) where the original clustering algorithms worked on non-identical sets of objects, and (iii) where a common data-set is used and the main purpose of combining multiple clusterings is to improve the quality and robustness of the solution. Promising results are obtained in all three situations for synthetic as well as real data-sets.", "year": 2002, "venue": "Journal of machine learning research", "authors": [ "Alexander Strehl", "Joydeep Ghosh" ], "externalIds": { "DBLP": "journals/jmlr/StrehlG02", "CorpusId": 3068944 }, "url": "https://www.semanticscholar.org/paper/b8c282f76923d89e00dcd17ec425d496ade6ddc7", "referenceCount": 64, "citationCount": 4539, "influentialCitationCount": 769, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] } ] }, "AutoIRT: Calibrating Item Response Theory Models with Automated Machine Learning": { "paper_title": "AutoIRT: Calibrating Item Response Theory Models with Automated Machine Learning", "arxiv_id": "2409.08823v1", "keyword": "automated machine learning", "authors": [ "James Sharpnack", "Phoebe Mulcaire", "Klinton Bicknell", "Geoff LaFlair", "Kevin Yancey" ], "references": [ { "title": "Digital-first assessments: A security framework", "abstract": null, "year": 2022, "venue": "Journal of Computer Assisted Learning", "authors": [ "Geoffrey T. LaFlair", "Thomas E. Langenfeld", "Basim Baig", "Andre Kenji Horie", "Yigal Attali", "A. A. Davier" ], "externalIds": { "DBLP": "journals/jcal/LaFlairLBHAD22", "DOI": "10.1111/jcal.12665", "CorpusId": 247883674 }, "url": "https://www.semanticscholar.org/paper/f01f5ddc0d68081cfc3ad34efa07658fa9895e16", "referenceCount": 23, "citationCount": 12, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Learning Transferable Visual Models From Natural Language Supervision", "abstract": "State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. 
We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at https://github.com/OpenAI/CLIP.", "year": 2021, "venue": "International Conference on Machine Learning", "authors": [ "Alec Radford", "Jong Wook Kim", "Chris Hallacy", "A. Ramesh", "Gabriel Goh", "Sandhini Agarwal", "Girish Sastry", "Amanda Askell", "Pamela Mishkin", "Jack Clark", "Gretchen Krueger", "I. Sutskever" ], "externalIds": { "ArXiv": "2103.00020", "DBLP": "conf/icml/RadfordKHRGASAM21", "CorpusId": 231591445 }, "url": "https://www.semanticscholar.org/paper/6f870f7f02a8c59c3e23f407f3ef00dd1dcf8fc4", "referenceCount": 220, "citationCount": 18886, "influentialCitationCount": 5013, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Motivation and Test Anxiety in Test Performance across Three Testing Contexts: The CAEL, CET, and GEPT.", "abstract": "This study examined test-takers' motivation, test anxiety, and test performance across a range of social and educational contexts in three high-stakes language tests: the Canadian Academic English Language (CAEL) Assessment in Canada, the College English Test (CET) in the People's Republic of China, and the General English Proficiency Test (GEPT) in Taiwan. The researchers issued a questionnaire exploring motivation, test anxiety, and perceptions of test importance and purpose to test-takers in each of the three contexts. A total of 1,281 valid questionnaire responses were obtained: 255 from CAEL, 493 from CET, and 533 from GEPT. Questionnaire responses were linked to each test-taker's respective test performance. The results illustrate complex interrelationships of test-takers' motivation and test anxiety in their test performance. Differences in motivation and test anxiety emerged with regard to social variables (i.e., test importance to stakeholders and test purposes). Further, motivation and test anxiety, along with personal variables (i.e., gender and age), were associated with test performance. Given that motivation and test anxiety have typically been examined separately and in relation to a single testing context, this study addresses an important research gap.", "year": 2014, "venue": "", "authors": [ "Liying Cheng", "D. Klinger", "Janna Fox", "Christine Doe", "Yan-ping Jin", "Jessica R. W. 
Wu" ], "externalIds": { "MAG": "2086922103", "DOI": "10.1002/TESQ.105", "CorpusId": 145543211 }, "url": "https://www.semanticscholar.org/paper/dbf0f9c871e705bef277491c7b0c69f3580b2a00", "referenceCount": 49, "citationCount": 75, "influentialCitationCount": 6, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology" ] }, { "title": "Bayesian Item Response Modeling: Theory and Applications", "abstract": "to Bayesian Response Modeling.- Bayesian Hierarchical Response Modeling.- Basic Elements of Bayesian Statistics.- Estimation of Bayesian Item Response Models.- Assessment of Bayesian Item Response Models.- Multilevel Item Response Theory Models.- Random Item Effects Models.- Response Time Item Response Models.- Randomized Item Response Models.", "year": 2010, "venue": "", "authors": [ "J. Fox" ], "externalIds": { "MAG": "438677521", "CorpusId": 118482014 }, "url": "https://www.semanticscholar.org/paper/a66ad66d14ff6261d4dce9447c2a3cf75dd8896c", "referenceCount": 0, "citationCount": 415, "influentialCitationCount": 35, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Explanatory Item Response Models: A Generalized Linear and Nonlinear Approach", "abstract": "(2006). Explanatory Item Response Models: A Generalized Linear and Nonlinear Approach. Technometrics: Vol. 48, No. 4, pp. 568-569.", "year": 2006, "venue": "Technometrics", "authors": [ "Deniz Senturk-Doganaksoy" ], "externalIds": { "DBLP": "journals/technometrics/Senturk06", "MAG": "1982870715", "DOI": "10.1198/tech.2006.s428", "CorpusId": 38690690 }, "url": "https://www.semanticscholar.org/paper/ab29e74b7f64dc765f2814d0c32f7d574ad09771", "referenceCount": 0, "citationCount": 460, "influentialCitationCount": 41, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics", "Computer Science" ] }, { "title": "Applications and Extensions of MCMC in IRT: Multiple Item Types, Missing Data, and Rated Responses", "abstract": "Patz and Junker (1999) describe a general Markov chain Monte Carlo (MCMC) strategy, based on Metropolis-Hastings sampling, for Bayesian inference in complex item response theory (IRT) settings. They demonstrate the basic methodology using the two-parameter logistic (2PL) model. In this paper we extend their basic MCMC methodology to address issues such as non-response, designed missingness, multiple raters, guessing behavior and partial credit (polytomous) test items. We apply the basic MCMC methodology to two examples from the National Assessment of Educational Progress 1992 Trial State Assessment in Reading: (a) a multiple item format (2PL, 3PL, and generalized partial credit) subtest with missing response data; and (b) a sequence of rated, dichotomous short-response items, using a new IRT model called the generalized linear logistic test model (GLLTM).", "year": 1999, "venue": "", "authors": [ "Richard J. Patz", "B. Junker" ], "externalIds": { "MAG": "2158518800", "DOI": "10.3102/10769986024004342", "CorpusId": 122806403 }, "url": "https://www.semanticscholar.org/paper/c2ff2f35f19b7a91caef93a8ad46846f5aa023a7", "referenceCount": 58, "citationCount": 397, "influentialCitationCount": 33, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Protecting the Integrity of Computerized Testing Item Pools", "abstract": "What are the issues and techniques involved in protecting the integrity of item pools in computerized testing? How can item exposure be limited? 
How do security issues differ in computerized testing and paper-and-pencil testing?", "year": 1998, "venue": "", "authors": [ "Walter D. Way" ], "externalIds": { "MAG": "2024029582", "DOI": "10.1111/J.1745-3992.1998.TB00632.X", "CorpusId": 144734440 }, "url": "https://www.semanticscholar.org/paper/0382d69d619cb00abfe014128e658d994917ac0f", "referenceCount": 23, "citationCount": 123, "influentialCitationCount": 15, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Item Response Theory: Parameter Estimation Techniques", "abstract": null, "year": 1998, "venue": "", "authors": [ "M. Reckase" ], "externalIds": { "MAG": "2044780707", "DOI": "10.1177/01466216980221009", "CorpusId": 122188996 }, "url": "https://www.semanticscholar.org/paper/84430c5407358b926ca04b09b627f4eac6ace0cf", "referenceCount": 1, "citationCount": 614, "influentialCitationCount": 77, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Marginal maximum likelihood estimation of item parameters: Application of an EM algorithm", "abstract": null, "year": 1981, "venue": "", "authors": [ "R. D. Bock", "M. Aitkin" ], "externalIds": { "MAG": "2017966270", "DOI": "10.1007/BF02293801", "CorpusId": 122123206 }, "url": "https://www.semanticscholar.org/paper/d48e7ec3387d2c942b80e9dad0e57c930915c734", "referenceCount": 27, "citationCount": 2268, "influentialCitationCount": 133, "isOpenAccess": false, "fieldsOfStudy": [ "Mathematics" ] }, { "title": "Applications of Item Response Theory To Practical Testing Problems", "abstract": null, "year": 1980, "venue": "", "authors": [ "F. Lord" ], "externalIds": { "MAG": "1535520578", "DOI": "10.4324/9780203056615", "CorpusId": 142740299 }, "url": "https://www.semanticscholar.org/paper/d0476004085419b8a44953f5cdab11442c12ffaa", "referenceCount": 4, "citationCount": 5051, "influentialCitationCount": 658, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology" ] }, { "title": "Statistical Theories of Mental Test Scores.", "abstract": "This is a reprint of the orginal book released in 1968. Our primary goal in this book is to sharpen the skill, sophistication, and in- tuition of the reader in the interpretation of mental test data, and in the construction and use of mental tests both as instruments of psychological theory and as tools in the practical problems of selection, evaluation, and guidance. We seek to do this by exposing the reader to some psychologically meaningful statistical theories of mental test scores. Although this book is organized in terms of test-score theories and models, the practical applications and limitations of each model studied receive substantial emphasis, and these discussions are presented in as nontechnical a manner as we have found possible. Since this book catalogues a host of test theory models and formulas, it may serve as a reference handbook. Also, for a limited group of specialists, this book aims to provide a more rigorous foundation for further theoretical research than has heretofore been available.One aim of this book is to present statements of the assumptions, together with derivations of the implications, of a selected group of statistical models that the authors believe to be useful as guides in the practices of test construction and utilization. With few exceptions we have given a complete proof for each major result presented in the book. In many cases these proofs are simpler, more complete, and more illuminating than those originally offered. 
When we have omitted proofs or parts of proofs, we have generally provided a reference containing the omitted argument. We have left some proofs as exercises for the reader, but only when the general method of proof has already been demonstrated. At times we have proved only special cases of more generally stated theorems, when the general proof affords no additional insight into the problem and yet is substantially more complex mathematically.", "year": 1971, "venue": "", "authors": [ "M. Tatsuoka", "F. Lord", "M. R. Novick", "A. Birnbaum" ], "externalIds": { "MAG": "2322861433", "DOI": "10.2307/2283550", "CorpusId": 124110050 }, "url": "https://www.semanticscholar.org/paper/52c6b6f04736152d3262510074f5e8c591e2e8f0", "referenceCount": 0, "citationCount": 7270, "influentialCitationCount": 425, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology" ] }, { "title": "BERT-IRT: Accelerating Item Piloting with BERT Embeddings and Explainable IRT Models", "abstract": "Estimating item parameters (e.g., the difficulty of a question) is an important part of modern high-stakes tests. Conventional methods require lengthy pilots to collect response data from a representative population of test-takers. The need for these pilots limit item bank size and how often those item banks can be refreshed, impacting test security, while increasing costs needed to support the test and taking up the test-taker’s valuable time. Our paper presents a novel explanatory item response theory (IRT) model, BERT-IRT, that has been used on the Duolingo English Test (DET), a high-stakes test of English, to reduce the length of pilots by a factor of 10. Our evaluation shows how the model uses BERT embeddings and engineered NLP features to accelerate item piloting without sacrificing criterion validity or reliability.", "year": 2024, "venue": "Workshop on Innovative Use of NLP for Building Educational Applications", "authors": [ "K. Yancey", "Andrew Runge", "Geoffrey T. LaFlair", "Phoebe Mulcaire" ], "externalIds": { "DBLP": "conf/bea/YanceyRLM24", "ACL": "2024.bea-1.35", "CorpusId": 270765517 }, "url": "https://www.semanticscholar.org/paper/d95d576c6b16ecb1c78b51fcee97fd4c8ac98507", "referenceCount": 40, "citationCount": 1, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Multiple-Choice Questions Difficulty Prediction with Neural Networks", "abstract": null, "year": 2023, "venue": "International Conference on Methodologies and Intelligent Systems for Technology Enhanced Learning", "authors": [ "Diego Reyes", "Abelino Jimenez", "P. Dartnell", "Séverin Lions", "Sebastián Ríos" ], "externalIds": { "DBLP": "conf/mis4tel/ReyesJDLR23", "DOI": "10.1007/978-3-031-41226-4_2", "CorpusId": 261529772 }, "url": "https://www.semanticscholar.org/paper/98063d63abcd20e2de112619aa5e9380a5765085", "referenceCount": 0, "citationCount": 2, "influentialCitationCount": 0, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Predicting Difficulty and Discrimination of Natural Language Questions", "abstract": "Item Response Theory (IRT) has been extensively used to numerically characterize question difficulty and discrimination for human subjects in domains including cognitive psychology and education (Primi et al., 2014; Downing, 2003). More recently, IRT has been used to similarly characterize item difficulty and discrimination for natural language models across various datasets (Lalor et al., 2019; Vania et al., 2021; Rodriguez et al., 2021). 
In this work, we explore predictive models for directly estimating and explaining these traits for natural language questions in a question-answering context. We use HotpotQA for illustration. Our experiments show that it is possible to predict both difficulty and discrimination parameters for new questions, and these traits are correlated with features of questions, answers, and associated contexts. Our findings can have significant implications for the creation of new datasets and tests on the one hand and strategies such as active learning and curriculum learning on the other.", "year": 2022, "venue": "Annual Meeting of the Association for Computational Linguistics", "authors": [ "Matthew E. Byrd", "Shashank Srivastava" ], "externalIds": { "DBLP": "conf/acl/ByrdS22", "ACL": "2022.acl-short.15", "DOI": "10.18653/v1/2022.acl-short.15", "CorpusId": 248780313 }, "url": "https://www.semanticscholar.org/paper/8e6f67c8093eeb1ab4061df6aaea3fa9f5f5123f", "referenceCount": 34, "citationCount": 9, "influentialCitationCount": 0, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "On the application of Transformers for estimating the difficulty of Multiple-Choice Questions from text", "abstract": "Classical approaches to question calibration are either subjective or require newly created questions to be deployed before being calibrated. Recent works explored the possibility of estimating question difficulty from text, but did not experiment with the most recent NLP models, in particular Transformers. In this paper, we compare the performance of previous literature with Transformer models experimenting on a public and a private dataset. Our experimental results show that Transformers are capable of outperforming previously proposed models. Moreover, if an additional corpus of related documents is available, Transformers can leverage that information to further improve calibration accuracy. We characterize the dependence of the model performance on some properties of the questions, showing that it performs best on questions ending with a question mark and Multiple-Choice Questions (MCQs) with one correct choice.", "year": 2021, "venue": "Workshop on Innovative Use of NLP for Building Educational Applications", "authors": [ "Luca Benedetto", "Giovanni Aradelli", "P. Cremonesi", "Andrea Cappelli", "A. Giussani", "R. Turrin" ], "externalIds": { "DBLP": "conf/bea/BenedettoACCGT21", "ACL": "2021.bea-1.16", "CorpusId": 233365134 }, "url": "https://www.semanticscholar.org/paper/759fc0d8d1b58022c72cf9011eb820988aa1c804", "referenceCount": 23, "citationCount": 16, "influentialCitationCount": 1, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Jump-Starting Item Parameters for Adaptive Language Tests", "abstract": "A challenge in designing high-stakes language assessments is calibrating the test item difficulties, either a priori or from limited pilot test data. While prior work has addressed ‘cold start’ estimation of item difficulties without piloting, we devise a multi-task generalized linear model with BERT features to jump-start these estimates, rapidly improving their quality with as few as 500 test-takers and a small sample of item exposures (≈6 each) from a large item bank (≈4,000 items). Our joint model provides a principled way to compare test-taker proficiency, item difficulty, and language proficiency frameworks like the Common European Framework of Reference (CEFR). 
This also enables new item difficulty estimates without piloting them first, which in turn limits item exposure and thus enhances test item security. Finally, using operational data from the Duolingo English Test, a high-stakes English proficiency test, we find that the difficulty estimates derived using this method correlate strongly with lexico-grammatical features that correlate with reading complexity.", "year": 2021, "venue": "Conference on Empirical Methods in Natural Language Processing", "authors": [ "Arya D. McCarthy", "K. Yancey", "Geoffrey T. LaFlair", "Jesse Egbert", "Manqian Liao", "Burr Settles" ], "externalIds": { "DBLP": "conf/emnlp/McCarthyYLELS21", "ACL": "2021.emnlp-main.67", "DOI": "10.18653/v1/2021.emnlp-main.67", "CorpusId": 243865410 }, "url": "https://www.semanticscholar.org/paper/c030bf54bc974f7a1393d06cae0d6dffef53407b", "referenceCount": 63, "citationCount": 20, "influentialCitationCount": 1, "isOpenAccess": true, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "H2O AutoML: Scalable Automatic Machine Learning", "abstract": "H2O is an open source, distributed machine learning platform designed to scale to very large datasets, with APIs in R, Python, Java and Scala. We present H2O AutoML, a highly scalable, fully-automated, supervised learning algorithm which automates the process of training a large selection of candidate models and stacked ensembles within a single function. The result of the AutoML run is a “leaderboard”: a ranked list of models, all of which can be easily exported for use in a production environment. Models in the leader-board can be ranked by numerous model performance metrics or other model attributes such as training time or average per-row prediction speed. The H2O AutoML algorithm relies on the efficient training of H2O machine learning algorithms to produce a large number of models in a short amount of time. H2O AutoML uses a combination of fast random search and stacked ensembles to achieve results competitive with, and often better than, other frameworks which rely on more complex model tuning techniques such as Bayesian optimization or genetic algorithms. H2O AutoML trains a variety of algorithms (e.g. GBMs, Random Forests, Deep Neural Networks, GLMs), yielding a healthy amount of diversity across candidate models, which can be exploited by stacked ensembles to produce a powerful final model. The effectiveness of this technique is reflected in the OpenML AutoML Benchmark, which compares the performance of several of the most well known, open source AutoML systems across a number of datasets.", "year": 2020, "venue": "", "authors": [ "E. LeDell", "S. Poirier" ], "externalIds": { "CorpusId": 221338558 }, "url": "https://www.semanticscholar.org/paper/22cba8f244258e0bba7ff4bb70c4e5b5ac3e2382", "referenceCount": 16, "citationCount": 314, "influentialCitationCount": 47, "isOpenAccess": false, "fieldsOfStudy": null }, { "title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", "abstract": "We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models (Peters et al., 2018a; Radford et al., 2018), BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. 
As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5 (7.7 point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).", "year": 2019, "venue": "North American Chapter of the Association for Computational Linguistics", "authors": [ "Jacob Devlin", "Ming-Wei Chang", "Kenton Lee", "Kristina Toutanova" ], "externalIds": { "MAG": "2951055169", "ACL": "N19-1423", "DBLP": "journals/corr/abs-1810-04805", "ArXiv": "1810.04805", "DOI": "10.18653/v1/N19-1423", "CorpusId": 52967399 }, "url": "https://www.semanticscholar.org/paper/df2b0e26d0599ce3e70df8a9da02e51594e0e992", "referenceCount": 63, "citationCount": 81678, "influentialCitationCount": 19056, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Elo-Rating and the Bradley-Terry Model: From Generalized Least Squares to Logistic Regression Models", "abstract": null, "year": 2017, "venue": "Statistical Modelling", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "The Corpus of Contemporary American English (COCA)", "abstract": ". This paper deals with the frequent use of modal auxiliary verbs in Corpus of Contemporary of American English (COCA). The modal auxiliary verbs mentioned as the data are can, could, will, would, may, might, shall, should, and must. Each data collected are taken from COCA in https://corpus.byu.edu/coca/. The research method is descriptive analysis; the present writers describe every data related to their function in each clause. The findings are related to the highest frequency and the number of elements follow the modal auxiliary verbs. From the 6,008,840 data, it is found the verb would (1,301,269 data) is the most frequently used, it is followed by the verb can (1,228,608 data), the verb will (1,118,515 data), the verb could (885,835 data), the verb may (494,144 data), the verb should (435,903 data), the verb might (300,258 data), the verb must (223,936 data), and the verb shall (20,372 data). This study has also revealed the number of elements following each of modal auxiliary verbs found in the COCA. It is reported that the verb would has seven different types,will has five different types, could and should has four different types, can, may, must, and shall have three different types, and might has two different types as the least number of elements following the verb. 
From the data analyzed, it is also reported that the modal auxiliary verb would is not only has the highest number in data (as the highest frequency) but also has various number of elements follow the verb.", "year": 2012, "venue": "", "authors": [ "Mark Davies" ], "externalIds": { "MAG": "1146155598", "CorpusId": 162133428 }, "url": "https://www.semanticscholar.org/paper/489b408faeb5a0b273a7769050e080228994c602", "referenceCount": 0, "citationCount": 1082, "influentialCitationCount": 144, "isOpenAccess": false, "fieldsOfStudy": [ "Psychology" ] }, { "title": "Common European Framework of Reference for Languages: learning, teaching, assessment", "abstract": "C2 Can understand with ease virtually everything heard or read. Can summarise information from different spoken and written sources, reconstructing arguments and accounts in a coherent presentation. Can express him/herself spontaneously, very fluently and precisely, differentiating finer shades of Proficient meaning even in more complex situations. User C1 Can understand a wide range of demanding, longer texts, and recognise implicit meaning. Can express him/herself fluently and spontaneously without much obvious searching for expressions. Can use language flexibly and effectively for social, academic and professional purposes. Can produce clear, well-structured, detailed text on complex subjects, showing controlled use of organisational patterns, connectors and cohesive devices. B2 Can understand the main ideas of complex text on both concrete and abstract topics, including technical discussions in his/her field of specialisation. Can interact with a degree of fluency and spontaneity that makes regular interaction with native speakers quite possible without strain for either party. Can produce clear, detailed text on a wide range of subjects and explain a viewpoint on a topical issue giving the advantages and Independent disadvantages of various options. User B1 Can understand the main points of clear standard input on familiar matters regularly encountered in work, school, leisure, etc. Can deal with most situations likely to arise whilst travelling in an area where the language is spoken. Can produce simple connected text on topics which are familiar or of personal interest. Can describe experiences and events, dreams, hopes and ambitions and briefly give reasons and explanations for opinions and plans. A2 Can understand sentences and frequently used expressions related to areas of most immediate relevance (e.g. very basic personal and family information, shopping, local geography, employment). Can communicate in simple and routine tasks requiring a simple and direct exchange of information on familiar and routine matters. Can describe in simple terms aspects of his/her background, immediate environment and matters in areas of immediate Basic need. User A1 Can understand and use familiar everyday expressions and very basic phrases aimed at the satisfaction of needs of a concrete type. Can introduce him/herself and others and can ask and answer questions about personal details such as where he/she lives, people he/she knows and things he/she has. Can interact in a simple way provided the other person talks slowly and clearly and is prepared to help.", "year": 2009, "venue": "", "authors": [ "Norman Verhelst", "P. V. Avermaet", "S. 
Takala", "Neus Figueras", "Brian North" ], "externalIds": { "MAG": "2304127486", "CorpusId": 170487691 }, "url": "https://www.semanticscholar.org/paper/2dbcb28829a1d141c68bd610a54b09bb499c4f45", "referenceCount": 15, "citationCount": 2260, "influentialCitationCount": 213, "isOpenAccess": false, "fieldsOfStudy": [ "Computer Science" ] }, { "title": "Pattern recognition and machine learning , volume 4", "abstract": null, "year": 2006, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Log Linear Trait Models: An Approach to Item Analysis and Test Construction", "abstract": null, "year": 1973, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2022. Auto-sklearn 2.0: Hands-free automl via meta-learning", "abstract": null, "year": null, "venue": "Journal of Machine Learning Research", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "set consists of all responses for operational items prior to the split date, and the test set consist of all responses past the split date for all items", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "2024. Amlb: an automl benchmark", "abstract": null, "year": null, "venue": "Journal of Machine Learning Research", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "Jump start train-test split: We select the same time and item splits as in the cold-start scenario", "abstract": null, "year": null, "venue": "The training set consists of all operational item responses prior to the split date as well as the first R responses for the pilot items after the split date (R=20, 40,", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "T1-4 Treatment conditions with ViC and Y/N Vocab", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null }, { "title": "T5-6 Same as T1-T4 except that ViC is recalibrated with more recent PT data. Naturally, launching Y/N Vocab in the single word for-Experiment", "abstract": null, "year": null, "venue": "", "authors": [], "externalIds": null, "url": null, "referenceCount": null, "citationCount": null, "influentialCitationCount": null, "isOpenAccess": null, "fieldsOfStudy": null } ] } }