Maintained by Difan Deng and Marius Lindauer.
The following list considers papers related to neural architecture search. It is by no means complete. If you miss a paper on the list, please let us know.
Please note that although NAS methods steadily improve, the quality of empirical evaluations in this field is still lagging behind compared to other areas in machine learning, AI and optimization. We would therefore like to share some best practices for empirical evaluations of NAS methods, which we believe will facilitate sustained and measurable progress in the field. If you are interested in a teaser, please read our blog post or directly jump to our checklist.
Transformers have gained increasing popularity in different domains. For a comprehensive list of papers focusing on Neural Architecture Search for Transformer-Based spaces, the awesome-transformer-search repo is all you need.
Early Access (publication year pending)
Zhu, Huijuan; Xia, Mengzhen; Wang, Liangmin; Xu, Zhicheng; Sheng, Victor S.
A Novel Knowledge Search Structure for Android Malware Detection Journal Article
In: IEEE Transactions on Services Computing, no. 1, pp. 1-14, 2024, ISSN: 1939-1374.
@article{10750332,
title = {A Novel Knowledge Search Structure for {Android} Malware Detection},
author = {Huijuan Zhu and Mengzhen Xia and Liangmin Wang and Zhicheng Xu and Victor S. Sheng},
url = {https://doi.ieeecomputersociety.org/10.1109/TSC.2024.3496333},
doi = {10.1109/TSC.2024.3496333},
issn = {1939-1374},
year = {2024},
date = {2024-11-01},
urldate = {2024-11-01},
journal = {IEEE Transactions on Services Computing},
number = {1},
pages = {1--14},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {While the Android platform is gaining explosive popularity, the number of malicious software (malware) is also increasing sharply. Thus, numerous malware detection schemes based on deep learning have been proposed. However, they are usually suffering from the cumbersome models with complex architectures and tremendous parameters. They usually require heavy computation power support, which seriously limit their deployment on actual application environments with limited resources (e.g., mobile edge devices). To surmount this challenge, we propose a novel Knowledge Distillation (KD) structure—Knowledge Search (KS). KS exploits Neural Architecture Search (NAS) to adaptively bridge the capability gap between teacher and student networks in KD by introducing a parallelized student-wise search approach. In addition, we carefully analyze the characteristics of malware and locate three cost-effective types of features closely related to malicious attacks, namely, Application Programming Interfaces (APIs), permissions and vulnerable components, to characterize Android Applications (Apps). Therefore, based on typical samples collected in recent years, we refine features while exploiting the natural relationship between them, and construct corresponding datasets. Massive experiments are conducted to investigate the effectiveness and sustainability of KS on these datasets. Our experimental results show that the proposed method yields an accuracy of 97.89% to detect Android malware, which performs better than state-of-the-art solutions.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhang, Feifei; Li, Mao; Ge, Jidong; Tang, Fenghui; Zhang, Sheng; Wu, Jie; Luo, Bin
Privacy-Preserving Federated Neural Architecture Search With Enhanced Robustness for Edge Computing Journal Article
In: IEEE Transactions on Mobile Computing, no. 1, pp. 1-18, 2024, ISSN: 1558-0660.
@article{10742476,
title = {Privacy-Preserving Federated Neural Architecture Search With Enhanced Robustness for Edge Computing},
author = {Feifei Zhang and Mao Li and Jidong Ge and Fenghui Tang and Sheng Zhang and Jie Wu and Bin Luo},
url = {https://doi.ieeecomputersociety.org/10.1109/TMC.2024.3490835},
doi = {10.1109/TMC.2024.3490835},
issn = {1558-0660},
year = {2024},
date = {2024-11-01},
urldate = {2024-11-01},
journal = {IEEE Transactions on Mobile Computing},
number = {1},
pages = {1--18},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {With the development of large-scale artificial intelligence services, edge devices are becoming essential providers of data and computing power. However, these edge devices are not immune to malicious attacks. Federated learning (FL), while protecting privacy of decentralized data through secure aggregation, struggles to trace adversaries and lacks optimization for heterogeneity. We discover that FL augmented with Differentiable Architecture Search (DARTS) can improve resilience against backdoor attacks while compatible with secure aggregation. Based on this, we propose a federated neural architecture search (NAS) framwork named SLNAS. The architecture of SLNAS is built on three pivotal components: a server-side search space generation method that employs an evolutionary algorithm with dual encodings, a federated NAS process based on DARTS, and client-side architecture tuning that utilizes Gumbel softmax combined with knowledge distillation. To validate robustness, we adapt a framework that includes backdoor attacks based on trigger optimization, data poisoning, and model poisoning, targeting both model weights and architecture parameters. Extensive experiments demonstrate that SLNAS not only effectively counters advanced backdoor attacks but also handles heterogeneity, outperforming defense baselines across a wide range of backdoor attack scenarios.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhang, Yu-Ming; Hsieh, Jun-Wei; Lee, Chun-Chieh; Fan, Kuo-Chin
RATs-NAS: Redirection of Adjacent Trails on Graph Convolutional Networks for Predictor-based Neural Architecture Search Journal Article
In: IEEE Transactions on Artificial Intelligence, vol. 1, no. 1, pp. 1-11, 2024, ISSN: 2691-4581.
@article{10685480,
title = {{RATs-NAS}: Redirection of Adjacent Trails on Graph Convolutional Networks for Predictor-based Neural Architecture Search},
author = {Yu-Ming Zhang and Jun-Wei Hsieh and Chun-Chieh Lee and Kuo-Chin Fan},
url = {https://doi.ieeecomputersociety.org/10.1109/TAI.2024.3465433},
doi = {10.1109/TAI.2024.3465433},
issn = {2691-4581},
year = {2024},
date = {2024-09-01},
urldate = {2024-09-01},
journal = {IEEE Transactions on Artificial Intelligence},
volume = {1},
number = {1},
pages = {1--11},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {Manually designed CNN architectures like VGG, ResNet, DenseNet, and MobileNet have achieved high performance across various tasks, but design them is time-consuming and costly. Neural Architecture Search (NAS) automates the discovery of effective CNN architectures, reducing the need for experts. However, evaluating candidate architectures requires significant GPU resources, leading to the use of predictor-based NAS, such as graph convolutional networks (GCN), which is the popular option to construct predictors. However, we discover that, even though the ability of GCN mimics the propagation of features of real architectures, the binary nature of the adjacency matrix limits its effectiveness. To address this, we propose Redirection of Adjacent Trails (RATs), which adaptively learns trail weights within the adjacency matrix. Our RATs-GCN outperform other predictors by dynamically adjusting trail weights after each graph convolution layer. Additionally, the proposed Divide Search Sampling (DSS) strategy, based on the observation of cell-based NAS that architectures with similar FLOPs perform similarly, enhances search efficiency. Our RATs-NAS, which combine RATs-GCN and DSS, shows significant improvements over other predictor-based NAS methods on NASBench-101, NASBench-201, and NASBench-301.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chen, X.; Yang, C.
CIMNet: Joint Search for Neural Network and Computing-in-Memory Architecture Journal Article
In: IEEE Micro, no. 1, pp. 1-12, 2024, ISSN: 1937-4143.
@article{10551739,
title = {{CIMNet}: Joint Search for Neural Network and Computing-in-Memory Architecture},
author = {X. Chen and C. Yang},
url = {https://www.computer.org/csdl/magazine/mi/5555/01/10551739/1XyKBmSlmPm},
doi = {10.1109/MM.2024.3409068},
issn = {1937-4143},
year = {2024},
date = {2024-06-01},
urldate = {2024-06-01},
journal = {IEEE Micro},
number = {1},
pages = {1--12},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {Computing-in-memory (CIM) architecture has been proven to effectively transcend the memory wall bottleneck, expanding the potential of low-power and high-throughput applications such as machine learning. Neural architecture search (NAS) designs ML models to meet a variety of accuracy, latency, and energy constraints. However, integrating CIM into NAS presents a major challenge due to additional simulation overhead from the non-ideal characteristics of CIM hardware. This work introduces a quantization and device aware accuracy predictor that jointly scores quantization policy, CIM architecture, and neural network architecture, eliminating the need for time-consuming simulations in the search process. We also propose reducing the search space based on architectural observations, resulting in a well-pruned search space customized for CIM. These allow for efficient exploration of superior combinations in mere CPU minutes. Our methodology yields CIMNet, which consistently improves the trade-off between accuracy and hardware efficiency on benchmarks, providing valuable architectural insights.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Dong, Yukang; Pan, Fanxing; Gui, Yi; Jiang, Wenbin; Wan, Yao; Zheng, Ran; Jin, Hai
Comprehensive Architecture Search for Deep Graph Neural Networks Journal Article
In: IEEE Transactions on Big Data, no. 1, pp. 1-15, 2025, ISSN: 2332-7790.
@article{10930718,
title = {Comprehensive Architecture Search for Deep Graph Neural Networks},
author = {Yukang Dong and Fanxing Pan and Yi Gui and Wenbin Jiang and Yao Wan and Ran Zheng and Hai Jin},
url = {https://doi.ieeecomputersociety.org/10.1109/TBDATA.2025.3552336},
doi = {10.1109/TBDATA.2025.3552336},
issn = {2332-7790},
year = {2025},
date = {2025-03-01},
urldate = {2025-03-01},
journal = {IEEE Transactions on Big Data},
number = {1},
pages = {1--15},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {In recent years, Neural Architecture Search (NAS) has emerged as a promising approach for automatically discovering superior model architectures for deep Graph Neural Networks (GNNs). Different methods have paid attention to different types of search spaces. However, due to the time-consuming nature of training deep GNNs, existing NAS methods often fail to explore diverse search spaces sufficiently, which constrains their effectiveness. To crack this hard nut, we propose CAS-DGNN, a novel comprehensive architecture search method for deep GNNs. It encompasses four kinds of search spaces that are the composition of aggregate and update operators, different types of aggregate operators, residual connections, and hyper-parameters. To meet the needs of such a complex situation, a phased and hybrid search strategy is proposed to accommodate the diverse characteristics of different search spaces. Specifically, we divide the search process into four phases, utilizing evolutionary algorithms and Bayesian optimization. Meanwhile, we design two distinct search methods for residual connections (All-connected search and Initial Residual search) to streamline the search space, which enhances the scalability of CAS-DGNN. The experimental results show that CAS-DGNN achieves higher accuracy with competitive search costs across ten public datasets compared to existing methods.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Yan, J.; Liu, J.; Xu, H.; Wang, Z.; Qiao, C.
Peaches: Personalized Federated Learning with Neural Architecture Search in Edge Computing Journal Article
In: IEEE Transactions on Mobile Computing, no. 1, pp. 1-17, 2024, ISSN: 1558-0660.
@article{10460163,
title = {Peaches: Personalized Federated Learning with Neural Architecture Search in Edge Computing},
author = {J. Yan and J. Liu and H. Xu and Z. Wang and C. Qiao},
doi = {10.1109/TMC.2024.3373506},
issn = {1558-0660},
year = {2024},
date = {2024-03-01},
urldate = {2024-03-01},
journal = {IEEE Transactions on Mobile Computing},
number = {1},
pages = {1--17},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {In edge computing (EC), federated learning (FL) enables numerous distributed devices (or workers) to collaboratively train AI models without exposing their local data. Most works of FL adopt a predefined architecture on all participating workers for model training. However, since workers' local data distributions vary heavily in EC, the predefined architecture may not be the optimal choice for every worker. It is also unrealistic to manually design a high-performance architecture for each worker, which requires intense human expertise and effort. In order to tackle this challenge, neural architecture search (NAS) has been applied in FL to automate the architecture design process. Unfortunately, the existing federated NAS frameworks often suffer from the difficulties of system heterogeneity and resource limitation. To remedy this problem, we present a novel framework, termed Peaches, to achieve efficient searching and training in the resource-constrained EC system. Specifically, the local model of each worker is stacked by base cell and personal cell, where the base cell is shared by all workers to capture the common knowledge and the personal cell is customized for each worker to fit the local data. We determine the number of base cells, shared by all workers, according to the bandwidth budget on the parameters server. Besides, to relieve the data and system heterogeneity, we find the optimal number of personal cells for each worker based on its computing capability. In addition, we gradually prune the search space during training to mitigate the resource consumption. We evaluate the performance of Peaches through extensive experiments, and the results show that Peaches can achieve an average accuracy improvement of about 6.29% and up to 3.97× speed up compared with the baselines.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Sun, Genchen; Liu, Zhengkun; Gan, Lin; Su, Hang; Li, Ting; Zhao, Wenfeng; Sun, Biao
SpikeNAS-Bench: Benchmarking NAS Algorithms for Spiking Neural Network Architecture Journal Article
In: IEEE Transactions on Artificial Intelligence, vol. 1, no. 1, pp. 1-12, 2025, ISSN: 2691-4581.
@article{10855683,
title = {{SpikeNAS-Bench}: Benchmarking {NAS} Algorithms for Spiking Neural Network Architecture},
author = {Genchen Sun and Zhengkun Liu and Lin Gan and Hang Su and Ting Li and Wenfeng Zhao and Biao Sun},
url = {https://doi.ieeecomputersociety.org/10.1109/TAI.2025.3534136},
doi = {10.1109/TAI.2025.3534136},
issn = {2691-4581},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {IEEE Transactions on Artificial Intelligence},
volume = {1},
number = {1},
pages = {1--12},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {In recent years, Neural Architecture Search (NAS) has marked significant advancements, yet its efficacy is marred by the dependence on substantial computational resources. To mitigate this, the development of NAS benchmarks has emerged, offering datasets that enumerate all potential network architectures and their performances within a predefined search space. Nonetheless, these benchmarks predominantly focus on convolutional architectures, which are criticized for their limited interpretability and suboptimal hardware efficiency. Recognizing the untapped potential of Spiking Neural Networks (SNNs) — often hailed as the third generation of neural networks for their biological realism and computational thrift — this study introduces SpikeNAS-Bench. As a pioneering benchmark for SNN, SpikeNAS-Bench utilizes a cell-based search space, integrating leaky integrate-and-fire (LIF) neurons with variable thresholds as candidate operations. It encompasses 15,625 candidate architectures, rigorously evaluated on CIFAR10, CIFAR100 and Tiny-ImageNet datasets. This paper delves into the architectural nuances of SpikeNAS-Bench, leveraging various criteria to underscore the benchmark’s utility and presenting insights that could steer future NAS algorithm designs. Moreover, we assess the benchmark’s consistency through three distinct proxy types: zero-cost-based, early-stop-based, and predictor-based proxies. Additionally, the paper benchmarks seven contemporary NAS algorithms to attest to SpikeNAS-Bench’s broad applicability. We commit to providing training logs, diagnostic data for all candidate architectures, and the promise to release all code and datasets post-acceptance, aiming to catalyze further exploration and innovation within the SNN domain. SpikeNAS-Bench is open source at https://github.com/XXX (hidden for double anonymous review).},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Li, Changlin; Lin, Sihao; Tang, Tao; Wang, Guangrun; Li, Mingjie; Li, Zhihui; Chang, Xiaojun
BossNAS Family: Block-wisely Self-supervised Neural Architecture Search Journal Article
In: IEEE Transactions on Pattern Analysis & Machine Intelligence, no. 1, pp. 1-15, 2025, ISSN: 1939-3539.
@article{10839629,
title = {{BossNAS} Family: Block-wisely Self-supervised Neural Architecture Search},
author = {Changlin Li and Sihao Lin and Tao Tang and Guangrun Wang and Mingjie Li and Zhihui Li and Xiaojun Chang},
url = {https://doi.ieeecomputersociety.org/10.1109/TPAMI.2025.3529517},
doi = {10.1109/TPAMI.2025.3529517},
issn = {1939-3539},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
number = {1},
pages = {1--15},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {Recent advances in hand-crafted neural architectures for visual recognition underscore the pressing need to explore architecture designs comprising diverse building blocks. Concurrently, neural architecture search (NAS) methods have gained traction as a means to alleviate human efforts. Nevertheless, the question of whether NAS methods can efficiently and effectively manage diversified search spaces featuring disparate candidates, such as Convolutional Neural Networks (CNNs) and transformers, remains an open question. In this work, we introduce a novel unsupervised NAS approach called BossNAS (Block-wisely Self-supervised Neural Architecture Search), which aims to address the problem of inaccurate predictive architecture ranking caused by a large weight-sharing space while mitigating potential ranking issue caused by biased supervision. To achieve this, we factorize the search space into blocks and introduce a novel self-supervised training scheme called Ensemble Bootstrapping, to train each block separately in an unsupervised manner. In the search phase, we propose an unsupervised Population-Centric Search, optimizing the candidate architecture towards the population center. Additionally, we enhance our NAS method by integrating masked image modeling and present BossNAS++ to overcome the lack of dense supervision in our block-wise self-supervised NAS. In BossNAS++, we introduce the training technique named Masked Ensemble Bootstrapping for block-wise supernet, accompanied by a Masked Population-Centric Search scheme to promote fairer architecture selection. Our family of models, discovered through BossNAS and BossNAS++, delivers impressive results across various search spaces and datasets. Our transformer model discovered by BossNAS++ attains a remarkable accuracy of 83.2% on ImageNet with only 10.5B MAdds, surpassing DeiT-B by 1.4% while maintaining a lower computation cost. 
Moreover, our approach excels in architecture rating accuracy, achieving Spearman correlations of 0.78 and 0.76 on the canonical MBConv search space with ImageNet and the NATS-Bench size search space with CIFAR-100, respectively, outperforming state-of-the-art NAS methods.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2025
Wang, Weiqi; Bao, Feilong; Xing, Zhecong; Lian, Zhe
A Survey: Research Progress of Feature Fusion Technology Journal Article
In: 2025.
@article{wangsurvey,
title = {A Survey: Research Progress of Feature Fusion Technology},
author = {Weiqi Wang and Feilong Bao and Zhecong Xing and Zhe Lian},
url = {http://poster-openaccess.com/files/ICIC2024/862.pdf},
year = {2025},
date = {2025-12-01},
urldate = {2025-12-01},
note = {Journal/venue missing in source record; linked PDF is an ICIC 2024 poster -- verify},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
(Ed.)
MACHINE-GENERATED NEURAL NETWORKS FOR SHORT-TERM LOAD FORECASTING Collection
2025.
@inproceedings{vacheva2025unitech,
title = {Machine-Generated Neural Networks for Short-Term Load Forecasting},
author = {Gergana Vacheva and Plamen Stanchev and Nikolay Hinov},
url = {https://unitechsp.tugab.bg/images/2024/1-EE/s1_p143_v1.pdf},
year = {2025},
date = {2025-12-01},
urldate = {2025-12-01},
booktitle = {International Scientific Conference UNITECH'2024},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Thanh Hai; Nguyen, Dac Tam; Ngo, Minh Duc; Doan, Long; Luong, Ngoc Hoang; Binh, Huynh Thi Thanh
Kernelshap-nas: a shapley additive explanatory approach for characterizing operation influences Journal Article
In: Neural Computing and Applications , 2025.
@article{tran2025kernelshapnas,
title = {{KernelSHAP-NAS}: a {Shapley} additive explanatory approach for characterizing operation influences},
author = {Thanh Hai Tran and Dac Tam Nguyen and Minh Duc Ngo and Long Doan and Ngoc Hoang Luong and Huynh Thi Thanh Binh},
url = {https://link.springer.com/article/10.1007/s00521-025-11071-2},
doi = {10.1007/s00521-025-11071-2},
year = {2025},
date = {2025-03-05},
urldate = {2025-03-05},
journal = {Neural Computing and Applications},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Xu, Dikai; Cao, Bin
Adaptive Multiobjective Evolutionary Generative Adversarial Network for Metaverse Network Intrusion Detection Journal Article
In: Science Partner Journals, 2025.
@article{xu2025metaverse,
title = {Adaptive Multiobjective Evolutionary Generative Adversarial Network for Metaverse Network Intrusion Detection},
author = {Dikai Xu and Bin Cao},
url = {https://spj.science.org/doi/pdf/10.34133/research.0665},
doi = {10.34133/research.0665},
year = {2025},
date = {2025-03-01},
urldate = {2025-03-01},
journal = {Research},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Li, Yuangang; Ma, Rui; Zhang, Qian; Wang, Zeyu; Zong, Linlin; Liu, Xinyue
Neural architecture search using attention enhanced precise path evaluation and efficient forward evolution Journal Article
In: scientific reports , 2025.
@article{li2025pathevaluation,
title = {Neural architecture search using attention enhanced precise path evaluation and efficient forward evolution},
author = {Yuangang Li and Rui Ma and Qian Zhang and Zeyu Wang and Linlin Zong and Xinyue Liu},
url = {https://www.nature.com/articles/s41598-025-94187-8},
doi = {10.1038/s41598-025-94187-8},
year = {2025},
date = {2025-03-01},
urldate = {2025-03-01},
journal = {Scientific Reports},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
(Ed.)
HPE-DARTS: Hybrid Pruning and Proxy Evaluation in Differentiable Architecture Search Collection
2025.
@inproceedings{lin2025hpedarts,
title = {{HPE-DARTS}: Hybrid Pruning and Proxy Evaluation in Differentiable Architecture Search},
author = {Hung-I Lin and Lin-Jing Kuo and Sheng-De Wang},
url = {https://www.scitepress.org/Papers/2025/131487/131487.pdf},
year = {2025},
date = {2025-03-01},
urldate = {2025-03-01},
booktitle = {Proceedings of the 17th International Conference on Agents and Artificial Intelligence (ICAART 2025)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
(Ed.)
2025.
@inproceedings{Friderikos-dte25a,
title = {Optimized {LSTM} Neural Networks via Neural Architecture Search for Predicting Damage Evolution in Composite Laminates},
author = {O. Friderikos and A. Mendoza and Emmanuel Baranger and D. Sagris and C. David},
url = {https://congressarchive.cimne.com/dte_aicomas_2025/abstracts/b8d1d10a96b711efba01000c29ddfc0c.pdf},
year = {2025},
date = {2025-03-01},
urldate = {2025-03-01},
booktitle = {Digital Twins in Engineering \& Artificial Intelligence and Computational Methods in Applied Science, DTE - AICOMAS 2025},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Fang, Xuwei; Xie, Weisheng; Li, Hui; Zhou, Wenbin; Hang, Chen; Gao, Xiangxiang
DARTS-EAST: an edge-adaptive selection with topology first differentiable architecture selection method Journal Article
In: Applied Intelligence , 2025.
@article{fang-ai25a,
title = {{DARTS-EAST}: an edge-adaptive selection with topology first differentiable architecture selection method},
author = {Xuwei Fang and Weisheng Xie and Hui Li and Wenbin Zhou and Chen Hang and Xiangxiang Gao},
url = {https://link.springer.com/article/10.1007/s10489-025-06353-0},
doi = {10.1007/s10489-025-06353-0},
year = {2025},
date = {2025-03-01},
urldate = {2025-03-01},
journal = {Applied Intelligence},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
(Ed.)
Neural Architecture Search: Tradeoff Between Performance and Efficiency Collection
2025.
@inproceedings{nguyen2025tradeoff,
title = {Neural Architecture Search: Tradeoff Between Performance and Efficiency},
author = {Tien Dung Nguyen and Nassim Mokhtari and Alexis Nédélec},
url = {https://www.scitepress.org/Papers/2025/132969/132969.pdf},
year = {2025},
date = {2025-03-01},
urldate = {2025-03-01},
booktitle = {Proceedings of the 17th International Conference on Agents and Artificial Intelligence (ICAART 2025)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
(Ed.)
PQNAS: Mixed-precision Quantization-aware Neural Architecture Search with Pseudo Quantizer Collection
2025.
@inproceedings{gao-icassp25a,
title = {{PQNAS}: Mixed-precision Quantization-aware Neural Architecture Search with Pseudo Quantizer},
author = {Tianxiao Gao and Li Guo and Shihao Wang and Shiai Zhu and Dajiang Zhou},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=10888233},
year = {2025},
date = {2025-03-01},
urldate = {2025-03-01},
booktitle = {2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
He, Zhimin; Chen, Hongxiang; Zhou, Yan; Situ, Haozhen; Li, Yongyao; Li, Lvzhou
Self-supervised representation learning for Bayesian quantum architecture search Journal Article
In: Phys. Rev. A, vol. 111, iss. 3, pp. 032403, 2025.
@article{PhysRevA.111.032403,
title = {Self-supervised representation learning for {Bayesian} quantum architecture search},
author = {Zhimin He and Hongxiang Chen and Yan Zhou and Haozhen Situ and Yongyao Li and Lvzhou Li},
url = {https://link.aps.org/doi/10.1103/PhysRevA.111.032403},
doi = {10.1103/PhysRevA.111.032403},
year = {2025},
date = {2025-03-01},
urldate = {2025-03-01},
journal = {Physical Review A},
volume = {111},
issue = {3},
pages = {032403},
publisher = {American Physical Society},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Feng, Shiyang; Li, Zhaowei; Zhang, Bo; Chen, Tao
DSF2-NAS: Dual-Stage Feature Fusion via Network Architecture Search for Classification of Multimodal Remote Sensing Images Journal Article
In: IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing. , 2025.
@article{feng-ieeejstoaeors25a,
title = {{DSF2-NAS}: Dual-Stage Feature Fusion via Network Architecture Search for Classification of Multimodal Remote Sensing Images},
author = {Shiyang Feng and Zhaowei Li and Bo Zhang and Tao Chen},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=10904332},
year = {2025},
date = {2025-03-01},
urldate = {2025-03-01},
journal = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
(Ed.)
TinyDevID: TinyML-Driven IoT Devices IDentification Using Network Flow Data Collection
2025.
@inproceedings{Rushikesh-csp25a,
title = {{TinyDevID}: {TinyML}-Driven {IoT} Devices {IDentification} Using Network Flow Data},
author = {Priyanka Rushikesh Chaudhary and Anand Agrawal and Rajib Ranjan Maiti},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=10885715},
year = {2025},
date = {2025-02-01},
urldate = {2025-02-01},
booktitle = {COMSNETS 2025 - Cybersecurity \& Privacy Workshop (CSP)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Yu, Sixing
2025.
@phdthesis{yu-phd25a,
title = {Scalable and resource-efficient federated learning: Techniques for resource-constrained heterogeneous systems},
author = {Sixing Yu},
school = {Iowa State University},
url = {https://www.proquest.com/docview/3165602177?pq-origsite=gscholar&fromopenview=true&sourcetype=Dissertations%20&%20Theses},
year = {2025},
date = {2025-02-01},
urldate = {2025-02-01},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
Fu, Jintao; Cong, Peng; Xu, Shuo; Chang, Jiahao; Liu, Ximing; Sun, Yuewen
Neural architecture search with Deep Radon Prior for sparse-view CT image reconstruction Journal Article
In: Med Phys , 2025.
@article{Fu-medphs25a,
title = {Neural architecture search with {Deep Radon Prior} for sparse-view {CT} image reconstruction},
author = {Jintao Fu and Peng Cong and Shuo Xu and Jiahao Chang and Ximing Liu and Yuewen Sun},
url = {https://pubmed.ncbi.nlm.nih.gov/39930320/},
year = {2025},
date = {2025-02-01},
urldate = {2025-02-01},
journal = {Medical Physics},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhao, Yi-Heng; Pang, Shen-Wen; Huang, Heng-Zhi; Wu, Shao-Wen; Sun, Shao-Hua; Liu, Zhen-Bing; Pan, Zhi-Chao
Automatic clustering of single-molecule break junction data through task-oriented representation learning Journal Article
In: Rare Metals , 2025.
@article{zhao-rarem25a,
title = {Automatic clustering of single-molecule break junction data through task-oriented representation learning},
author = {Yi-Heng Zhao and Shen-Wen Pang and Heng-Zhi Huang and Shao-Wen Wu and Shao-Hua Sun and Zhen-Bing Liu and Zhi-Chao Pan},
url = {https://link.springer.com/article/10.1007/s12598-024-03089-7},
doi = {10.1007/s12598-024-03089-7},
year = {2025},
date = {2025-02-01},
urldate = {2025-02-01},
journal = {Rare Metals},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Huang, Tao
Efficient Deep Neural Architecture Design and Training PhD Thesis
2025.
@phdthesis{huang2025efficientdesign,
title = {Efficient Deep Neural Architecture Design and Training},
author = {Huang, Tao},
school = {The University of Sydney},
url = {https://ses.library.usyd.edu.au/handle/2123/33598},
year = {2025},
date = {2025-02-01},
urldate = {2025-02-01},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
Herterich, Nils; Liu, Kai; Stein, Anthony
Multi-objective neural architecture search for real-time weed detection on embedded system Miscellaneous
2025.
@misc{Herterich,
title = {Multi-objective neural architecture search for real-time weed detection on embedded system},
author = {Nils Herterich and Kai Liu and Anthony Stein},
url = {https://dl.gi.de/server/api/core/bitstreams/29a49f8d-304e-4073-8a92-4bef6483c087/content},
year = {2025},
date = {2025-02-01},
urldate = {2025-02-01},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Tabak, Gabriel Couto; Molenaar, Dylan; Curi, Mariana
An evolutionary neural architecture search for item response theory autoencoders Journal Article
In: Behaviormetrika , 2025.
@article{tabak2025evolutionary,
title = {An evolutionary neural architecture search for item response theory autoencoders},
author = {Gabriel Couto Tabak and Dylan Molenaar and Mariana Curi},
url = {https://link.springer.com/article/10.1007/s41237-024-00250-5},
doi = {10.1007/s41237-024-00250-5},
year = {2025},
date = {2025-01-27},
urldate = {2025-01-27},
journal = {Behaviormetrika},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hao, Debei; Pei, Songwei
MIG-DARTS: towards effective differentiable architecture search by gradually mitigating the initial-channel gap between search and evaluation Journal Article
In: Neural Computing and Applications, 2025.
@article{hao2025migdarts,
title = {{MIG-DARTS}: towards effective differentiable architecture search by gradually mitigating the initial-channel gap between search and evaluation},
author = {Debei Hao and Songwei Pei},
url = {https://link.springer.com/article/10.1007/s00521-024-10681-6},
doi = {10.1007/s00521-024-10681-6},
year = {2025},
date = {2025-01-09},
urldate = {2025-01-09},
journal = {Neural Computing and Applications},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
(Ed.)
2025.
@collection{zhao2025h4h,
title = {{H4H}: Hybrid Convolution-Transformer Architecture Search for {NPU-CIM} Heterogeneous Systems for {AR/VR} Applications},
author = {Yiwei Zhao and Jinhui Chen and Sai Qian Zhang and Syed Shakib Sarwar and Kleber Hugo Stangherlin and Jorge Tomas Gomez and Jae-Sun Seo and Barbara De Salvo and Chiao Liu and Phillip B. Gibbons and Ziyun Li},
url = {https://www.pdl.cmu.edu/PDL-FTP/associated/ASP-DAC2025-1073-12.pdf},
year = {2025},
date = {2025-01-02},
urldate = {2025-01-02},
booktitle = {ASPDAC ’25},
keywords = {},
pubstate = {published},
tppubtype = {collection}
}
Li, Kefan; Wan, Yuting; Ma, Ailong; Zhong, Yanfei
A Lightweight Multi-Scale and Multi-Attention Hyperspectral Image Classification Network Based on Multi-Stage Search Journal Article
In: IEEE Transactions on Geoscience and Remote Sensing, pp. 1-1, 2025.
@article{10935661,
title = {A Lightweight Multi-Scale and Multi-Attention Hyperspectral Image Classification Network Based on Multi-Stage Search},
author = {Kefan Li and Yuting Wan and Ailong Ma and Yanfei Zhong},
url = {https://ieeexplore.ieee.org/abstract/document/10935661},
doi = {10.1109/TGRS.2025.3553147},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {IEEE Transactions on Geoscience and Remote Sensing},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Xue, Yu; Hu, Bohan; Neri, Ferrante
A Surrogate Model With Multiple Comparisons and Semi-Online Learning for Evolutionary Neural Architecture Search Journal Article
In: IEEE Transactions on Emerging Topics in Computational Intelligence, pp. 1-13, 2025.
@article{10935345,
title = {A Surrogate Model With Multiple Comparisons and Semi-Online Learning for Evolutionary Neural Architecture Search},
author = {Yu Xue and Bohan Hu and Ferrante Neri},
url = {https://ieeexplore.ieee.org/abstract/document/10935345},
doi = {10.1109/TETCI.2025.3547621},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {IEEE Transactions on Emerging Topics in Computational Intelligence},
pages = {1--13},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wang, Mansheng; Gu, Yu; Yang, Lidong; Zhang, Baohua; Wang, Jing; Lu, Xiaoqi; Li, Jianjun; Liu, Xin; Zhao, Ying; Yu, Dahua; Tang, Siyuan; He, Qun
A novel high-precision bilevel optimization method for 3D pulmonary nodule classification Journal Article
In: Physica Medica, vol. 133, pp. 104954, 2025, ISSN: 1120-1797.
@article{WANG2025104954,
title = {A novel high-precision bilevel optimization method for {3D} pulmonary nodule classification},
author = {Mansheng Wang and Yu Gu and Lidong Yang and Baohua Zhang and Jing Wang and Xiaoqi Lu and Jianjun Li and Xin Liu and Ying Zhao and Dahua Yu and Siyuan Tang and Qun He},
url = {https://www.sciencedirect.com/science/article/pii/S112017972500064X},
doi = {10.1016/j.ejmp.2025.104954},
issn = {1120-1797},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Physica Medica},
volume = {133},
pages = {104954},
abstract = {Background and objective
Classification of pulmonary nodules is important for the early diagnosis of lung cancer; however, the manual design of classification models requires substantial expert effort. To automate the model design process, we propose a neural architecture search with high-precision bilevel optimization (NAS-HBO) that directly searches for the optimal network on three-dimensional (3D) images.
Methods
We propose a novel high-precision bilevel optimization method (HBOM) to search for an optimal 3D pulmonary nodule classification model. We employed memory optimization techniques with a partially decoupled operation-weighting method to reduce the memory overhead while maintaining path selection stability. Additionally, we introduce a novel maintaining receptive field criterion (MRFC) within the NAS-HBO framework. MRFC narrows the search space by selecting and expanding the 3D Mobile Inverted Residual Bottleneck Block (3D-MBconv) operation based on previous receptive fields, thereby enhancing the scalability and practical application capabilities of NAS-HBO in terms of model complexity and performance.
Results
In this study, 888 CT images, including 554 benign and 450 malignant nodules, were obtained from the LIDC-IDRI dataset. The results showed that NAS-HBO achieved an impressive accuracy of 91.51 % after less than 6 h of searching, utilizing a mere 12.79 M parameters.
Conclusion
The proposed NAS-HBO method effectively automates the design of 3D pulmonary nodule classification models, achieving impressive accuracy with efficient parameters. By incorporating the HBOM and MRFC techniques, we demonstrated enhanced accuracy and scalability in model optimization for early lung cancer diagnosis. The related codes and results have been released at https://github.com/GuYuIMUST/NAS-HBO.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Classification of pulmonary nodules is important for the early diagnosis of lung cancer; however, the manual design of classification models requires substantial expert effort. To automate the model design process, we propose a neural architecture search with high-precision bilevel optimization (NAS-HBO) that directly searches for the optimal network on three-dimensional (3D) images.
Methods
We propose a novel high-precision bilevel optimization method (HBOM) to search for an optimal 3D pulmonary nodule classification model. We employed memory optimization techniques with a partially decoupled operation-weighting method to reduce the memory overhead while maintaining path selection stability. Additionally, we introduce a novel maintaining receptive field criterion (MRFC) within the NAS-HBO framework. MRFC narrows the search space by selecting and expanding the 3D Mobile Inverted Residual Bottleneck Block (3D-MBconv) operation based on previous receptive fields, thereby enhancing the scalability and practical application capabilities of NAS-HBO in terms of model complexity and performance.
Results
In this study, 888 CT images, including 554 benign and 450 malignant nodules, were obtained from the LIDC-IDRI dataset. The results showed that NAS-HBO achieved an impressive accuracy of 91.51 % after less than 6 h of searching, utilizing a mere 12.79 M parameters.
Conclusion
The proposed NAS-HBO method effectively automates the design of 3D pulmonary nodule classification models, achieving impressive accuracy with efficient parameters. By incorporating the HBOM and MRFC techniques, we demonstrated enhanced accuracy and scalability in model optimization for early lung cancer diagnosis. The related codes and results have been released at https://github.com/GuYuIMUST/NAS-HBO.
Winter, Benjamin David; Teahan, William John
Evaluating a Novel Neuroevolution and Neural Architecture Search System Technical Report
2025.
@techreport{winter2025evaluatingnovelneuroevolutionneural,
title = {Evaluating a Novel Neuroevolution and Neural Architecture Search System},
author = {Benjamin David Winter and William John Teahan},
url = {https://arxiv.org/abs/2503.10869},
eprint = {2503.10869},
eprinttype = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Winter, Benjamin David; Teahan, William J.
Ecological Neural Architecture Search Technical Report
2025.
@techreport{winter2025ecologicalneuralarchitecturesearch,
title = {Ecological Neural Architecture Search},
author = {Benjamin David Winter and William J. Teahan},
url = {https://arxiv.org/abs/2503.10908},
eprint = {2503.10908},
eprinttype = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Jeon, Jeimin; Oh, Youngmin; Lee, Junghyup; Baek, Donghyeon; Kim, Dohyung; Eom, Chanho; Ham, Bumsub
Subnet-Aware Dynamic Supernet Training for Neural Architecture Search Technical Report
2025.
@techreport{jeon2025subnetawaredynamicsupernettraining,
title = {Subnet-Aware Dynamic Supernet Training for Neural Architecture Search},
author = {Jeimin Jeon and Youngmin Oh and Junghyup Lee and Donghyeon Baek and Dohyung Kim and Chanho Eom and Bumsub Ham},
url = {https://arxiv.org/abs/2503.10740},
eprint = {2503.10740},
eprinttype = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Onzo, Bernard-marie; Xue, Yu; Neri, Ferrante
Surrogate-assisted evolutionary neural architecture search based on smart-block discovery Journal Article
In: Expert Systems with Applications, vol. 277, pp. 127237, 2025, ISSN: 0957-4174.
@article{ONZO2025127237,
title = {Surrogate-assisted evolutionary neural architecture search based on smart-block discovery},
author = {Bernard-marie Onzo and Yu Xue and Ferrante Neri},
url = {https://www.sciencedirect.com/science/article/pii/S0957417425008590},
doi = {10.1016/j.eswa.2025.127237},
issn = {0957-4174},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Expert Systems with Applications},
volume = {277},
pages = {127237},
abstract = {Neural architecture search (NAS) has emerged as a powerful method for automating neural network design, yet its high computational cost remains a significant challenge. This paper introduces hybrid training-less neural architecture search (HYTES-NAS), a novel hybrid NAS framework that integrates evolutionary computation with a training-free evaluation strategy, significantly reducing computational demands while maintaining high search efficiency. Unlike conventional NAS methods that rely on full model training, HYTES-NAS leverages a surrogate-assisted scoring mechanism to assess candidate architectures efficiently. Additionally, a smart-block discovery strategy and particle swarm optimisation are employed to refine the search space and accelerate convergence. Experimental results on multiple NAS benchmarks demonstrate that HYTES-NAS achieves superior performance with significantly lower computational cost compared to state-of-the-art NAS methods. This work provides a promising and scalable solution for efficient NAS, making high-performance architecture search more accessible for real-world applications.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Xie, Xiaofeng; Gao, Yuelin; Zhang, Yuming
An improved Artificial Protozoa Optimizer for CNN architecture optimization Journal Article
In: Neural Networks, pp. 107368, 2025, ISSN: 0893-6080.
@article{XIE2025107368,
title = {An improved Artificial Protozoa Optimizer for {CNN} architecture optimization},
author = {Xiaofeng Xie and Yuelin Gao and Yuming Zhang},
url = {https://www.sciencedirect.com/science/article/pii/S0893608025002473},
doi = {10.1016/j.neunet.2025.107368},
issn = {0893-6080},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Neural Networks},
pages = {107368},
abstract = {In this paper, we propose a novel neural architecture search (NAS) method called MAPOCNN, which leverages an enhanced version of the Artificial Protozoa Optimizer (APO) to optimize the architecture of Convolutional Neural Networks (CNNs). The APO is known for its rapid convergence, high stability, and minimal parameter involvement. To further improve its performance, we introduce MAPO (Modified Artificial Protozoa Optimizer), which incorporates the phototaxis behavior of protozoa. This addition helps mitigate the risk of premature convergence, allowing the algorithm to explore a broader range of possible CNN architectures and ultimately identify more optimal solutions. Through rigorous experimentation on benchmark datasets, including Rectangle and Mnist-random, we demonstrate that MAPOCNN not only achieves faster convergence times but also performs competitively when compared to other state-of-the-art NAS algorithms. The results highlight the effectiveness of MAPOCNN in efficiently discovering CNN architectures that outperform existing methods in terms of both speed and accuracy. This work presents a promising direction for optimizing deep learning architectures using biologically inspired optimization techniques.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhao, Tianchen; Wang, Xianpeng; Song, Xiangman
Multiobjective Backbone Network Architecture Search Based on Transfer Learning in Steel Defect Detection Journal Article
In: Neurocomputing, pp. 130012, 2025, ISSN: 0925-2312.
@article{ZHAO2025130012,
title = {Multiobjective Backbone Network Architecture Search Based on Transfer Learning in Steel Defect Detection},
author = {Tianchen Zhao and Xianpeng Wang and Xiangman Song},
url = {https://www.sciencedirect.com/science/article/pii/S0925231225006848},
doi = {10.1016/j.neucom.2025.130012},
issn = {0925-2312},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Neurocomputing},
pages = {130012},
abstract = {In recent years, steel defect detection methods based on deep learning have been widely used. However, due to the shape specificity of steel defects and data scarcity, using existing convolutional neural network architectures for training requires significant expertise and time to fine-tune the hyperparameters. Transfer learning effectively tackles the challenges of data scarcity or limited computing resources by transferring domain knowledge from source tasks to related target tasks, reducing the resource consumption of model training from scratch. In this paper, we propose a transfer learning-based multiobjective backbone network architecture search method (TMBNAS). First, TMBNAS formulates defect detection network search as a multiobjective problem while optimizing its detection accuracy and model complexity. Second, an effective variable-length encoding strategy is designed to represent different building blocks and unpredictable optimal depths in convolutional neural networks, and targeted improvements are made to the crossover and mutation operators. For the specificity of the steel defect detection task, a transfer learning strategy based on similar knowledge is used to transfer the architecture and weight parameters obtained from the search in the source task to the target task, and adjust and optimize them. Finally, a dynamic adjustment mechanism based on actual constraints is designed during the search process to gradually approximate the optimal non-dominated solution set with higher detection accuracy without losing its population diversity. The proposed method is tested on the continuous casting slab and workpiece defect datasets. The experimental results show that the model searched by the proposed method can achieve better detection performance compared with manually designed deep learning algorithms and classical network architecture search methods.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gambella, Matteo; Pittorino, Fabrizio; Roveri, Manuel
Architecture-Aware Minimization (A$^2$M): How to Find Flat Minima in Neural Architecture Search Technical Report
2025.
@techreport{gambella2025architectureawareminimizationa2mflat,
title = {Architecture-Aware Minimization ({A$^2$M}): How to Find Flat Minima in Neural Architecture Search},
author = {Matteo Gambella and Fabrizio Pittorino and Manuel Roveri},
url = {https://arxiv.org/abs/2503.10404},
eprint = {2503.10404},
eprinttype = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Tan, Wanrong; Huang, Lingling; Li, Hong; Tan, Menghao; Xie, Jin; Gao, Weifeng
Neural architecture search with integrated template-modules for efficient defect detection Journal Article
In: Expert Systems with Applications, pp. 127211, 2025, ISSN: 0957-4174.
@article{TAN2025127211,
title = {Neural architecture search with integrated template-modules for efficient defect detection},
author = {Wanrong Tan and Lingling Huang and Hong Li and Menghao Tan and Jin Xie and Weifeng Gao},
url = {https://www.sciencedirect.com/science/article/pii/S0957417425008334},
doi = {10.1016/j.eswa.2025.127211},
issn = {0957-4174},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Expert Systems with Applications},
pages = {127211},
abstract = {Surface defect detection in industrial production is critical for quality control. Traditional manual design of detection models is time-consuming, inefficient, and lacks adaptability to diverse defect scenarios. To address these limitations, we propose TMNAS (Template-Module Neural Architecture Search), a bi-level optimization framework that automates the design of high-performance defect detection models. TMNAS uniquely integrates predefined template-modules into a flexible search space, enabling simultaneous exploration of architectural components and parameters. By incorporating a single-objective genetic algorithm with a computational complexity penalty term, our approach effectively avoids local optima and significantly reduces search resource consumption. Extensive experiments on industrial defect datasets demonstrate that TMNAS surpasses state-of-the-art models, while on the COCO benchmark, it achieves a competitive mean average precision (mAP) of 58.4%, all with lower computational overhead.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Sun, Jingyu; Zhang, Hanting; Wang, Jianfeng
Enhancing Time Series Prediction with Evolutionary Algorithm-based Optimization of LSTM Proceedings Article
In: ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025.
@inproceedings{10889678,
title = {Enhancing Time Series Prediction with Evolutionary Algorithm-based Optimization of {LSTM}},
author = {Jingyu Sun and Hanting Zhang and Jianfeng Wang},
url = {https://ieeexplore.ieee.org/abstract/document/10889678},
doi = {10.1109/ICASSP49660.2025.10889678},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
R., José Ribamar Durand; Junior, Geraldo Braz; Silva, Italo Francyles Santos; Oliveira, Rui Miguel Gil Costa
HistAttentionNAS: A CNN built via NAS for Penile Cancer Diagnosis using Histopathological Images Journal Article
In: Procedia Computer Science, vol. 256, pp. 764-771, 2025, ISSN: 1877-0509, (CENTERIS - International Conference on ENTERprise Information Systems / ProjMAN - International Conference on Project MANagement / HCist - International Conference on Health and Social Care Information Systems and Technologies).
@article{DURANDR2025764,
title = {{HistAttentionNAS}: A {CNN} built via {NAS} for Penile Cancer Diagnosis using Histopathological Images},
author = {José Ribamar Durand R. and Geraldo Braz Junior and Italo Francyles Santos Silva and Rui Miguel Gil Costa Oliveira},
url = {https://www.sciencedirect.com/science/article/pii/S1877050925005344},
doi = {10.1016/j.procs.2025.02.177},
issn = {1877-0509},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Procedia Computer Science},
volume = {256},
pages = {764--771},
abstract = {Penile cancer, although rare, has an increasing mortality rate in Brazil, highlighting the need for effective diagnostic methods. Artificial Intelligence (AI) in histopathological analysis can speed up and objectify diagnosis, but designing an ideal architecture is challenging. In this study, we propose a neural architecture search (NAS) methodology for detecting penile cancer in digital histopathology images. We explored different configurations of stem blocks and the inclusion of attention mechanisms, highlighting specific preferences depending on the magnification of the images. The results showed that the NAS methodology enabled the discovery of more accurate and optimized architectures for this task, surpassing conventional models. The proposed models achieve 89.5% and 88.5% F1-Score for 40X and 100X magnification, respectively.},
note = {CENTERIS - International Conference on ENTERprise Information Systems / ProjMAN - International Conference on Project MANagement / HCist - International Conference on Health and Social Care Information Systems and Technologies},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhang, Yunlong; Chen, Nan; Wang, Yonghe; Su, Xiangdong; Bao, Feilong
Multilingual Parameter-Sharing Adapters: A Method for Optimizing Low-Resource Neural Machine Translation Proceedings Article
In: ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025.
@inproceedings{10889761,
title = {Multilingual Parameter-Sharing Adapters: A Method for Optimizing Low-Resource Neural Machine Translation},
author = {Yunlong Zhang and Nan Chen and Yonghe Wang and Xiangdong Su and Feilong Bao},
url = {https://ieeexplore.ieee.org/abstract/document/10889761},
doi = {10.1109/ICASSP49660.2025.10889761},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Xie, Lunchen; Lomurno, Eugenio; Gambella, Matteo; Ardagna, Danilo; Roveri, Manual; Matteucci, Matteo; Shi, Qingjiang
ZO-DARTS++: An Efficient and Size-Variable Zeroth-Order Neural Architecture Search Algorithm Technical Report
2025.
@techreport{xie2025zodartsefficientsizevariablezerothorder,
title = {{ZO-DARTS++}: An Efficient and Size-Variable Zeroth-Order Neural Architecture Search Algorithm},
author = {Lunchen Xie and Eugenio Lomurno and Matteo Gambella and Danilo Ardagna and Manual Roveri and Matteo Matteucci and Qingjiang Shi},
url = {https://arxiv.org/abs/2503.06092},
eprint = {2503.06092},
eprinttype = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Zhang, Heng; Chen, Ziqian; Xia, Wei; Xiong, Gang; Gou, Gaopeng; Li, Zhen; Huang, Guangyan; Li, Yunpeng
ANASETC: Automatic Neural Architecture Search for Encrypted Traffic Classification Proceedings Article
In: ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025.
@inproceedings{10890501,
title = {{ANASETC}: Automatic Neural Architecture Search for Encrypted Traffic Classification},
author = {Heng Zhang and Ziqian Chen and Wei Xia and Gang Xiong and Gaopeng Gou and Zhen Li and Guangyan Huang and Yunpeng Li},
url = {https://ieeexplore.ieee.org/document/10890501},
doi = {10.1109/ICASSP49660.2025.10890501},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zein, Abbas Kassem; Diab, Rand Abou; Yaacoub, Mohamad; Ibrahim, Ali
Neural Architecture Search for Optimized TinyML Applications Proceedings Article
In: Roch, Massimo Ruo; Bellotti, Francesco; Berta, Riccardo; Martina, Maurizio; Ros, Paolo Motto (Ed.): Applications in Electronics Pervading Industry, Environment and Society, pp. 481–488, Springer Nature Switzerland, Cham, 2025, ISBN: 978-3-031-84100-2.
@inproceedings{10.1007/978-3-031-84100-2_57,
title = {Neural Architecture Search for Optimized {TinyML} Applications},
author = {Abbas Kassem Zein and Rand Abou Diab and Mohamad Yaacoub and Ali Ibrahim},
editor = {Massimo Ruo Roch and Francesco Bellotti and Riccardo Berta and Maurizio Martina and Paolo Motto Ros},
url = {https://link.springer.com/chapter/10.1007/978-3-031-84100-2_57},
doi = {10.1007/978-3-031-84100-2_57},
isbn = {978-3-031-84100-2},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {Applications in Electronics Pervading Industry, Environment and Society},
pages = {481--488},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {Integrating machine learning algorithms on circuits with low power consumption and low hardware complexity is challenging at the different levels of the design process. In network design at the software level, it is crucial to balance a high classification accuracy, while minimizing model complexity to reduce hardware demands. This paper proposes a search approach integrated with the Neural Architecture Search (NAS) to enhance the performance and reduce the complexity of deep learning models. Accuracy and number of Floating-Point Operations Per Second (FLOPS) are employed as evaluation metrics for the targeted models. The experimental results demonstrate that the proposed method outperforms similar state-of-the-art architectures while exhibiting comparable accuracy with up to a 70% reduction in complexity.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Capello, Alessio; Berta, Riccardo; Fresta, Matteo; Lazzaroni, Luca; Bellotti, Francesco
Leveraging Neural Architecture Search for Structural Health Monitoring on Resource-Constrained Devices Proceedings Article
In: Roch, Massimo Ruo; Bellotti, Francesco; Berta, Riccardo; Martina, Maurizio; Ros, Paolo Motto (Ed.): Applications in Electronics Pervading Industry, Environment and Society, pp. 323–330, Springer Nature Switzerland, Cham, 2025, ISBN: 978-3-031-84100-2.
@inproceedings{10.1007/978-3-031-84100-2_38,
title = {Leveraging Neural Architecture Search for Structural Health Monitoring on Resource-Constrained Devices},
author = {Alessio Capello and Riccardo Berta and Matteo Fresta and Luca Lazzaroni and Francesco Bellotti},
editor = {Massimo Ruo Roch and Francesco Bellotti and Riccardo Berta and Maurizio Martina and Paolo Motto Ros},
url = {https://link.springer.com/chapter/10.1007/978-3-031-84100-2_38},
doi = {10.1007/978-3-031-84100-2_38},
isbn = {978-3-031-84100-2},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {Applications in Electronics Pervading Industry, Environment and Society},
pages = {323--330},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {In recent decades signal processing incorporated the capabilities offered by Deep Learning (DL) models, especially for complex tasks. DL models demand significant memory, power, and computational resources, posing challenges for Microcontroller Units (MCUs) with limited capacities. The possibility to run models directly on the edge device is key in connectivity-limited scenarios such as Structural Health Monitoring (SHM). For those scenarios, it is necessary to use Tiny Machine Learning techniques to reduces computational requirements. This study focuses on the impact of the extended version of the state-of-the-art Neural Architecture Search (NAS) tool, μNAS, for SHM applications, targeting four commonly used MCUs. Our assessment is based on the Z24 Bridge benchmark dataset, a common dataset for SHM we employed to train and evaluate models. We then discuss if the models found fit the constraints of the MCUs and the possible tradeoffs between error rate and model computational requirements. We also offer a comparison with the Raspberry Pi 4 Model B to highlight μNAS's capability in achieving high accuracy with higher computing capabilities. The obtained results are promising, as the found models satisfy the given constraints both in term of accuracy and memory footprint.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Miriyala, Srinivas; Vajrala, Sowmya; Kumar, Hitesh; Kodavanti, Sravanth; Rajendiran, Vikram
Mobile-friendly Image de-noising: Hardware Conscious Optimization for Edge Application Proceedings Article
In: ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025.
@inproceedings{10888855,
title = {Mobile-friendly Image de-noising: Hardware Conscious Optimization for Edge Application},
author = {Srinivas Miriyala and Sowmya Vajrala and Hitesh Kumar and Sravanth Kodavanti and Vikram Rajendiran},
url = {https://ieeexplore.ieee.org/document/10888855},
doi = {10.1109/ICASSP49660.2025.10888855},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Li, Xin; Fu, Keren; Zhao, Qijun
Camouflaged Object Detection via Neural Architecture Search Proceedings Article
In: ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025.
@inproceedings{10887976,
title = {Camouflaged Object Detection via Neural Architecture Search},
author = {Xin Li and Keren Fu and Qijun Zhao},
url = {https://ieeexplore.ieee.org/abstract/document/10887976},
doi = {10.1109/ICASSP49660.2025.10887976},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zein, Hadi Al; Waterlaat, Nick; Alkanat, Tunc
Neural Architecture Search for Ultra-low Memory Blood Glucose Forecasting on the Edge Proceedings Article
In: ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025.
@inproceedings{10890864,
title = {Neural Architecture Search for Ultra-low Memory Blood Glucose Forecasting on the Edge},
author = {Hadi Al Zein and Nick Waterlaat and Tunc Alkanat},
url = {https://ieeexplore.ieee.org/document/10890864},
doi = {10.1109/ICASSP49660.2025.10890864},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}