Maintained by Difan Deng and Marius Lindauer.
The following list considers papers related to neural architecture search. It is by no means complete. If you miss a paper on the list, please let us know.
Please note that although NAS methods steadily improve, the quality of empirical evaluations in this field is still lagging behind compared to other areas in machine learning, AI and optimization. We would therefore like to share some best practices for empirical evaluations of NAS methods, which we believe will facilitate sustained and measurable progress in the field. If you are interested in a teaser, please read our blog post or directly jump to our checklist.
Transformers have gained increasing popularity in different domains. For a comprehensive list of papers focusing on Neural Architecture Search for Transformer-Based spaces, the awesome-transformer-search repo is all you need.
2025
Wang, Zilong; Liang, Pei; Zhai, Jinglei; Wu, Bei; Chen, Xin; Ding, Fan; Chen, Qiang; Sun, Biao
Efficient detection of foodborne pathogens via SERS and deep learning: An ADMIN-optimized NAS-Unet approach Journal Article
In: Journal of Hazardous Materials, vol. 489, pp. 137581, 2025, ISSN: 0304-3894.
@article{WANG2025137581,
title = {Efficient detection of foodborne pathogens via {SERS} and deep learning: An {ADMIN}-optimized {NAS-Unet} approach},
author = {Zilong Wang and Pei Liang and Jinglei Zhai and Bei Wu and Xin Chen and Fan Ding and Qiang Chen and Biao Sun},
url = {https://www.sciencedirect.com/science/article/pii/S0304389425004959},
doi = {10.1016/j.jhazmat.2025.137581},
issn = {0304-3894},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Journal of Hazardous Materials},
volume = {489},
pages = {137581},
abstract = {Amid the increasing global challenge of foodborne diseases, there is an urgent need for rapid and precise pathogen detection methods. This study innovatively integrates surface-enhanced Raman Spectroscopy (SERS) with deep learning technology to develop an efficient tool for the detection of foodborne pathogens. Utilizing an automated design of mixed networks (ADMIN) strategy, coupled with neural architecture search (NAS) technology, we optimized convolutional neural networks (CNNs) architectures, significantly enhancing SERS data analysis capabilities. This research introduces the U-Net architecture and attention mechanisms, which improve not only classification accuracy but also the model's ability to identify critical spectral features. Compared to traditional detection methods, our approach demonstrates significant advantages in accuracy. In testing samples from 22 foodborne pathogens, the optimized NAS-Unet model achieved an average precision of 92.77 %, surpassing current technologies. Additionally, we explored how different network depths affect classification performance and validated the model's generalization capabilities on the Bacteria-ID dataset, laying the groundwork for practical applications. Our study provides an innovative detection approach for the food safety sector and opens new avenues for applying deep learning technologies in microbiology. Looking ahead, we aim to further explore diverse network modules to enhance model generalization and promote the application of these technologies in real-world food safety testing, playing a crucial role in the fight against foodborne diseases.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Krishnanunni, C G; Bui-Thanh, Tan; Dawson, Clint
Topological derivative approach for deep neural network architecture adaptation Technical Report
2025.
@techreport{krishnanunni2025topologicalderivativeapproachdeep,
title = {Topological derivative approach for deep neural network architecture adaptation},
author = {C G Krishnanunni and Tan Bui-Thanh and Clint Dawson},
url = {https://arxiv.org/abs/2502.06885},
institution = {arXiv},
eprint = {2502.06885},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Cheng, Jian; Jiang, Jinbo; Kang, Haidong; Ma, Lianbo
A Hybrid Neural Architecture Search Algorithm Optimized via Lifespan Particle Swarm Optimization for Coal Mine Image Recognition Journal Article
In: Mathematics, vol. 13, no. 4, 2025, ISSN: 2227-7390.
@article{math13040631,
  title     = {A Hybrid Neural Architecture Search Algorithm Optimized via Lifespan Particle Swarm Optimization for Coal Mine Image Recognition},
  author    = {Jian Cheng and Jinbo Jiang and Haidong Kang and Lianbo Ma},
  journal   = {Mathematics},
  volume    = {13},
  number    = {4},
  year      = {2025},
  date      = {2025-01-01},
  urldate   = {2025-01-01},
  url       = {https://www.mdpi.com/2227-7390/13/4/631},
  doi       = {10.3390/math13040631},
  issn      = {2227-7390},
  abstract  = {Coal mine scene image recognition plays an important role in safety monitoring and equipment detection. However, traditional methods often depend on manually designed neural network architectures. These models struggle to handle the complex backgrounds, low illumination, and diverse objects commonly found in coal mine environments. Manual designs are not only inefficient but also restrict the exploration of optimal architectures, resulting to subpar performance. To address these challenges, we propose using a neural architecture search (NAS) to automate the design of neural networks. Traditional NAS methods are known to be computationally expensive. To improve this, we enhance the process by incorporating Particle Swarm Optimization (PSO), a scalable algorithm that effectively balances global and local searches. To further enhance PSO’s efficiency, we integrate the lifespan mechanism, which prevents premature convergence and enables a more comprehensive exploration of the search space. Our proposed method establishes a flexible search space that includes various types of convolutional layers, activation functions, pooling operations, and network depths, enabling a comprehensive optimization process. Extensive experiments show that the Lifespan-PSO NAS method outperforms traditional manually designed networks and standard PSO-based NAS approaches, offering significant improvements in both recognition accuracy (improved by 10%) and computational efficiency (resource usage reduced by 30%). This makes it a highly effective solution for real-world coal mine image recognition tasks via a PSO-optimized approach in terms of performance and efficiency.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Cranney, Caleb; Meyer, Jesse G.
AttentionSmithy: A Modular Framework for Rapid Transformer Development and Customization Technical Report
2025.
@techreport{cranney2025attentionsmithymodularframeworkrapid,
title = {AttentionSmithy: A Modular Framework for Rapid Transformer Development and Customization},
author = {Caleb Cranney and Jesse G. Meyer},
url = {https://arxiv.org/abs/2502.09503},
institution = {arXiv},
eprint = {2502.09503},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Yin, Shantong; Niu, Ben; Wang, Rui; Wang, Xin
Spatial and channel level feature redundancy reduction for differentiable neural architecture search Journal Article
In: Neurocomputing, vol. 630, pp. 129713, 2025, ISSN: 0925-2312.
@article{YIN2025129713,
title = {Spatial and channel level feature redundancy reduction for differentiable neural architecture search},
author = {Shantong Yin and Ben Niu and Rui Wang and Xin Wang},
url = {https://www.sciencedirect.com/science/article/pii/S0925231225003856},
doi = {10.1016/j.neucom.2025.129713},
issn = {0925-2312},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Neurocomputing},
volume = {630},
pages = {129713},
abstract = {Differentiable architecture search (DARTS), based on the continuous relaxation of the architectural representation and gradient descent, achieves effective results in Neural Architecture Search (NAS) field. Among the neural architectures, convolutional neural networks (CNNs) have achieved remarkable performance in various computer vision tasks. However, convolutional layers inevitably extract redundant features as the limitation of the weight-sharing property by convolutional kernels, thus slowing down the search efficiency of DARTS. In this paper, we propose a novel search approach named Slim-DARTS from the perspective of reducing feature redundancy, to further achieve high-speed and efficient neural architecture search. At the level of spatial redundancy, we design a spatial reconstruction module to eliminate spatial feature redundancy and facilitate representative feature learning. At the channel redundancy level, partial channel connection is applied to randomly sample a small subset of channels for operation selection to reduce unfair competition among candidate operations. And we introduce a group of channel parameters to automatically adjust the proportion of selected channels. The experimental results show that our research greatly improves search efficiency and memory utilization, achieving classification error rates of 2.39% and 16.78% on CIFAR-10 and CIFAR-100, respectively.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gao, Yang; Yang, Hong; Chen, Yizhi; Wu, Junxian; Zhang, Peng; Wang, Haishuai
LLM4GNAS: A Large Language Model Based Toolkit for Graph Neural Architecture Search Technical Report
2025.
@techreport{gao2025llm4gnaslargelanguagemodel,
title = {{LLM4GNAS}: A Large Language Model Based Toolkit for Graph Neural Architecture Search},
author = {Yang Gao and Hong Yang and Yizhi Chen and Junxian Wu and Peng Zhang and Haishuai Wang},
url = {https://arxiv.org/abs/2502.10459},
institution = {arXiv},
eprint = {2502.10459},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Kuhn, Lukas; Saba-Sadiya, Sari; Roig, Gemma
Cognitive Neural Architecture Search Reveals Hierarchical Entailment Technical Report
2025.
@techreport{kuhn2025cognitiveneuralarchitecturesearch,
title = {Cognitive Neural Architecture Search Reveals Hierarchical Entailment},
author = {Lukas Kuhn and Sari Saba-Sadiya and Gemma Roig},
url = {https://arxiv.org/abs/2502.11141},
institution = {arXiv},
eprint = {2502.11141},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Kim, Hyeonah; Choi, Sanghyeok; Son, Jiwoo; Park, Jinkyoo; Kwon, Changhyun
Neural Genetic Search in Discrete Spaces Technical Report
2025.
@techreport{kim2025neuralgeneticsearchdiscrete,
title = {Neural Genetic Search in Discrete Spaces},
author = {Hyeonah Kim and Sanghyeok Choi and Jiwoo Son and Jinkyoo Park and Changhyun Kwon},
url = {https://arxiv.org/abs/2502.10433},
institution = {arXiv},
eprint = {2502.10433},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Feng, Yuqi; Sun, Yanan; Yen, Gary G.; Tan, Kay Chen
REP: An Interpretable Robustness Enhanced Plugin for Differentiable Neural Architecture Search Journal Article
In: IEEE Transactions on Knowledge and Data Engineering, pp. 1-15, 2025.
@article{10892073,
title = {{REP}: An Interpretable Robustness Enhanced Plugin for Differentiable Neural Architecture Search},
author = {Yuqi Feng and Yanan Sun and Gary G. Yen and Kay Chen Tan},
url = {https://ieeexplore.ieee.org/abstract/document/10892073},
doi = {10.1109/TKDE.2025.3543503},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {IEEE Transactions on Knowledge and Data Engineering},
pages = {1--15},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Njor, Emil; Banbury, Colby; Fafoutis, Xenofon
Fast Data Aware Neural Architecture Search via Supernet Accelerated Evaluation Technical Report
2025.
@techreport{njor2025fastdataawareneural,
title = {Fast Data Aware Neural Architecture Search via Supernet Accelerated Evaluation},
author = {Emil Njor and Colby Banbury and Xenofon Fafoutis},
url = {https://arxiv.org/abs/2502.12690},
institution = {arXiv},
eprint = {2502.12690},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Deng, Difan; Lindauer, Marius
Neural Attention Search Technical Report
2025.
@techreport{deng2025neuralattentionsearch,
title = {Neural Attention Search},
author = {Difan Deng and Marius Lindauer},
url = {https://arxiv.org/abs/2502.13251},
institution = {arXiv},
eprint = {2502.13251},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Zhang, Zheyu; Zhang, Yueyi; Sun, Xiaoyan
Denoising Designs-inherited Search Framework for Image Denoising Technical Report
2025.
@techreport{zhang2025denoisingdesignsinheritedsearchframework,
title = {Denoising Designs-inherited Search Framework for Image Denoising},
author = {Zheyu Zhang and Yueyi Zhang and Xiaoyan Sun},
url = {https://arxiv.org/abs/2502.13359},
institution = {arXiv},
eprint = {2502.13359},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Zou, Juan; Liu, Yang; Liu, Yuan; Xia, Yizhang
Evolutionary multi-objective neural architecture search via depth equalization supernet Journal Article
In: Neurocomputing, pp. 129674, 2025, ISSN: 0925-2312.
@article{ZOU2025129674,
title = {Evolutionary multi-objective neural architecture search via depth equalization supernet},
author = {Juan Zou and Yang Liu and Yuan Liu and Yizhang Xia},
url = {https://www.sciencedirect.com/science/article/pii/S0925231225003467},
doi = {10.1016/j.neucom.2025.129674},
issn = {0925-2312},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Neurocomputing},
pages = {129674},
abstract = {To provide a diverse selection of models suitable for different application scenarios, neural architecture search (NAS) is constructed as a multi-objective optimization problem aiming to simultaneously optimize multiple metrics such as model size and accuracy. Evolutionary algorithms (EA) have been shown to be an effective multi-objective approach that can balance different metrics. However, EA require many evaluations, and the evaluation of architectures is expensive. Training a supernet to evaluate an architecture is considered a promising method to reduce the cost of EA. But there are still many challenges in applying supernet to multi-objective NAS: (1) Supernet tends to give higher scores to shallower architectures, causing potential deeper architectures to be ignored. (2) The receptive field of the architecture has a large gap between search and evaluation, causing a decrease in performance. (3) Larger models are gradually eliminated during evolution, leading to a diversity disaster. We proposed a framework called DESEvo to solve these problems in this paper. DESEvo trains a depth equalization supernet to improve bias of supernet via a frequency rejection sampling method. In addition, DESEvo adaptively constrainted receptive field of architecture to reduce the gap. Finally, DESEvo developed a diversity-preserving strategy to enhance the diversity. Experimental results validate the efficiency and effectiveness of the algorithm, DESEvo can search a set of architectures that are more competitive compared to other state-of-the-art algorithms within 0.2 days, becoming the most efficient multi-objective NAS method in the supernet-based methods.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Xu, Liming; Zheng, Jie; He, Chunlin; Wang, Jing; Zheng, Bochuan; Lv, Jiancheng
Adaptive Multi-particle Swarm Neural Architecture Search for High-incidence Cancer Prediction Journal Article
In: IEEE Transactions on Artificial Intelligence, pp. 1-12, 2025.
@article{10896623,
title = {Adaptive Multi-particle Swarm Neural Architecture Search for High-incidence Cancer Prediction},
author = {Liming Xu and Jie Zheng and Chunlin He and Jing Wang and Bochuan Zheng and Jiancheng Lv},
url = {https://ieeexplore.ieee.org/abstract/document/10896623},
doi = {10.1109/TAI.2025.3543822},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {IEEE Transactions on Artificial Intelligence},
pages = {1--12},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Mecharbat, Lotfi Abdelkrim; Marchisio, Alberto; Shafique, Muhammad; Ghassemi, Mohammad M.; Alhanai, Tuka
MoENAS: Mixture-of-Expert based Neural Architecture Search for jointly Accurate, Fair, and Robust Edge Deep Neural Networks Technical Report
2025.
@techreport{mecharbat2025moenasmixtureofexpertbasedneuralb,
title = {{MoENAS}: Mixture-of-Expert based Neural Architecture Search for jointly Accurate, Fair, and Robust Edge Deep Neural Networks},
author = {Lotfi Abdelkrim Mecharbat and Alberto Marchisio and Muhammad Shafique and Mohammad M. Ghassemi and Tuka Alhanai},
url = {https://arxiv.org/abs/2502.07422},
institution = {arXiv},
eprint = {2502.07422},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
İlker, Günay; Özkan, İnik
SADASNet: A Selective and Adaptive Deep Architecture Search Network with Hyperparameter Optimization for Robust Skin Cancer Classification Journal Article
In: Diagnostics, vol. 15, no. 5, 2025, ISSN: 2075-4418.
@article{diagnostics15050541,
title = {{SADASNet}: A Selective and Adaptive Deep Architecture Search Network with Hyperparameter Optimization for Robust Skin Cancer Classification},
author = {Günay İlker and İnik Özkan},
url = {https://www.mdpi.com/2075-4418/15/5/541},
doi = {10.3390/diagnostics15050541},
issn = {2075-4418},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Diagnostics},
volume = {15},
number = {5},
abstract = {Background/Objectives: Skin cancer is a major public health concern, where early diagnosis and effective treatment are essential for prevention. To enhance diagnostic accuracy, researchers have increasingly utilized computer vision systems, with deep learning-based approaches becoming the primary focus in recent studies. Nevertheless, there is a notable research gap in the effective optimization of hyperparameters to design optimal deep learning architectures, given the need for high accuracy and lower computational complexity. Methods: This paper puts forth a robust metaheuristic optimization-based approach to develop novel deep learning architectures for multi-class skin cancer classification. This method, designated as the SADASNet (Selective and Adaptive Deep Architecture Search Network by Hyperparameter Optimization) algorithm, is developed based on the Particle Swarm Optimization (PSO) technique. The SADASNet method is adapted to the HAM10000 dataset. Innovative data augmentation techniques are applied to overcome class imbalance issues and enhance the performance of the model. The SADASNet method has been developed to accommodate a range of image sizes, and six different original deep learning models have been produced as a result. Results: The models achieved the following highest performance metrics: 99.31% accuracy, 97.58% F1 score, 97.57% recall, 97.64% precision, and 99.59% specificity. Compared to the most advanced competitors reported in the literature, the proposed method demonstrates superior performance in terms of accuracy and computational complexity. Furthermore, it maintains a broad solution space during parameter optimization. Conclusions: With these outcomes, this method aims to enhance the classification of skin cancer and contribute to the advancement of deep learning.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Li, Chunchao; Li, Jun; Peng, Mingrui; Rasti, Behnood; Duan, Puhong; Tang, Xuebin; Ma, Xiaoguang
Low-Latency Neural Network for Efficient Hyperspectral Image Classification Journal Article
In: IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, vol. PP, pp. 1-17, 2025.
@article{articleg,
title = {Low-Latency Neural Network for Efficient Hyperspectral Image Classification},
author = {Chunchao Li and Jun Li and Mingrui Peng and Behnood Rasti and Puhong Duan and Xuebin Tang and Xiaoguang Ma},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=10900438},
doi = {10.1109/JSTARS.2025.3544583},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing},
volume = {PP},
pages = {1--17},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lu, Kang-Di; Huang, Jia-Cheng; Zeng, Guo-Qiang; Chen, Min-Rong; Geng, Guang-Gang; Weng, Jian
Multi-Objective Discrete Extremal Optimization of Variable-Length Blocks-Based CNN by Joint NAS and HPO for Intrusion Detection in IIoT Journal Article
In: IEEE Transactions on Dependable and Secure Computing, pp. 1-18, 2025.
@article{10902222,
title = {Multi-Objective Discrete Extremal Optimization of Variable-Length Blocks-Based {CNN} by Joint {NAS} and {HPO} for Intrusion Detection in {IIoT}},
author = {Kang-Di Lu and Jia-Cheng Huang and Guo-Qiang Zeng and Min-Rong Chen and Guang-Gang Geng and Jian Weng},
url = {https://ieeexplore.ieee.org/abstract/document/10902222},
doi = {10.1109/TDSC.2025.3545363},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {IEEE Transactions on Dependable and Secure Computing},
pages = {1--18},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ang, Li-Minn; Su, Yuanxin; Seng, Kah Phooi; Smith, Jeremy S.
Customized Binary Convolutional Neural Networks and Neural Architecture Search on Hardware Technologies Journal Article
In: IEEE Nanotechnology Magazine, pp. 1-8, 2025.
@article{10904266,
title = {Customized Binary Convolutional Neural Networks and Neural Architecture Search on Hardware Technologies},
author = {Li-Minn Ang and Yuanxin Su and Kah Phooi Seng and Jeremy S. Smith},
url = {https://ieeexplore.ieee.org/abstract/document/10904266},
doi = {10.1109/MNANO.2025.3533937},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {IEEE Nanotechnology Magazine},
pages = {1--8},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ding, Zhenyang; Pu, Ninghao; Miao, Qihui; Chen, Zhiqiang; Xu, Yifan; Liu, Hao
Efficient Palm Vein Recognition Optimized by Neural Architecture Search and Hybrid Compression Proceedings Article
In: 2025 International Conference on Multi-Agent Systems for Collaborative Intelligence (ICMSCI), pp. 826-832, 2025.
@inproceedings{10894245,
title = {Efficient Palm Vein Recognition Optimized by Neural Architecture Search and Hybrid Compression},
author = {Zhenyang Ding and Ninghao Pu and Qihui Miao and Zhiqiang Chen and Yifan Xu and Hao Liu},
url = {https://ieeexplore.ieee.org/abstract/document/10894245},
doi = {10.1109/ICMSCI62561.2025.10894245},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {2025 International Conference on Multi-Agent Systems for Collaborative Intelligence (ICMSCI)},
pages = {826--832},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rumiantsev, Pavel; Coates, Mark
Variation Matters: from Mitigating to Embracing Zero-Shot NAS Ranking Function Variation Technical Report
2025.
@techreport{rumiantsev2025variationmattersmitigatingembracing,
title = {Variation Matters: from Mitigating to Embracing Zero-Shot {NAS} Ranking Function Variation},
author = {Pavel Rumiantsev and Mark Coates},
url = {https://arxiv.org/abs/2502.19657},
institution = {arXiv},
eprint = {2502.19657},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Cai, Zicheng; Tang, Yaohua; Lai, Yutao; Wang, Hua; Chen, Zhi; Chen, Hao
SEKI: Self-Evolution and Knowledge Inspiration based Neural Architecture Search via Large Language Models Technical Report
2025.
@techreport{cai2025sekiselfevolutionknowledgeinspiration,
title = {{SEKI}: Self-Evolution and Knowledge Inspiration based Neural Architecture Search via Large Language Models},
author = {Zicheng Cai and Yaohua Tang and Yutao Lai and Hua Wang and Zhi Chen and Hao Chen},
url = {https://arxiv.org/abs/2502.20422},
institution = {arXiv},
eprint = {2502.20422},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Chouhan, Avinash; Chutia, Dibyajyoti; Deb, Biswarup; Aggarwal, Shiv Prasad
Attention-Based Neural Architecture Search for Effective Semantic Segmentation of Satellite Images Proceedings Article
In: Noor, Arti; Saroha, Kriti; Pricop, Emil; Sen, Abhijit; Trivedi, Gaurav (Ed.): Emerging Trends and Technologies on Intelligent Systems, pp. 325–335, Springer Nature Singapore, Singapore, 2025, ISBN: 978-981-97-5703-9.
@inproceedings{10.1007/978-981-97-5703-9_28,
title = {Attention-Based Neural Architecture Search for Effective Semantic Segmentation of Satellite Images},
author = {Avinash Chouhan and Dibyajyoti Chutia and Biswarup Deb and Shiv Prasad Aggarwal},
editor = {Arti Noor and Kriti Saroha and Emil Pricop and Abhijit Sen and Gaurav Trivedi},
url = {https://link.springer.com/chapter/10.1007/978-981-97-5703-9_28},
doi = {10.1007/978-981-97-5703-9_28},
isbn = {978-981-97-5703-9},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {Emerging Trends and Technologies on Intelligent Systems},
pages = {325--335},
publisher = {Springer Nature Singapore},
address = {Singapore},
abstract = {Semantic segmentation is an important activity in satellite image analysis. The manual design and development of neural architectures for semantic segmentation is very tedious and can result in computationally heavy architectures with redundant computation. Neural architecture search (NAS) produces automated network architectures for a given task considering computational cost and other parameters. In this work, we proposed an attention-based neural architecture search (ANAS), which uses attention layers at cell levels for effective and efficient architecture design for semantic segmentation. The proposed ANAS has achieved better results than previous NAS-based work on two benchmark datasets.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Qiyi; Shao, Yinning; Ma, Yunlong; Liu, Min
NodeNAS: Node-Specific Graph Neural Architecture Search for Out-of-Distribution Generalization Technical Report
2025.
@techreport{wang2025nodenasnodespecificgraphneural,
title = {{NodeNAS}: Node-Specific Graph Neural Architecture Search for Out-of-Distribution Generalization},
author = {Qiyi Wang and Yinning Shao and Yunlong Ma and Min Liu},
url = {https://arxiv.org/abs/2503.02448},
institution = {arXiv},
eprint = {2503.02448},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Rong, Xiaobin; Wang, Dahan; Hu, Yuxiang; Zhu, Changbao; Chen, Kai; Lu, Jing
UL-UNAS: Ultra-Lightweight U-Nets for Real-Time Speech Enhancement via Network Architecture Search Miscellaneous
2025.
@misc{rong2025ulunasultralightweightunetsrealtime,
title = {{UL-UNAS}: Ultra-Lightweight {U-Nets} for Real-Time Speech Enhancement via Network Architecture Search},
author = {Xiaobin Rong and Dahan Wang and Yuxiang Hu and Changbao Zhu and Kai Chen and Jing Lu},
url = {https://arxiv.org/abs/2503.00340},
eprint = {2503.00340},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Yang, Fan; Abedin, Mohammad Zoynul; Hajek, Petr; Qiao, Yanan
Blockchain and Machine Learning in the Green Economy: Pioneering Carbon Neutrality Through Innovative Trading Technologies Journal Article
In: IEEE Transactions on Engineering Management, pp. 1-40, 2025.
@article{10909627,
title = {Blockchain and Machine Learning in the Green Economy: Pioneering Carbon Neutrality Through Innovative Trading Technologies},
author = {Fan Yang and Mohammad Zoynul Abedin and Petr Hajek and Yanan Qiao},
url = {https://ieeexplore.ieee.org/abstract/document/10909627},
doi = {10.1109/TEM.2025.3547730},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {IEEE Transactions on Engineering Management},
pages = {1--40},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhou, Haoxiang; Wei, Zikun; Liu, Dingbang; Zhang, Liuyang; Ding, Chenchen; Yang, Jiaqi; Mao, Wei; Yu, Hao
A Layer-wised Mixed-Precision CIM Accelerator with Bit-level Sparsity-aware ADCs for NAS-Optimized CNNs Proceedings Article
In: Proceedings of the 30th Asia and South Pacific Design Automation Conference, pp. 720–726, Association for Computing Machinery, Tokyo, Japan, 2025, ISBN: 9798400706356.
@inproceedings{10.1145/3658617.3697682,
title = {A Layer-wised Mixed-Precision {CIM} Accelerator with Bit-level Sparsity-aware {ADCs} for {NAS}-Optimized {CNNs}},
author = {Haoxiang Zhou and Zikun Wei and Dingbang Liu and Liuyang Zhang and Chenchen Ding and Jiaqi Yang and Wei Mao and Hao Yu},
url = {https://doi.org/10.1145/3658617.3697682},
doi = {10.1145/3658617.3697682},
isbn = {9798400706356},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {Proceedings of the 30th Asia and South Pacific Design Automation Conference},
pages = {720--726},
publisher = {Association for Computing Machinery},
address = {Tokyo, Japan},
series = {ASPDAC '25},
abstract = {Exploring multiple precisions as well as sparsities for a computingin-memory (CIM) based convolutional accelerators is challenging. To further improve energy efficiency with minimal accuracy loss, this paper develops a neural architecture search (NAS) method to identify precision for each layer of the CNN and further leverages bit-level sparsity. The results indicate that following this approach, ResNet-18 and VGG-16 not only maintain their accuracy but also implement layer-wised mixed-precision effectively. Furthermore, there is a substantial enhancement in the bit-level sparsity of weights within each layer, with an average bit-level sparsity exceeding 90% per bit, thus providing broader possibilities for hardware-level sparsity optimization. In terms of hardware design, a mixed-precision (2/4/8-bit) readout circuit as well as a bit-level sparsity-aware Analog-to-Digital Converter (ADC) are both proposed to reduce system power consumption. Based on bit-level sparsity mixed-precision CNNs benchmarks, post-layout simulation results in 28nm reveal that the proposed accelerator achieves up to 245.72 TOPS/W energy efficiency, which shows about 2.52 – 6.57× improvement compared to the state-of-the-art SRAM-based CIM accelerators.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Yingzhe; Fu, Fangfa; Sun, Xuejian
Research on Approximate Computation of Signal Processing Algorithms for AIoT Processors Based on Deep Learning Journal Article
In: Electronics, vol. 14, no. 6, 2025, ISSN: 2079-9292.
@article{electronics14061064,
title = {Research on Approximate Computation of Signal Processing Algorithms for {AIoT} Processors Based on Deep Learning},
author = {Yingzhe Liu and Fangfa Fu and Xuejian Sun},
url = {https://www.mdpi.com/2079-9292/14/6/1064},
doi = {10.3390/electronics14061064},
issn = {2079-9292},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Electronics},
volume = {14},
number = {6},
abstract = {In the post-Moore era, the excessive amount of information brings great challenges to the performance of computing systems. To cope with these challenges, approximate computation has developed rapidly, which enhances the system performance with minor degradation in accuracy. In this paper, we investigate the utilization of an Artificial Intelligence of Things (AIoT) processor for approximate computing. Firstly, we employed neural architecture search (NAS) to acquire the neural network structure for approximate computation, which approximates the functions of FFT, DCT, FIR, and IIR. Subsequently, based on this structure, we quantized and trained a neural network implemented on the AI accelerator of the MAX78000 development board. To evaluate the performance, we implemented the same functions using the CMSIS-DSP library. The results demonstrate that the computational efficiency of the approximate computation on the AI accelerator is significantly higher compared to traditional DSP implementations. Therefore, the approximate computation based on AIoT devices can be effectively utilized in real-time applications.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chen, Renqi; Nian, Fan; Cen, Yuhui; Peng, Yiheng; Wang, Hongbo; Yu, Zekuan; Luo, Jingjing
L-SSHNN: A Larger search space of Semi-Supervised Hybrid NAS Network for echocardiography segmentation Journal Article
In: Expert Systems with Applications, pp. 127084, 2025, ISSN: 0957-4174.
@article{CHEN2025127084,
title = {{L-SSHNN}: A Larger search space of Semi-Supervised Hybrid {NAS} Network for echocardiography segmentation},
author = {Renqi Chen and Fan Nian and Yuhui Cen and Yiheng Peng and Hongbo Wang and Zekuan Yu and Jingjing Luo},
url = {https://www.sciencedirect.com/science/article/pii/S0957417425007067},
doi = {10.1016/j.eswa.2025.127084},
issn = {0957-4174},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Expert Systems with Applications},
pages = {127084},
abstract = {Echocardiography with image segmentation facilitates clinicians in thoroughly analyzing cardiac conditions by providing detailed insights into anatomical structures. However, echocardiography segmentation is challenging due to low image quality with blurred boundaries constrained by the inherent noise, artifacts, and cardiac motion. When manually designed networks have achieved promising results, Neural Architecture Search (NAS) allows for the automatic optimization of network structures. Integrating the strengths of NAS works and meticulously crafted networks becomes meaningful in advancing this field. In this paper, we propose a new Semi-Supervised Hybrid NAS Network with a Larger search space for echocardiography segmentation under limited annotations, termed L-SSHNN. Firstly, we propose a three-level search: inner cell, outer layer, and encoder–decoder design, enlarging the search space. Secondly, the proposed L-SSHNN specifies an architectural non-sharing strategy, allowing diverse structures among different cells. Moreover, we propose a new differentiable architecture search (Darts) method termed separation-combination partially-connected Darts (SC-PC-Darts) to incorporate convolution fusion modules and search for the optimal cell architecture for multi-scale feature extraction. Extensive experiments with other state-of-the-art methods on three publicly available echocardiography datasets demonstrate the superiority of L-SSHNN. Additionally, comparative ablation studies further analyze different configurations of our model.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Sun, Shuoyang; Zhang, Kaiwen; Fang, Hao; Chen, Bin; Li, Jiawei; Huo, Enze; Xia, Shu-Tao
RobNAS: Robust Neural Architecture Search for Point Cloud Adversarial Defense Proceedings Article
In: ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025.
@inproceedings{10890087,
title = {{RobNAS}: Robust Neural Architecture Search for Point Cloud Adversarial Defense},
author = {Shuoyang Sun and Kaiwen Zhang and Hao Fang and Bin Chen and Jiawei Li and Enze Huo and Shu-Tao Xia},
url = {https://ieeexplore.ieee.org/abstract/document/10890087},
doi = {10.1109/ICASSP49660.2025.10890087},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhang, Binyan; Ren, Ao; Zhang, Zihao; Duan, Moming; Liu, Duo; Tan, Yujuan; Zhong, Kan
MPNAS: Multimodal Sentiment Analysis Pruning via Neural Architecture Search Proceedings Article
In: ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025.
@inproceedings{10887670,
title = {{MPNAS}: Multimodal Sentiment Analysis Pruning via Neural Architecture Search},
author = {Binyan Zhang and Ao Ren and Zihao Zhang and Moming Duan and Duo Liu and Yujuan Tan and Kan Zhong},
url = {https://ieeexplore.ieee.org/abstract/document/10887670},
doi = {10.1109/ICASSP49660.2025.10887670},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Wenbo; Deng, Tao; Yan, Fei
HID-NAS: A Novel Neural Architecture Search Pipeline for High Information Density Data Proceedings Article
In: ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025.
@inproceedings{10889095,
title = {{HID-NAS}: A Novel Neural Architecture Search Pipeline for High Information Density Data},
author = {Wenbo Liu and Tao Deng and Fei Yan},
url = {https://ieeexplore.ieee.org/abstract/document/10889095},
doi = {10.1109/ICASSP49660.2025.10889095},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zein, Hadi Al; Waterlaat, Nick; Alkanat, Tunc
Neural Architecture Search for Ultra-low Memory Blood Glucose Forecasting on the Edge Proceedings Article
In: ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025.
@inproceedings{10890864,
title = {Neural Architecture Search for Ultra-low Memory Blood Glucose Forecasting on the Edge},
author = {Hadi Al Zein and Nick Waterlaat and Tunc Alkanat},
url = {https://ieeexplore.ieee.org/document/10890864},
doi = {10.1109/ICASSP49660.2025.10890864},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Li, Xin; Fu, Keren; Zhao, Qijun
Camouflaged Object Detection via Neural Architecture Search Proceedings Article
In: ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025.
@inproceedings{10887976,
title = {Camouflaged Object Detection via Neural Architecture Search},
author = {Xin Li and Keren Fu and Qijun Zhao},
url = {https://ieeexplore.ieee.org/abstract/document/10887976},
doi = {10.1109/ICASSP49660.2025.10887976},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Miriyala, Srinivas; Vajrala, Sowmya; Kumar, Hitesh; Kodavanti, Sravanth; Rajendiran, Vikram
Mobile-friendly Image de-noising: Hardware Conscious Optimization for Edge Application Proceedings Article
In: ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025.
@inproceedings{10888855,
title = {Mobile-friendly Image de-noising: Hardware Conscious Optimization for Edge Application},
author = {Srinivas Miriyala and Sowmya Vajrala and Hitesh Kumar and Sravanth Kodavanti and Vikram Rajendiran},
url = {https://ieeexplore.ieee.org/document/10888855},
doi = {10.1109/ICASSP49660.2025.10888855},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Capello, Alessio; Berta, Riccardo; Fresta, Matteo; Lazzaroni, Luca; Bellotti, Francesco
Leveraging Neural Architecture Search for Structural Health Monitoring on Resource-Constrained Devices Proceedings Article
In: Roch, Massimo Ruo; Bellotti, Francesco; Berta, Riccardo; Martina, Maurizio; Ros, Paolo Motto (Ed.): Applications in Electronics Pervading Industry, Environment and Society, pp. 323–330, Springer Nature Switzerland, Cham, 2025, ISBN: 978-3-031-84100-2.
@inproceedings{10.1007/978-3-031-84100-2_38,
title = {Leveraging Neural Architecture Search for Structural Health Monitoring on Resource-Constrained Devices},
author = {Alessio Capello and Riccardo Berta and Matteo Fresta and Luca Lazzaroni and Francesco Bellotti},
editor = {Massimo Ruo Roch and Francesco Bellotti and Riccardo Berta and Maurizio Martina and Paolo Motto Ros},
url = {https://link.springer.com/chapter/10.1007/978-3-031-84100-2_38},
doi = {10.1007/978-3-031-84100-2_38},
isbn = {978-3-031-84100-2},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {Applications in Electronics Pervading Industry, Environment and Society},
pages = {323--330},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {In recent decades signal processing incorporated the capabilities offered by Deep Learning (DL) models, especially for complex tasks. DL models demand significant memory, power, and computational resources, posing challenges for Microcontroller Units (MCUs) with limited capacities. The possibility to run models directly on the edge device is key in connectivity-limited scenarios such as Structural Health Monitoring (SHM). For those scenarios, it is necessary to use Tiny Machine Learning techniques to reduces computational requirements. This study focuses on the impact of the extended version of the state-of-the-art Neural Architecture Search (NAS) tool, μNAS, for SHM applications, targeting four commonly used MCUs. Our assessment is based on the Z24 Bridge benchmark dataset, a common dataset for SHM we employed to train and evaluate models. We then discuss if the models found fit the constraints of the MCUs and the possible tradeoffs between error rate and model computational requirements. We also offer a comparison with the Raspberry Pi 4 Model B to highlight μNAS's capability in achieving high accuracy with higher computing capabilities. The obtained results are promising, as the found models satisfy the given constraints both in term of accuracy and memory footprint.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zein, Abbas Kassem; Diab, Rand Abou; Yaacoub, Mohamad; Ibrahim, Ali
Neural Architecture Search for Optimized TinyML Applications Proceedings Article
In: Roch, Massimo Ruo; Bellotti, Francesco; Berta, Riccardo; Martina, Maurizio; Ros, Paolo Motto (Ed.): Applications in Electronics Pervading Industry, Environment and Society, pp. 481–488, Springer Nature Switzerland, Cham, 2025, ISBN: 978-3-031-84100-2.
@inproceedings{10.1007/978-3-031-84100-2_57,
title = {Neural Architecture Search for Optimized {TinyML} Applications},
author = {Abbas Kassem Zein and Rand Abou Diab and Mohamad Yaacoub and Ali Ibrahim},
editor = {Massimo Ruo Roch and Francesco Bellotti and Riccardo Berta and Maurizio Martina and Paolo Motto Ros},
url = {https://link.springer.com/chapter/10.1007/978-3-031-84100-2_57},
doi = {10.1007/978-3-031-84100-2_57},
isbn = {978-3-031-84100-2},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {Applications in Electronics Pervading Industry, Environment and Society},
pages = {481--488},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {Integrating machine learning algorithms on circuits with low power consumption and low hardware complexity is challenging at the different levels of the design process. In network design at the software level, it is crucial to balance a high classification accuracy, while minimizing model complexity to reduce hardware demands. This paper proposes a search approach integrated with the Neural Architecture Search (NAS) to enhance the performance and reduce the complexity of deep learning models. Accuracy and number of Floating-Point Operations Per Second (FLOPS) are employed as evaluation metrics for the targeted models. The experimental results demonstrate that the proposed method outperforms similar state-of-the-art architectures while exhibiting comparable accuracy with up to a 70% reduction in complexity.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhang, Heng; Chen, Ziqian; Xia, Wei; Xiong, Gang; Gou, Gaopeng; Li, Zhen; Huang, Guangyan; Li, Yunpeng
ANASETC: Automatic Neural Architecture Search for Encrypted Traffic Classification Proceedings Article
In: ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025.
@inproceedings{10890501,
title = {{ANASETC}: Automatic Neural Architecture Search for Encrypted Traffic Classification},
author = {Heng Zhang and Ziqian Chen and Wei Xia and Gang Xiong and Gaopeng Gou and Zhen Li and Guangyan Huang and Yunpeng Li},
url = {https://ieeexplore.ieee.org/document/10890501},
doi = {10.1109/ICASSP49660.2025.10890501},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Xie, Lunchen; Lomurno, Eugenio; Gambella, Matteo; Ardagna, Danilo; Roveri, Manuel; Matteucci, Matteo; Shi, Qingjiang
ZO-DARTS++: An Efficient and Size-Variable Zeroth-Order Neural Architecture Search Algorithm Technical Report
2025.
@techreport{xie2025zodartsefficientsizevariablezerothorder,
title = {{ZO-DARTS++}: An Efficient and Size-Variable Zeroth-Order Neural Architecture Search Algorithm},
author = {Lunchen Xie and Eugenio Lomurno and Matteo Gambella and Danilo Ardagna and Manuel Roveri and Matteo Matteucci and Qingjiang Shi},
url = {https://arxiv.org/abs/2503.06092},
eprint = {2503.06092},
eprinttype = {arXiv},
institution = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Zhang, Yunlong; Chen, Nan; Wang, Yonghe; Su, Xiangdong; Bao, Feilong
Multilingual Parameter-Sharing Adapters: A Method for Optimizing Low-Resource Neural Machine Translation Proceedings Article
In: ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025.
@inproceedings{10889761,
title = {Multilingual Parameter-Sharing Adapters: A Method for Optimizing Low-Resource Neural Machine Translation},
author = {Yunlong Zhang and Nan Chen and Yonghe Wang and Xiangdong Su and Feilong Bao},
url = {https://ieeexplore.ieee.org/abstract/document/10889761},
doi = {10.1109/ICASSP49660.2025.10889761},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
R., José Ribamar Durand; Junior, Geraldo Braz; Silva, Italo Francyles Santos; Oliveira, Rui Miguel Gil Costa
HistAttentionNAS: A CNN built via NAS for Penile Cancer Diagnosis using Histopathological Images Journal Article
In: Procedia Computer Science, vol. 256, pp. 764-771, 2025, ISSN: 1877-0509, (CENTERIS - International Conference on ENTERprise Information Systems / ProjMAN - International Conference on Project MANagement / HCist - International Conference on Health and Social Care Information Systems and Technologies).
@article{DURANDR2025764,
title = {{HistAttentionNAS}: A {CNN} built via {NAS} for Penile Cancer Diagnosis using Histopathological Images},
author = {José Ribamar Durand R. and Geraldo Braz Junior and Italo Francyles Santos Silva and Rui Miguel Gil Costa Oliveira},
url = {https://www.sciencedirect.com/science/article/pii/S1877050925005344},
doi = {10.1016/j.procs.2025.02.177},
issn = {1877-0509},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Procedia Computer Science},
volume = {256},
pages = {764--771},
abstract = {Penile cancer, although rare, has an increasing mortality rate in Brazil, highlighting the need for effective diagnostic methods. Artificial Intelligence (AI) in histopathological analysis can speed up and objectify diagnosis, but designing an ideal architecture is challenging. In this study, we propose a neural architecture search (NAS) methodology for detecting penile cancer in digital histopathology images. We explored different configurations of stem blocks and the inclusion of attention mechanisms, highlighting specific preferences depending on the magnification of the images. The results showed that the NAS methodology enabled the discovery of more accurate and optimized architectures for this task, surpassing conventional models. The proposed models achieve 89.5% and 88.5% F1-Score for 40X and 100X magnification, respectively.},
note = {CENTERIS - International Conference on ENTERprise Information Systems / ProjMAN - International Conference on Project MANagement / HCist - International Conference on Health and Social Care Information Systems and Technologies},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Sun, Jingyu; Zhang, Hanting; Wang, Jianfeng
Enhancing Time Series Prediction with Evolutionary Algorithm-based Optimization of LSTM Proceedings Article
In: ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025.
@inproceedings{10889678,
title = {Enhancing Time Series Prediction with Evolutionary Algorithm-based Optimization of {LSTM}},
author = {Jingyu Sun and Hanting Zhang and Jianfeng Wang},
url = {https://ieeexplore.ieee.org/abstract/document/10889678},
doi = {10.1109/ICASSP49660.2025.10889678},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Tan, Wanrong; Huang, Lingling; Li, Hong; Tan, Menghao; Xie, Jin; Gao, Weifeng
Neural architecture search with integrated template-modules for efficient defect detection Journal Article
In: Expert Systems with Applications, pp. 127211, 2025, ISSN: 0957-4174.
@article{TAN2025127211,
title = {Neural architecture search with integrated template-modules for efficient defect detection},
author = {Wanrong Tan and Lingling Huang and Hong Li and Menghao Tan and Jin Xie and Weifeng Gao},
url = {https://www.sciencedirect.com/science/article/pii/S0957417425008334},
doi = {10.1016/j.eswa.2025.127211},
issn = {0957-4174},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Expert Systems with Applications},
pages = {127211},
abstract = {Surface defect detection in industrial production is critical for quality control. Traditional manual design of detection models is time-consuming, inefficient, and lacks adaptability to diverse defect scenarios. To address these limitations, we propose TMNAS (Template-Module Neural Architecture Search), a bi-level optimization framework that automates the design of high-performance defect detection models. TMNAS uniquely integrates predefined template-modules into a flexible search space, enabling simultaneous exploration of architectural components and parameters. By incorporating a single-objective genetic algorithm with a computational complexity penalty term, our approach effectively avoids local optima and significantly reduces search resource consumption. Extensive experiments on industrial defect datasets demonstrate that TMNAS surpasses state-of-the-art models, while on the COCO benchmark, it achieves a competitive mean average precision (mAP) of 58.4%, all with lower computational overhead.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gambella, Matteo; Pittorino, Fabrizio; Roveri, Manuel
Architecture-Aware Minimization (A$^2$M): How to Find Flat Minima in Neural Architecture Search Technical Report
2025.
@techreport{gambella2025architectureawareminimizationa2mflat,
title = {Architecture-Aware Minimization ({A$^2$M}): How to Find Flat Minima in Neural Architecture Search},
author = {Matteo Gambella and Fabrizio Pittorino and Manuel Roveri},
url = {https://arxiv.org/abs/2503.10404},
eprint = {2503.10404},
eprinttype = {arXiv},
institution = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Zhao, Tianchen; Wang, Xianpeng; Song, Xiangman
Multiobjective Backbone Network Architecture Search Based on Transfer Learning in Steel Defect Detection Journal Article
In: Neurocomputing, pp. 130012, 2025, ISSN: 0925-2312.
@article{ZHAO2025130012,
title = {Multiobjective Backbone Network Architecture Search Based on Transfer Learning in Steel Defect Detection},
author = {Tianchen Zhao and Xianpeng Wang and Xiangman Song},
url = {https://www.sciencedirect.com/science/article/pii/S0925231225006848},
doi = {10.1016/j.neucom.2025.130012},
issn = {0925-2312},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Neurocomputing},
pages = {130012},
abstract = {In recent years, steel defect detection methods based on deep learning have been widely used. However, due to the shape specificity of steel defects and data scarcity, using existing convolutional neural network architectures for training requires significant expertise and time to fine-tune the hyperparameters. Transfer learning effectively tackles the challenges of data scarcity or limited computing resources by transferring domain knowledge from source tasks to related target tasks, reducing the resource consumption of model training from scratch. In this paper, we propose a transfer learning-based multiobjective backbone network architecture search method (TMBNAS). First, TMBNAS formulates defect detection network search as a multiobjective problem while optimizing its detection accuracy and model complexity. Second, an effective variable-length encoding strategy is designed to represent different building blocks and unpredictable optimal depths in convolutional neural networks, and targeted improvements are made to the crossover and mutation operators. For the specificity of the steel defect detection task, a transfer learning strategy based on similar knowledge is used to transfer the architecture and weight parameters obtained from the search in the source task to the target task, and adjust and optimize them. Finally, a dynamic adjustment mechanism based on actual constraints is designed during the search process to gradually approximate the optimal non-dominated solution set with higher detection accuracy without losing its population diversity. The proposed method is tested on the continuous casting slab and workpiece defect datasets. The experimental results show that the model searched by the proposed method can achieve better detection performance compared with manually designed deep learning algorithms and classical network architecture search methods.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Xie, Xiaofeng; Gao, Yuelin; Zhang, Yuming
An improved Artificial Protozoa Optimizer for CNN architecture optimization Journal Article
In: Neural Networks, pp. 107368, 2025, ISSN: 0893-6080.
@article{XIE2025107368,
title = {An improved Artificial Protozoa Optimizer for {CNN} architecture optimization},
author = {Xiaofeng Xie and Yuelin Gao and Yuming Zhang},
url = {https://www.sciencedirect.com/science/article/pii/S0893608025002473},
doi = {10.1016/j.neunet.2025.107368},
issn = {0893-6080},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Neural Networks},
pages = {107368},
abstract = {In this paper, we propose a novel neural architecture search (NAS) method called MAPOCNN, which leverages an enhanced version of the Artificial Protozoa Optimizer (APO) to optimize the architecture of Convolutional Neural Networks (CNNs). The APO is known for its rapid convergence, high stability, and minimal parameter involvement. To further improve its performance, we introduce MAPO (Modified Artificial Protozoa Optimizer), which incorporates the phototaxis behavior of protozoa. This addition helps mitigate the risk of premature convergence, allowing the algorithm to explore a broader range of possible CNN architectures and ultimately identify more optimal solutions. Through rigorous experimentation on benchmark datasets, including Rectangle and Mnist-random, we demonstrate that MAPOCNN not only achieves faster convergence times but also performs competitively when compared to other state-of-the-art NAS algorithms. The results highlight the effectiveness of MAPOCNN in efficiently discovering CNN architectures that outperform existing methods in terms of both speed and accuracy. This work presents a promising direction for optimizing deep learning architectures using biologically inspired optimization techniques.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Onzo, Bernard-marie; Xue, Yu; Neri, Ferrante
Surrogate-assisted evolutionary neural architecture search based on smart-block discovery Journal Article
In: Expert Systems with Applications, vol. 277, pp. 127237, 2025, ISSN: 0957-4174.
@article{ONZO2025127237,
title = {Surrogate-assisted evolutionary neural architecture search based on smart-block discovery},
author = {Bernard-marie Onzo and Yu Xue and Ferrante Neri},
url = {https://www.sciencedirect.com/science/article/pii/S0957417425008590},
doi = {10.1016/j.eswa.2025.127237},
issn = {0957-4174},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Expert Systems with Applications},
volume = {277},
pages = {127237},
abstract = {Neural architecture search (NAS) has emerged as a powerful method for automating neural network design, yet its high computational cost remains a significant challenge. This paper introduces hybrid training-less neural architecture search (HYTES-NAS), a novel hybrid NAS framework that integrates evolutionary computation with a training-free evaluation strategy, significantly reducing computational demands while maintaining high search efficiency. Unlike conventional NAS methods that rely on full model training, HYTES-NAS leverages a surrogate-assisted scoring mechanism to assess candidate architectures efficiently. Additionally, a smart-block discovery strategy and particle swarm optimisation are employed to refine the search space and accelerate convergence. Experimental results on multiple NAS benchmarks demonstrate that HYTES-NAS achieves superior performance with significantly lower computational cost compared to state-of-the-art NAS methods. This work provides a promising and scalable solution for efficient NAS, making high-performance architecture search more accessible for real-world applications.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Jeon, Jeimin; Oh, Youngmin; Lee, Junghyup; Baek, Donghyeon; Kim, Dohyung; Eom, Chanho; Ham, Bumsub
Subnet-Aware Dynamic Supernet Training for Neural Architecture Search Technical Report
2025.
@techreport{jeon2025subnetawaredynamicsupernettraining,
title = {Subnet-Aware Dynamic Supernet Training for Neural Architecture Search},
author = {Jeimin Jeon and Youngmin Oh and Junghyup Lee and Donghyeon Baek and Dohyung Kim and Chanho Eom and Bumsub Ham},
url = {https://arxiv.org/abs/2503.10740},
eprint = {2503.10740},
eprinttype = {arXiv},
institution = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Winter, Benjamin David; Teahan, William J.
Ecological Neural Architecture Search Technical Report
2025.
@techreport{winter2025ecologicalneuralarchitecturesearch,
title = {Ecological Neural Architecture Search},
author = {Benjamin David Winter and William J. Teahan},
url = {https://arxiv.org/abs/2503.10908},
eprint = {2503.10908},
eprinttype = {arXiv},
institution = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Winter, Benjamin David; Teahan, William John
Evaluating a Novel Neuroevolution and Neural Architecture Search System Technical Report
2025.
@techreport{winter2025evaluatingnovelneuroevolutionneural,
title = {Evaluating a Novel Neuroevolution and Neural Architecture Search System},
author = {Benjamin David Winter and William John Teahan},
url = {https://arxiv.org/abs/2503.10869},
eprint = {2503.10869},
eprinttype = {arXiv},
institution = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}