Maintained by Difan Deng and Marius Lindauer.
The following list considers papers related to neural architecture search. It is by no means complete. If you miss a paper on the list, please let us know.
Please note that although NAS methods steadily improve, the quality of empirical evaluations in this field is still lagging behind that of other areas in machine learning, AI and optimization. We would therefore like to share some best practices for empirical evaluations of NAS methods, which we believe will facilitate sustained and measurable progress in the field. If you are interested in a teaser, please read our blog post or directly jump to our checklist.
Transformers have gained increasing popularity in different domains. For a comprehensive list of papers focusing on Neural Architecture Search for Transformer-Based spaces, the awesome-transformer-search repo is all you need.
2025
Liu, Wenbo; Deng, Tao; Yan, Fei
HID-NAS: A Novel Neural Architecture Search Pipeline for High Information Density Data Proceedings Article
In: ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025.
@inproceedings{10889095,
title = {{HID-NAS}: A Novel Neural Architecture Search Pipeline for High Information Density Data},
author = {Liu, Wenbo and Deng, Tao and Yan, Fei},
url = {https://ieeexplore.ieee.org/abstract/document/10889095},
doi = {10.1109/ICASSP49660.2025.10889095},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhang, Binyan; Ren, Ao; Zhang, Zihao; Duan, Moming; Liu, Duo; Tan, Yujuan; Zhong, Kan
MPNAS: Multimodal Sentiment Analysis Pruning via Neural Architecture Search Proceedings Article
In: ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025.
@inproceedings{10887670,
title = {{MPNAS}: Multimodal Sentiment Analysis Pruning via Neural Architecture Search},
author = {Zhang, Binyan and Ren, Ao and Zhang, Zihao and Duan, Moming and Liu, Duo and Tan, Yujuan and Zhong, Kan},
url = {https://ieeexplore.ieee.org/abstract/document/10887670},
doi = {10.1109/ICASSP49660.2025.10887670},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Sun, Shuoyang; Zhang, Kaiwen; Fang, Hao; Chen, Bin; Li, Jiawei; Huo, Enze; Xia, Shu-Tao
RobNAS: Robust Neural Architecture Search for Point Cloud Adversarial Defense Proceedings Article
In: ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1-5, 2025.
@inproceedings{10890087,
title = {{RobNAS}: Robust Neural Architecture Search for Point Cloud Adversarial Defense},
author = {Sun, Shuoyang and Zhang, Kaiwen and Fang, Hao and Chen, Bin and Li, Jiawei and Huo, Enze and Xia, Shu-Tao},
url = {https://ieeexplore.ieee.org/abstract/document/10890087},
doi = {10.1109/ICASSP49660.2025.10890087},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Renqi; Nian, Fan; Cen, Yuhui; Peng, Yiheng; Wang, Hongbo; Yu, Zekuan; Luo, Jingjing
L-SSHNN: A Larger search space of Semi-Supervised Hybrid NAS Network for echocardiography segmentation Journal Article
In: Expert Systems with Applications, pp. 127084, 2025, ISSN: 0957-4174.
@article{CHEN2025127084,
title = {{L-SSHNN}: A Larger search space of Semi-Supervised Hybrid {NAS} Network for echocardiography segmentation},
author = {Chen, Renqi and Nian, Fan and Cen, Yuhui and Peng, Yiheng and Wang, Hongbo and Yu, Zekuan and Luo, Jingjing},
url = {https://www.sciencedirect.com/science/article/pii/S0957417425007067},
doi = {10.1016/j.eswa.2025.127084},
issn = {0957-4174},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Expert Systems with Applications},
pages = {127084},
abstract = {Echocardiography with image segmentation facilitates clinicians in thoroughly analyzing cardiac conditions by providing detailed insights into anatomical structures. However, echocardiography segmentation is challenging due to low image quality with blurred boundaries constrained by the inherent noise, artifacts, and cardiac motion. When manually designed networks have achieved promising results, Neural Architecture Search (NAS) allows for the automatic optimization of network structures. Integrating the strengths of NAS works and meticulously crafted networks becomes meaningful in advancing this field. In this paper, we propose a new Semi-Supervised Hybrid NAS Network with a Larger search space for echocardiography segmentation under limited annotations, termed L-SSHNN. Firstly, we propose a three-level search: inner cell, outer layer, and encoder–decoder design, enlarging the search space. Secondly, the proposed L-SSHNN specifies an architectural non-sharing strategy, allowing diverse structures among different cells. Moreover, we propose a new differentiable architecture search (Darts) method termed separation-combination partially-connected Darts (SC-PC-Darts) to incorporate convolution fusion modules and search for the optimal cell architecture for multi-scale feature extraction. Extensive experiments with other state-of-the-art methods on three publicly available echocardiography datasets demonstrate the superiority of L-SSHNN. Additionally, comparative ablation studies further analyze different configurations of our model.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Liu, Yingzhe; Fu, Fangfa; Sun, Xuejian
Research on Approximate Computation of Signal Processing Algorithms for AIoT Processors Based on Deep Learning Journal Article
In: Electronics, vol. 14, no. 6, 2025, ISSN: 2079-9292.
@article{electronics14061064,
title = {Research on Approximate Computation of Signal Processing Algorithms for {AIoT} Processors Based on Deep Learning},
author = {Liu, Yingzhe and Fu, Fangfa and Sun, Xuejian},
url = {https://www.mdpi.com/2079-9292/14/6/1064},
doi = {10.3390/electronics14061064},
issn = {2079-9292},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Electronics},
volume = {14},
number = {6},
abstract = {In the post-Moore era, the excessive amount of information brings great challenges to the performance of computing systems. To cope with these challenges, approximate computation has developed rapidly, which enhances the system performance with minor degradation in accuracy. In this paper, we investigate the utilization of an Artificial Intelligence of Things (AIoT) processor for approximate computing. Firstly, we employed neural architecture search (NAS) to acquire the neural network structure for approximate computation, which approximates the functions of FFT, DCT, FIR, and IIR. Subsequently, based on this structure, we quantized and trained a neural network implemented on the AI accelerator of the MAX78000 development board. To evaluate the performance, we implemented the same functions using the CMSIS-DSP library. The results demonstrate that the computational efficiency of the approximate computation on the AI accelerator is significantly higher compared to traditional DSP implementations. Therefore, the approximate computation based on AIoT devices can be effectively utilized in real-time applications.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhou, Haoxiang; Wei, Zikun; Liu, Dingbang; Zhang, Liuyang; Ding, Chenchen; Yang, Jiaqi; Mao, Wei; Yu, Hao
A Layer-wised Mixed-Precision CIM Accelerator with Bit-level Sparsity-aware ADCs for NAS-Optimized CNNs Proceedings Article
In: Proceedings of the 30th Asia and South Pacific Design Automation Conference, pp. 720–726, Association for Computing Machinery, Tokyo, Japan, 2025, ISBN: 9798400706356.
@inproceedings{10.1145/3658617.3697682,
title = {A Layer-wised Mixed-Precision {CIM} Accelerator with Bit-level Sparsity-aware {ADCs} for {NAS}-Optimized {CNNs}},
author = {Zhou, Haoxiang and Wei, Zikun and Liu, Dingbang and Zhang, Liuyang and Ding, Chenchen and Yang, Jiaqi and Mao, Wei and Yu, Hao},
url = {https://doi.org/10.1145/3658617.3697682},
doi = {10.1145/3658617.3697682},
isbn = {9798400706356},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {Proceedings of the 30th Asia and South Pacific Design Automation Conference},
pages = {720--726},
publisher = {Association for Computing Machinery},
address = {Tokyo, Japan},
series = {ASPDAC '25},
abstract = {Exploring multiple precisions as well as sparsities for a computing-in-memory (CIM) based convolutional accelerators is challenging. To further improve energy efficiency with minimal accuracy loss, this paper develops a neural architecture search (NAS) method to identify precision for each layer of the CNN and further leverages bit-level sparsity. The results indicate that following this approach, ResNet-18 and VGG-16 not only maintain their accuracy but also implement layer-wised mixed-precision effectively. Furthermore, there is a substantial enhancement in the bit-level sparsity of weights within each layer, with an average bit-level sparsity exceeding 90% per bit, thus providing broader possibilities for hardware-level sparsity optimization. In terms of hardware design, a mixed-precision (2/4/8-bit) readout circuit as well as a bit-level sparsity-aware Analog-to-Digital Converter (ADC) are both proposed to reduce system power consumption. Based on bit-level sparsity mixed-precision CNNs benchmarks, post-layout simulation results in 28nm reveal that the proposed accelerator achieves up to 245.72 TOPS/W energy efficiency, which shows about 2.52 – 6.57× improvement compared to the state-of-the-art SRAM-based CIM accelerators.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Yang, Fan; Abedin, Mohammad Zoynul; Hajek, Petr; Qiao, Yanan
Blockchain and Machine Learning in the Green Economy: Pioneering Carbon Neutrality Through Innovative Trading Technologies Journal Article
In: IEEE Transactions on Engineering Management, pp. 1-40, 2025.
@article{10909627,
title = {Blockchain and Machine Learning in the Green Economy: Pioneering Carbon Neutrality Through Innovative Trading Technologies},
author = {Yang, Fan and Abedin, Mohammad Zoynul and Hajek, Petr and Qiao, Yanan},
url = {https://ieeexplore.ieee.org/abstract/document/10909627},
doi = {10.1109/TEM.2025.3547730},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {IEEE Transactions on Engineering Management},
pages = {1--40},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rong, Xiaobin; Wang, Dahan; Hu, Yuxiang; Zhu, Changbao; Chen, Kai; Lu, Jing
UL-UNAS: Ultra-Lightweight U-Nets for Real-Time Speech Enhancement via Network Architecture Search Miscellaneous
2025.
@misc{rong2025ulunasultralightweightunetsrealtime,
title = {{UL-UNAS}: Ultra-Lightweight {U-Nets} for Real-Time Speech Enhancement via Network Architecture Search},
author = {Rong, Xiaobin and Wang, Dahan and Hu, Yuxiang and Zhu, Changbao and Chen, Kai and Lu, Jing},
url = {https://arxiv.org/abs/2503.00340},
eprint = {2503.00340},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Wang, Qiyi; Shao, Yinning; Ma, Yunlong; Liu, Min
NodeNAS: Node-Specific Graph Neural Architecture Search for Out-of-Distribution Generalization Technical Report
2025.
@techreport{wang2025nodenasnodespecificgraphneural,
title = {{NodeNAS}: Node-Specific Graph Neural Architecture Search for Out-of-Distribution Generalization},
author = {Wang, Qiyi and Shao, Yinning and Ma, Yunlong and Liu, Min},
url = {https://arxiv.org/abs/2503.02448},
eprint = {2503.02448},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Chouhan, Avinash; Chutia, Dibyajyoti; Deb, Biswarup; Aggarwal, Shiv Prasad
Attention-Based Neural Architecture Search for Effective Semantic Segmentation of Satellite Images Proceedings Article
In: Noor, Arti; Saroha, Kriti; Pricop, Emil; Sen, Abhijit; Trivedi, Gaurav (Ed.): Emerging Trends and Technologies on Intelligent Systems, pp. 325–335, Springer Nature Singapore, Singapore, 2025, ISBN: 978-981-97-5703-9.
@inproceedings{10.1007/978-981-97-5703-9_28,
title = {Attention-Based Neural Architecture Search for Effective Semantic Segmentation of Satellite Images},
author = {Chouhan, Avinash and Chutia, Dibyajyoti and Deb, Biswarup and Aggarwal, Shiv Prasad},
editor = {Noor, Arti and Saroha, Kriti and Pricop, Emil and Sen, Abhijit and Trivedi, Gaurav},
url = {https://link.springer.com/chapter/10.1007/978-981-97-5703-9_28},
isbn = {978-981-97-5703-9},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {Emerging Trends and Technologies on Intelligent Systems},
pages = {325--335},
publisher = {Springer Nature Singapore},
address = {Singapore},
abstract = {Semantic segmentation is an important activity in satellite image analysis. The manual design and development of neural architectures for semantic segmentation is very tedious and can result in computationally heavy architectures with redundant computation. Neural architecture search (NAS) produces automated network architectures for a given task considering computational cost and other parameters. In this work, we proposed an attention-based neural architecture search (ANAS), which uses attention layers at cell levels for effective and efficient architecture design for semantic segmentation. The proposed ANAS has achieved better results than previous NAS-based work on two benchmark datasets.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Cai, Zicheng; Tang, Yaohua; Lai, Yutao; Wang, Hua; Chen, Zhi; Chen, Hao
SEKI: Self-Evolution and Knowledge Inspiration based Neural Architecture Search via Large Language Models Technical Report
2025.
@techreport{cai2025sekiselfevolutionknowledgeinspiration,
title = {{SEKI}: Self-Evolution and Knowledge Inspiration based Neural Architecture Search via Large Language Models},
author = {Cai, Zicheng and Tang, Yaohua and Lai, Yutao and Wang, Hua and Chen, Zhi and Chen, Hao},
url = {https://arxiv.org/abs/2502.20422},
eprint = {2502.20422},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Rumiantsev, Pavel; Coates, Mark
Variation Matters: from Mitigating to Embracing Zero-Shot NAS Ranking Function Variation Technical Report
2025.
@techreport{rumiantsev2025variationmattersmitigatingembracing,
title = {Variation Matters: from Mitigating to Embracing Zero-Shot {NAS} Ranking Function Variation},
author = {Rumiantsev, Pavel and Coates, Mark},
url = {https://arxiv.org/abs/2502.19657},
eprint = {2502.19657},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Ding, Zhenyang; Pu, Ninghao; Miao, Qihui; Chen, Zhiqiang; Xu, Yifan; Liu, Hao
Efficient Palm Vein Recognition Optimized by Neural Architecture Search and Hybrid Compression Proceedings Article
In: 2025 International Conference on Multi-Agent Systems for Collaborative Intelligence (ICMSCI), pp. 826-832, 2025.
@inproceedings{10894245,
title = {Efficient Palm Vein Recognition Optimized by Neural Architecture Search and Hybrid Compression},
author = {Ding, Zhenyang and Pu, Ninghao and Miao, Qihui and Chen, Zhiqiang and Xu, Yifan and Liu, Hao},
url = {https://ieeexplore.ieee.org/abstract/document/10894245},
doi = {10.1109/ICMSCI62561.2025.10894245},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {2025 International Conference on Multi-Agent Systems for Collaborative Intelligence (ICMSCI)},
pages = {826--832},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ang, Li-Minn; Su, Yuanxin; Seng, Kah Phooi; Smith, Jeremy S.
Customized Binary Convolutional Neural Networks and Neural Architecture Search on Hardware Technologies Journal Article
In: IEEE Nanotechnology Magazine, pp. 1-8, 2025.
@article{10904266,
title = {Customized Binary Convolutional Neural Networks and Neural Architecture Search on Hardware Technologies},
author = {Ang, Li-Minn and Su, Yuanxin and Seng, Kah Phooi and Smith, Jeremy S.},
url = {https://ieeexplore.ieee.org/abstract/document/10904266},
doi = {10.1109/MNANO.2025.3533937},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {IEEE Nanotechnology Magazine},
pages = {1--8},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lu, Kang-Di; Huang, Jia-Cheng; Zeng, Guo-Qiang; Chen, Min-Rong; Geng, Guang-Gang; Weng, Jian
Multi-Objective Discrete Extremal Optimization of Variable-Length Blocks-Based CNN by Joint NAS and HPO for Intrusion Detection in IIoT Journal Article
In: IEEE Transactions on Dependable and Secure Computing, pp. 1-18, 2025.
@article{10902222,
title = {Multi-Objective Discrete Extremal Optimization of Variable-Length Blocks-Based {CNN} by Joint {NAS} and {HPO} for Intrusion Detection in {IIoT}},
author = {Lu, Kang-Di and Huang, Jia-Cheng and Zeng, Guo-Qiang and Chen, Min-Rong and Geng, Guang-Gang and Weng, Jian},
url = {https://ieeexplore.ieee.org/abstract/document/10902222},
doi = {10.1109/TDSC.2025.3545363},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {IEEE Transactions on Dependable and Secure Computing},
pages = {1--18},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Li, Chunchao; Li, Jun; Peng, Mingrui; Rasti, Behnood; Duan, Puhong; Tang, Xuebin; Ma, Xiaoguang
Low-Latency Neural Network for Efficient Hyperspectral Image Classification Journal Article
In: IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, vol. PP, pp. 1-17, 2025.
@article{articleg,
title = {Low-Latency Neural Network for Efficient Hyperspectral Image Classification},
author = {Li, Chunchao and Li, Jun and Peng, Mingrui and Rasti, Behnood and Duan, Puhong and Tang, Xuebin and Ma, Xiaoguang},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=10900438},
doi = {10.1109/JSTARS.2025.3544583},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing},
volume = {PP},
pages = {1--17},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Günay, İlker; İnik, Özkan
SADASNet: A Selective and Adaptive Deep Architecture Search Network with Hyperparameter Optimization for Robust Skin Cancer Classification Journal Article
In: Diagnostics, vol. 15, no. 5, 2025, ISSN: 2075-4418.
@article{diagnostics15050541,
title = {{SADASNet}: A Selective and Adaptive Deep Architecture Search Network with Hyperparameter Optimization for Robust Skin Cancer Classification},
author = {Günay, İlker and İnik, Özkan},
url = {https://www.mdpi.com/2075-4418/15/5/541},
doi = {10.3390/diagnostics15050541},
issn = {2075-4418},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Diagnostics},
volume = {15},
number = {5},
abstract = {Background/Objectives: Skin cancer is a major public health concern, where early diagnosis and effective treatment are essential for prevention. To enhance diagnostic accuracy, researchers have increasingly utilized computer vision systems, with deep learning-based approaches becoming the primary focus in recent studies. Nevertheless, there is a notable research gap in the effective optimization of hyperparameters to design optimal deep learning architectures, given the need for high accuracy and lower computational complexity. Methods: This paper puts forth a robust metaheuristic optimization-based approach to develop novel deep learning architectures for multi-class skin cancer classification. This method, designated as the SADASNet (Selective and Adaptive Deep Architecture Search Network by Hyperparameter Optimization) algorithm, is developed based on the Particle Swarm Optimization (PSO) technique. The SADASNet method is adapted to the HAM10000 dataset. Innovative data augmentation techniques are applied to overcome class imbalance issues and enhance the performance of the model. The SADASNet method has been developed to accommodate a range of image sizes, and six different original deep learning models have been produced as a result. Results: The models achieved the following highest performance metrics: 99.31% accuracy, 97.58% F1 score, 97.57% recall, 97.64% precision, and 99.59% specificity. Compared to the most advanced competitors reported in the literature, the proposed method demonstrates superior performance in terms of accuracy and computational complexity. Furthermore, it maintains a broad solution space during parameter optimization. Conclusions: With these outcomes, this method aims to enhance the classification of skin cancer and contribute to the advancement of deep learning.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Mecharbat, Lotfi Abdelkrim; Marchisio, Alberto; Shafique, Muhammad; Ghassemi, Mohammad M.; Alhanai, Tuka
MoENAS: Mixture-of-Expert based Neural Architecture Search for jointly Accurate, Fair, and Robust Edge Deep Neural Networks Technical Report
2025.
@techreport{mecharbat2025moenasmixtureofexpertbasedneuralb,
title = {{MoENAS}: Mixture-of-Expert based Neural Architecture Search for jointly Accurate, Fair, and Robust Edge Deep Neural Networks},
author = {Mecharbat, Lotfi Abdelkrim and Marchisio, Alberto and Shafique, Muhammad and Ghassemi, Mohammad M. and Alhanai, Tuka},
url = {https://arxiv.org/abs/2502.07422},
eprint = {2502.07422},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Xu, Liming; Zheng, Jie; He, Chunlin; Wang, Jing; Zheng, Bochuan; Lv, Jiancheng
Adaptive Multi-particle Swarm Neural Architecture Search for High-incidence Cancer Prediction Journal Article
In: IEEE Transactions on Artificial Intelligence, pp. 1-12, 2025.
@article{10896623,
title = {Adaptive Multi-particle Swarm Neural Architecture Search for High-incidence Cancer Prediction},
author = {Xu, Liming and Zheng, Jie and He, Chunlin and Wang, Jing and Zheng, Bochuan and Lv, Jiancheng},
url = {https://ieeexplore.ieee.org/abstract/document/10896623},
doi = {10.1109/TAI.2025.3543822},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {IEEE Transactions on Artificial Intelligence},
pages = {1--12},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zou, Juan; Liu, Yang; Liu, Yuan; Xia, Yizhang
Evolutionary multi-objective neural architecture search via depth equalization supernet Journal Article
In: Neurocomputing, pp. 129674, 2025, ISSN: 0925-2312.
@article{ZOU2025129674,
title = {Evolutionary multi-objective neural architecture search via depth equalization supernet},
author = {Zou, Juan and Liu, Yang and Liu, Yuan and Xia, Yizhang},
url = {https://www.sciencedirect.com/science/article/pii/S0925231225003467},
doi = {10.1016/j.neucom.2025.129674},
issn = {0925-2312},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Neurocomputing},
pages = {129674},
abstract = {To provide a diverse selection of models suitable for different application scenarios, neural architecture search (NAS) is constructed as a multi-objective optimization problem aiming to simultaneously optimize multiple metrics such as model size and accuracy. Evolutionary algorithms (EA) have been shown to be an effective multi-objective approach that can balance different metrics. However, EA require many evaluations, and the evaluation of architectures is expensive. Training a supernet to evaluate an architecture is considered a promising method to reduce the cost of EA. But there are still many challenges in applying supernet to multi-objective NAS: (1) Supernet tends to give higher scores to shallower architectures, causing potential deeper architectures to be ignored. (2) The receptive field of the architecture has a large gap between search and evaluation, causing a decrease in performance. (3) Larger models are gradually eliminated during evolution, leading to a diversity disaster. We proposed a framework called DESEvo to solve these problems in this paper. DESEvo trains a depth equalization supernet to improve bias of supernet via a frequency rejection sampling method. In addition, DESEvo adaptively constrainted receptive field of architecture to reduce the gap. Finally, DESEvo developed a diversity-preserving strategy to enhance the diversity. Experimental results validate the efficiency and effectiveness of the algorithm, DESEvo can search a set of architectures that are more competitive compared to other state-of-the-art algorithms within 0.2 days, becoming the most efficient multi-objective NAS method in the supernet-based methods.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhang, Zheyu; Zhang, Yueyi; Sun, Xiaoyan
Denoising Designs-inherited Search Framework for Image Denoising Technical Report
2025.
@techreport{zhang2025denoisingdesignsinheritedsearchframework,
title = {Denoising Designs-inherited Search Framework for Image Denoising},
author = {Zhang, Zheyu and Zhang, Yueyi and Sun, Xiaoyan},
url = {https://arxiv.org/abs/2502.13359},
eprint = {2502.13359},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Deng, Difan; Lindauer, Marius
Neural Attention Search Technical Report
2025.
@techreport{deng2025neuralattentionsearch,
title = {Neural Attention Search},
author = {Deng, Difan and Lindauer, Marius},
url = {https://arxiv.org/abs/2502.13251},
eprint = {2502.13251},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Njor, Emil; Banbury, Colby; Fafoutis, Xenofon
Fast Data Aware Neural Architecture Search via Supernet Accelerated Evaluation Technical Report
2025.
@techreport{njor2025fastdataawareneural,
title = {Fast Data Aware Neural Architecture Search via Supernet Accelerated Evaluation},
author = {Njor, Emil and Banbury, Colby and Fafoutis, Xenofon},
url = {https://arxiv.org/abs/2502.12690},
eprint = {2502.12690},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Feng, Yuqi; Sun, Yanan; Yen, Gary G.; Tan, Kay Chen
REP: An Interpretable Robustness Enhanced Plugin for Differentiable Neural Architecture Search Journal Article
In: IEEE Transactions on Knowledge and Data Engineering, pp. 1-15, 2025.
@article{10892073,
title = {{REP}: An Interpretable Robustness Enhanced Plugin for Differentiable Neural Architecture Search},
author = {Feng, Yuqi and Sun, Yanan and Yen, Gary G. and Tan, Kay Chen},
url = {https://ieeexplore.ieee.org/abstract/document/10892073},
doi = {10.1109/TKDE.2025.3543503},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {IEEE Transactions on Knowledge and Data Engineering},
pages = {1--15},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kim, Hyeonah; Choi, Sanghyeok; Son, Jiwoo; Park, Jinkyoo; Kwon, Changhyun
Neural Genetic Search in Discrete Spaces Technical Report
2025.
@techreport{kim2025neuralgeneticsearchdiscrete,
title = {Neural Genetic Search in Discrete Spaces},
author = {Kim, Hyeonah and Choi, Sanghyeok and Son, Jiwoo and Park, Jinkyoo and Kwon, Changhyun},
url = {https://arxiv.org/abs/2502.10433},
eprint = {2502.10433},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Kuhn, Lukas; Saba-Sadiya, Sari; Roig, Gemma
Cognitive Neural Architecture Search Reveals Hierarchical Entailment Technical Report
2025.
@techreport{kuhn2025cognitiveneuralarchitecturesearch,
title = {Cognitive Neural Architecture Search Reveals Hierarchical Entailment},
author = {Kuhn, Lukas and Saba-Sadiya, Sari and Roig, Gemma},
url = {https://arxiv.org/abs/2502.11141},
eprint = {2502.11141},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Gao, Yang; Yang, Hong; Chen, Yizhi; Wu, Junxian; Zhang, Peng; Wang, Haishuai
LLM4GNAS: A Large Language Model Based Toolkit for Graph Neural Architecture Search Technical Report
2025.
@techreport{gao2025llm4gnaslargelanguagemodel,
title = {{LLM4GNAS}: A Large Language Model Based Toolkit for Graph Neural Architecture Search},
author = {Gao, Yang and Yang, Hong and Chen, Yizhi and Wu, Junxian and Zhang, Peng and Wang, Haishuai},
url = {https://arxiv.org/abs/2502.10459},
eprint = {2502.10459},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Yin, Shantong; Niu, Ben; Wang, Rui; Wang, Xin
Spatial and channel level feature redundancy reduction for differentiable neural architecture search Journal Article
In: Neurocomputing, vol. 630, pp. 129713, 2025, ISSN: 0925-2312.
@article{YIN2025129713,
title = {Spatial and channel level feature redundancy reduction for differentiable neural architecture search},
author = {Yin, Shantong and Niu, Ben and Wang, Rui and Wang, Xin},
url = {https://www.sciencedirect.com/science/article/pii/S0925231225003856},
doi = {10.1016/j.neucom.2025.129713},
issn = {0925-2312},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Neurocomputing},
volume = {630},
pages = {129713},
abstract = {Differentiable architecture search (DARTS), based on the continuous relaxation of the architectural representation and gradient descent, achieves effective results in Neural Architecture Search (NAS) field. Among the neural architectures, convolutional neural networks (CNNs) have achieved remarkable performance in various computer vision tasks. However, convolutional layers inevitably extract redundant features as the limitation of the weight-sharing property by convolutional kernels, thus slowing down the search efficiency of DARTS. In this paper, we propose a novel search approach named Slim-DARTS from the perspective of reducing feature redundancy, to further achieve high-speed and efficient neural architecture search. At the level of spatial redundancy, we design a spatial reconstruction module to eliminate spatial feature redundancy and facilitate representative feature learning. At the channel redundancy level, partial channel connection is applied to randomly sample a small subset of channels for operation selection to reduce unfair competition among candidate operations. And we introduce a group of channel parameters to automatically adjust the proportion of selected channels. The experimental results show that our research greatly improves search efficiency and memory utilization, achieving classification error rates of 2.39% and 16.78% on CIFAR-10 and CIFAR-100, respectively.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Cranney, Caleb; Meyer, Jesse G.
AttentionSmithy: A Modular Framework for Rapid Transformer Development and Customization Technical Report
2025.
@techreport{cranney2025attentionsmithymodularframeworkrapid,
title = {{AttentionSmithy}: A Modular Framework for Rapid Transformer Development and Customization},
author = {Cranney, Caleb and Meyer, Jesse G.},
url = {https://arxiv.org/abs/2502.09503},
eprint = {2502.09503},
archiveprefix = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Cheng, Jian; Jiang, Jinbo; Kang, Haidong; Ma, Lianbo
A Hybrid Neural Architecture Search Algorithm Optimized via Lifespan Particle Swarm Optimization for Coal Mine Image Recognition Journal Article
In: Mathematics, vol. 13, no. 4, 2025, ISSN: 2227-7390.
@article{math13040631,
title = {A Hybrid Neural Architecture Search Algorithm Optimized via Lifespan Particle Swarm Optimization for Coal Mine Image Recognition},
author = {Jian Cheng and Jinbo Jiang and Haidong Kang and Lianbo Ma},
url = {https://www.mdpi.com/2227-7390/13/4/631},
doi = {10.3390/math13040631},
issn = {2227-7390},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Mathematics},
volume = {13},
number = {4},
abstract = {Coal mine scene image recognition plays an important role in safety monitoring and equipment detection. However, traditional methods often depend on manually designed neural network architectures. These models struggle to handle the complex backgrounds, low illumination, and diverse objects commonly found in coal mine environments. Manual designs are not only inefficient but also restrict the exploration of optimal architectures, resulting to subpar performance. To address these challenges, we propose using a neural architecture search (NAS) to automate the design of neural networks. Traditional NAS methods are known to be computationally expensive. To improve this, we enhance the process by incorporating Particle Swarm Optimization (PSO), a scalable algorithm that effectively balances global and local searches. To further enhance PSO’s efficiency, we integrate the lifespan mechanism, which prevents premature convergence and enables a more comprehensive exploration of the search space. Our proposed method establishes a flexible search space that includes various types of convolutional layers, activation functions, pooling operations, and network depths, enabling a comprehensive optimization process. Extensive experiments show that the Lifespan-PSO NAS method outperforms traditional manually designed networks and standard PSO-based NAS approaches, offering significant improvements in both recognition accuracy (improved by 10%) and computational efficiency (resource usage reduced by 30%). This makes it a highly effective solution for real-world coal mine image recognition tasks via a PSO-optimized approach in terms of performance and efficiency.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Krishnanunni, C G; Bui-Thanh, Tan; Dawson, Clint
Topological derivative approach for deep neural network architecture adaptation Technical Report
2025.
@techreport{krishnanunni2025topologicalderivativeapproachdeep,
title = {Topological derivative approach for deep neural network architecture adaptation},
author = {C G Krishnanunni and Tan Bui-Thanh and Clint Dawson},
url = {https://arxiv.org/abs/2502.06885},
institution = {arXiv},
eprint = {2502.06885},
eprinttype = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Wang, Zilong; Liang, Pei; Zhai, Jinglei; Wu, Bei; Chen, Xin; Ding, Fan; Chen, Qiang; Sun, Biao
Efficient detection of foodborne pathogens via SERS and deep learning: An ADMIN-optimized NAS-Unet approach Journal Article
In: Journal of Hazardous Materials, vol. 489, pp. 137581, 2025, ISSN: 0304-3894.
@article{WANG2025137581,
title = {Efficient detection of foodborne pathogens via SERS and deep learning: An ADMIN-optimized NAS-Unet approach},
author = {Zilong Wang and Pei Liang and Jinglei Zhai and Bei Wu and Xin Chen and Fan Ding and Qiang Chen and Biao Sun},
url = {https://www.sciencedirect.com/science/article/pii/S0304389425004959},
doi = {10.1016/j.jhazmat.2025.137581},
issn = {0304-3894},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Journal of Hazardous Materials},
volume = {489},
pages = {137581},
abstract = {Amid the increasing global challenge of foodborne diseases, there is an urgent need for rapid and precise pathogen detection methods. This study innovatively integrates surface-enhanced Raman Spectroscopy (SERS) with deep learning technology to develop an efficient tool for the detection of foodborne pathogens. Utilizing an automated design of mixed networks (ADMIN) strategy, coupled with neural architecture search (NAS) technology, we optimized convolutional neural networks (CNNs) architectures, significantly enhancing SERS data analysis capabilities. This research introduces the U-Net architecture and attention mechanisms, which improve not only classification accuracy but also the model's ability to identify critical spectral features. Compared to traditional detection methods, our approach demonstrates significant advantages in accuracy. In testing samples from 22 foodborne pathogens, the optimized NAS-Unet model achieved an average precision of 92.77 %, surpassing current technologies. Additionally, we explored how different network depths affect classification performance and validated the model's generalization capabilities on the Bacteria-ID dataset, laying the groundwork for practical applications. Our study provides an innovative detection approach for the food safety sector and opens new avenues for applying deep learning technologies in microbiology. Looking ahead, we aim to further explore diverse network modules to enhance model generalization and promote the application of these technologies in real-world food safety testing, playing a crucial role in the fight against foodborne diseases.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lupión, M.; Cruz, N. C.; Ortigosa, E. M.; Ortigosa, P. M.
A holistic approach for resource-constrained neural network architecture search Journal Article
In: Applied Soft Computing, vol. 172, pp. 112832, 2025, ISSN: 1568-4946.
@article{LUPION2025112832,
title = {A holistic approach for resource-constrained neural network architecture search},
author = {M. Lupión and N. C. Cruz and E. M. Ortigosa and P. M. Ortigosa},
url = {https://www.sciencedirect.com/science/article/pii/S1568494625001437},
doi = {10.1016/j.asoc.2025.112832},
issn = {1568-4946},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Applied Soft Computing},
volume = {172},
pages = {112832},
abstract = {The design of Artificial Neural Networks (ANN) is critical for their performance. The research field called Neural Network Search (NAS) investigates automated design strategies. This work proposes a novel NAS stack that stands out in three facets. First, the representation scheme encodes problem-specific ANN as plain vectors of numbers without needing auxiliary conversion models. Second, it is a pioneer in relying on the TLBO meta-heuristic. This optimizer supports large-scale problems and only expects two parameters, contrasting with other meta-heuristics used for NAS. Third, the stack includes a new evaluation predictor that avoids evaluating non-promising architectures. It combines several machine learning methods that train as the optimizer evaluates solutions, which avoids preliminary preparing this component and makes it self-adaptive. The proposal has been tested by using it to build a CIFAR-10 classifier while forcing the architecture to have fewer than 150,000 parameters, assuming that the resulting network must be deployed in a resource-constrained IoT device. The designs found with and without the predictor achieve validation accuracies of 78.68% and 80.65%, respectively. Both outperform a larger model from the recent literature. The predictor slightly constraints the evolution of solutions, but it approximately halves the computational effort. After extending the test to the CIFAR-100 dataset, the proposal achieves a validation accuracy of 65.43% with 478,006 parameters in its fastest configuration, competing with current results in the literature.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Franco-Gaona, Erick; Avila-Garcia, Maria Susana; Cruz-Aceves, Ivan
Automatic Neural Architecture Search Based on an Estimation of Distribution Algorithm for Binary Classification of Image Databases Journal Article
In: Mathematics, vol. 13, no. 4, 2025, ISSN: 2227-7390.
@article{math13040605,
title = {Automatic Neural Architecture Search Based on an Estimation of Distribution Algorithm for Binary Classification of Image Databases},
author = {Erick Franco-Gaona and Maria Susana Avila-Garcia and Ivan Cruz-Aceves},
url = {https://www.mdpi.com/2227-7390/13/4/605},
doi = {10.3390/math13040605},
issn = {2227-7390},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Mathematics},
volume = {13},
number = {4},
abstract = {Convolutional neural networks (CNNs) are widely used for image classification; however, setting the appropriate hyperparameters before training is subjective and time consuming, and the search space is not properly explored. This paper presents a novel method for the automatic neural architecture search based on an estimation of distribution algorithm (EDA) for binary classification problems. The hyperparameters were coded in binary form due to the nature of the metaheuristics used in the automatic search stage of CNN architectures which was performed using the Boltzmann Univariate Marginal Distribution algorithm (BUMDA) chosen by statistical comparison between four metaheuristics to explore the search space, whose computational complexity is O(229). Moreover, the proposed method is compared with multiple state-of-the-art methods on five databases, testing its efficiency in terms of accuracy and F1-score. In the experimental results, the proposed method achieved an F1-score of 97.2%, 98.73%, 97.23%, 98.36%, and 98.7% in its best evaluation, better results than the literature. Finally, the computational time of the proposed method for the test set was ≈0.6 s, 1 s, 0.7 s, 0.5 s, and 0.1 s, respectively.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ma, Lianbo; Zhou, Yuee; Ma, Ye; Yu, Guo; Li, Qing; He, Qiang; Pei, Yan
Defying Multi-model Forgetting in One-shot Neural Architecture Search Using Orthogonal Gradient Learning Journal Article
In: IEEE Transactions on Computers, pp. 1-13, 2025.
@article{10880105,
title = {Defying Multi-model Forgetting in One-shot Neural Architecture Search Using Orthogonal Gradient Learning},
author = {Lianbo Ma and Yuee Zhou and Ye Ma and Guo Yu and Qing Li and Qiang He and Yan Pei},
url = {https://ieeexplore.ieee.org/abstract/document/10880105},
doi = {10.1109/TC.2025.3540650},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {IEEE Transactions on Computers},
pages = {1--13},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Mecharbat, Lotfi Abdelkrim; Marchisio, Alberto; Shafique, Muhammad; Ghassemi, Mohammad M.; Alhanai, Tuka
MoENAS: Mixture-of-Expert based Neural Architecture Search for jointly Accurate, Fair, and Robust Edge Deep Neural Networks Technical Report
2025.
@techreport{mecharbat2025moenasmixtureofexpertbasedneural,
title = {MoENAS: Mixture-of-Expert based Neural Architecture Search for jointly Accurate, Fair, and Robust Edge Deep Neural Networks},
author = {Lotfi Abdelkrim Mecharbat and Alberto Marchisio and Muhammad Shafique and Mohammad M. Ghassemi and Tuka Alhanai},
url = {https://arxiv.org/abs/2502.07422},
institution = {arXiv},
eprint = {2502.07422},
eprinttype = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Lu, Aojun; Ke, Junchao; Ding, Chunhui; Fan, Jiahao; Sun, Yanan
Position: Continual Learning Benefits from An Evolving Population over An Unified Model Technical Report
2025.
@techreport{lu2025positioncontinuallearningbenefits,
title = {Position: Continual Learning Benefits from An Evolving Population over An Unified Model},
author = {Aojun Lu and Junchao Ke and Chunhui Ding and Jiahao Fan and Yanan Sun},
url = {https://arxiv.org/abs/2502.06210},
institution = {arXiv},
eprint = {2502.06210},
eprinttype = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
V, Srilakshmi; G, Uday Kiran; Moulika, B; Mahitha, G S; Laukya, G; Ruthick, M
Integrating NAS for Human Pose Estimation Journal Article
In: Procedia Computer Science, vol. 252, pp. 182-191, 2025, ISSN: 1877-0509, (4th International Conference on Evolutionary Computing and Mobile Sustainable Networks).
@article{V2025182,
title = {Integrating NAS for Human Pose Estimation},
author = {V, Srilakshmi and G, Uday Kiran and Moulika, B and Mahitha, G S and Laukya, G and Ruthick, M},
url = {https://www.sciencedirect.com/science/article/pii/S1877050924034525},
doi = {10.1016/j.procs.2024.12.020},
issn = {1877-0509},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Procedia Computer Science},
volume = {252},
pages = {182-191},
abstract = {Neural Architecture Search (NAS) technologies have become popular across various fields, allowing for the joint learning of neural network architectures and weights. However, most existing NAS methods are task-specific, focusing on optimizing a single architecture to replace human-designed networks while often neglecting domain knowledge. This paper introduces Pose Neural Fabrics Search (PoseNFS), a unique NAS framework that integrates domain knowledge via part-specific neural architecture search—a form of multi-task learning—for human posture estimation. PoseNFS utilizes a novel search space called Cell-based Neural Fabric (CNF), employing a differentiable search approach to facilitate learning at both micro and macro levels. By utilizing prior knowledge of human body structure, PoseNFS directs the search for part-specific architectures personalized to different body components, treating the localization of human key points as multiple disentangled sub-tasks. Experimental results on the MPII and MS-COCO datasets demonstrate that PoseNFS significantly outperforms a manually designed part-based baseline model and several state-of-the-art methods, validating the effectiveness of this knowledge-guided strategy.},
note = {4th International Conference on Evolutionary Computing and Mobile Sustainable Networks},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Týbl, Ondřej; Neumann, Lukáš
Training-free Neural Architecture Search through Variance of Knowledge of Deep Network Weights Technical Report
2025.
@techreport{tybl2025trainingfreeneuralarchitecturesearch,
title = {Training-free Neural Architecture Search through Variance of Knowledge of Deep Network Weights},
author = {Ondřej Týbl and Lukáš Neumann},
url = {https://arxiv.org/abs/2502.04975},
institution = {arXiv},
eprint = {2502.04975},
eprinttype = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
G, Uday Kiran; V, Srilakshmi; G, Padmini; G, Sreenidhi; B, Venkata Ramana; J, Preetham Reddy G
Neural Architecture Search-Driven Optimization of Deep Learning Models for Drug Response Prediction Journal Article
In: Procedia Computer Science, vol. 252, pp. 172-181, 2025, ISSN: 1877-0509, (4th International Conference on Evolutionary Computing and Mobile Sustainable Networks).
@article{G2025172,
title = {Neural Architecture Search-Driven Optimization of Deep Learning Models for Drug Response Prediction},
author = {G, Uday Kiran and V, Srilakshmi and G, Padmini and G, Sreenidhi and B, Venkata Ramana and J, Preetham Reddy G},
url = {https://www.sciencedirect.com/science/article/pii/S1877050924034513},
doi = {10.1016/j.procs.2024.12.019},
issn = {1877-0509},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Procedia Computer Science},
volume = {252},
pages = {172-181},
abstract = {In this study, the efficacy of various Neural Architecture Search (NAS) techniques for optimizing neural network architectures in drug response prediction is explored. Accurate prediction of drug responses is crucial for advancing personalized medicine, enabling personalized therapeutic interventions that enhance effectiveness and reduce adverse effects. Traditional models often rely on manually designed architectures, which may not fully capture the complex relationships among drug properties, genetic variations, and cellular phenotypes. An automated NAS approach is introduced to optimize neural network architectures for drug response prediction. The framework explores a defined search space using three techniques: Random Search, Q-Learning, and Bayesian Optimization. A modular architecture that integrates layers, activation functions, and dropout rates is proposed. Findings reveal the strengths and limitations of each NAS method, offering insights into effective model optimization strategies. Validation on publicly available pharmacogenomics datasets shows that NAS-optimized models outperform conventional deep learning and machine learning approaches, highlighting the potential of NAS to enhance predictive modelling in drug response and support personalized medicine and drug development.},
note = {4th International Conference on Evolutionary Computing and Mobile Sustainable Networks},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Solís-Martín, David; Galán-Páez, Juan; Borrego-Díaz, Joaquín
A Model for Learning-Curve Estimation in Efficient Neural Architecture Search and Its Application in Predictive Health Maintenance Journal Article
In: Mathematics, vol. 13, no. 4, 2025, ISSN: 2227-7390.
@article{math13040555,
title = {A Model for Learning-Curve Estimation in Efficient Neural Architecture Search and Its Application in Predictive Health Maintenance},
author = {David Solís-Martín and Juan Galán-Páez and Joaquín Borrego-Díaz},
url = {https://www.mdpi.com/2227-7390/13/4/555},
doi = {10.3390/math13040555},
issn = {2227-7390},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Mathematics},
volume = {13},
number = {4},
abstract = {A persistent challenge in machine learning is the computational inefficiency of neural architecture search (NAS), particularly in resource-constrained domains like predictive maintenance. This work introduces a novel learning-curve estimation framework that reduces NAS computational costs by over 50% while maintaining model performance, addressing a critical bottleneck in automated machine learning design. By developing a data-driven estimator trained on 62 different predictive maintenance datasets, we demonstrate a generalized approach to early-stopping trials during neural network optimization. Our methodology not only reduces computational resources but also provides a transferable technique for efficient neural network architecture exploration across complex industrial monitoring tasks. The proposed approach achieves a remarkable balance between computational efficiency and model performance, with only a 2% performance degradation, showcasing a significant advancement in automated neural architecture optimization strategies.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Liu, Guangyuan; Li, Yangyang; Chen, Yanqiao; Shang, Ronghua; Jiao, Licheng
AutoPolCNN: A neural architecture search method of convolutional neural network for PolSAR image classification Journal Article
In: Knowledge-Based Systems, vol. 312, pp. 113122, 2025, ISSN: 0950-7051.
@article{LIU2025113122,
title = {AutoPolCNN: A neural architecture search method of convolutional neural network for PolSAR image classification},
author = {Guangyuan Liu and Yangyang Li and Yanqiao Chen and Ronghua Shang and Licheng Jiao},
url = {https://www.sciencedirect.com/science/article/pii/S0950705125001698},
doi = {10.1016/j.knosys.2025.113122},
issn = {0950-7051},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Knowledge-Based Systems},
volume = {312},
pages = {113122},
abstract = {Convolutional neural networks (CNNs), as a kind of typical classification model known for good performance, have been utilized to cope with polarimetric synthetic aperture radar (PolSAR) image classification. Nevertheless, the performances of CNNs highly rely on well-designed network architectures and there is no theoretical guarantee on how to design them. As a result, the architectures of CNNs can be only designed by human experts or by trial and error, which makes the architecture design is annoying and time-consuming. So, a neural architecture search (NAS) method of CNN called AutoPolCNN, which can determine the architecture automatically, is proposed in this paper. Specifically, we firstly design the search space which covers the main components of CNNs like convolution and pooling operators. Secondly, considering the fact that the number of layers can also influence the performance of CNN, we propose a super normal module (SNM), which can dynamically adjust the number of network layers according to different datasets in the search stage. Finally, we develop the loss function and the search method for the designed search space. Via AutoPolCNN, preparing the data and waiting for the classification results are enough. Experiments carried out on three PolSAR datasets prove that the architecture can be automatically determined by AutoPolCNN within an hour (at least 10 times faster than existing NAS methods) and has higher overall accuracy (OA) than state-of-the-art (SOTA) PolSAR image classification CNN models.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Deevi, Sri Aditya; Mishra, Asish Kumar; Mishra, Deepak; L, Ravi Kumar; P, Bharat Kumar G V; G, Murali Krishna Bhagavan
Efficient Self-Supervised Neural Architecture Search Proceedings Article
In: 2025 19th International Conference on Ubiquitous Information Management and Communication (IMCOM), pp. 1-8, 2025.
@inproceedings{10857490,
title = {Efficient Self-Supervised Neural Architecture Search},
author = {Sri Aditya Deevi and Asish Kumar Mishra and Deepak Mishra and L, Ravi Kumar and P, Bharat Kumar G V and G, Murali Krishna Bhagavan},
url = {https://ieeexplore.ieee.org/abstract/document/10857490},
doi = {10.1109/IMCOM64595.2025.10857490},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
booktitle = {2025 19th International Conference on Ubiquitous Information Management and Communication (IMCOM)},
pages = {1--8},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Siddiqui, Shahid; Kyrkou, Christos; Theocharides, Theocharis
Efficient Global Neural Architecture Search Technical Report
2025.
@techreport{siddiqui2025efficientglobalneuralarchitecture,
title = {Efficient Global Neural Architecture Search},
author = {Shahid Siddiqui and Christos Kyrkou and Theocharis Theocharides},
url = {https://arxiv.org/abs/2502.03553},
institution = {arXiv},
eprint = {2502.03553},
eprinttype = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Zhao, Xinlong; Sun, Jiande; Zhang, Jia; Hou, Sujuan; Li, Shuai; Liu, Tong; Liu, Ke
PerfSeer: An Efficient and Accurate Deep Learning Models Performance Predictor Technical Report
2025.
@techreport{zhao2025perfseerefficientaccuratedeep,
title = {PerfSeer: An Efficient and Accurate Deep Learning Models Performance Predictor},
author = {Xinlong Zhao and Jiande Sun and Jia Zhang and Sujuan Hou and Shuai Li and Tong Liu and Ke Liu},
url = {https://arxiv.org/abs/2502.01206},
institution = {arXiv},
eprint = {2502.01206},
eprinttype = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Li, Jianzhao; Wang, Shanfeng; Yang, Rui; Gong, Maoguo; Hu, Zhuping; Zhang, Ning; Sheng, Kai; Zhou, Yu
Towards Federated Customized Neural Architecture Search for Remote Sensing Scene Classification Journal Article
In: IEEE Transactions on Geoscience and Remote Sensing, pp. 1-1, 2025.
@article{10858749,
title = {Towards Federated Customized Neural Architecture Search for Remote Sensing Scene Classification},
author = {Jianzhao Li and Shanfeng Wang and Rui Yang and Maoguo Gong and Zhuping Hu and Ning Zhang and Kai Sheng and Yu Zhou},
url = {https://ieeexplore.ieee.org/abstract/document/10858749},
doi = {10.1109/TGRS.2025.3537085},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {IEEE Transactions on Geoscience and Remote Sensing},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Banerjee, Somnath
Neural Architecture Search Based Deepfake Detection Model using YOLO Journal Article
In: International Journal of Advanced Research in Science, Communication and Technology, vol. 5, no. 1, pp. 375 - 383, 2025.
@article{banerjee:hal-04901372,
title = {Neural Architecture Search Based Deepfake Detection Model using YOLO},
author = {Somnath Banerjee},
url = {https://hal.science/hal-04901372},
doi = {10.48175/ijarsct-22938},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {International Journal of Advanced Research in Science, Communication and Technology},
volume = {5},
number = {1},
pages = {375--383},
publisher = {Naksh Solutions},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Errica, Federico; Christiansen, Henrik; Zaverkin, Viktor; Niepert, Mathias; Alesiani, Francesco
Adaptive Width Neural Networks Technical Report
2025.
@techreport{errica2025adaptivewidthneuralnetworks,
title = {Adaptive Width Neural Networks},
author = {Federico Errica and Henrik Christiansen and Viktor Zaverkin and Mathias Niepert and Francesco Alesiani},
url = {https://arxiv.org/abs/2501.15889},
institution = {arXiv},
eprint = {2501.15889},
eprinttype = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Sander, Jacob; Cohen, Achraf; Dasari, Venkat R.; Venable, Brent; Jalaian, Brian
On Accelerating Edge AI: Optimizing Resource-Constrained Environments Technical Report
2025.
@techreport{sander2025acceleratingedgeaioptimizing,
title = {On Accelerating Edge AI: Optimizing Resource-Constrained Environments},
author = {Jacob Sander and Achraf Cohen and Venkat R. Dasari and Brent Venable and Brian Jalaian},
url = {https://arxiv.org/abs/2501.15014},
institution = {arXiv},
eprint = {2501.15014},
eprinttype = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Tu, Xiaolong; Chen, Dawei; Han, Kyungtae; Altintas, Onur; Wang, Haoxin
GreenAuto: An Automated Platform for Sustainable AI Model Design on Edge Devices Technical Report
2025.
@techreport{tu2025greenautoautomatedplatformsustainable,
title = {GreenAuto: An Automated Platform for Sustainable AI Model Design on Edge Devices},
author = {Xiaolong Tu and Dawei Chen and Kyungtae Han and Onur Altintas and Haoxin Wang},
url = {https://arxiv.org/abs/2501.14995},
institution = {arXiv},
eprint = {2501.14995},
eprinttype = {arXiv},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}