Maintained by Difan Deng and Marius Lindauer.
The following list considers papers related to neural architecture search. It is by no means complete. If you miss a paper on the list, please let us know.
Please note that although NAS methods steadily improve, the quality of empirical evaluations in this field is still lagging behind compared to other areas in machine learning, AI and optimization. We would therefore like to share some best practices for empirical evaluations of NAS methods, which we believe will facilitate sustained and measurable progress in the field. If you are interested in a teaser, please read our blog post or directly jump to our checklist.
Transformers have gained increasing popularity in different domains. For a comprehensive list of papers focusing on Neural Architecture Search for Transformer-Based spaces, the awesome-transformer-search repo is all you need.
2022
Li, Yanyu; Zhao, Pu; Yuan, Geng; Lin, Xue; Wang, Yanzhi; Chen, Xin
Pruning-as-Search: Efficient Neural Architecture Search via Channel Pruning and Structural Reparameterization Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-01198b,
title = {{Pruning-as-Search}: Efficient Neural Architecture Search via Channel Pruning and Structural Reparameterization},
author = {Yanyu Li and Pu Zhao and Geng Yuan and Xue Lin and Yanzhi Wang and Xin Chen},
url = {https://doi.org/10.48550/arXiv.2206.01198},
doi = {10.48550/arXiv.2206.01198},
eprint = {2206.01198},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.01198},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Risso, Matteo; Burrello, Alessio; Benini, Luca; Macii, Enrico; Poncino, Massimo; Pagliari, Daniele Jahier
Multi-Complexity-Loss DNAS for Energy-Efficient and Memory-Constrained Deep Neural Networks Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-00302,
title = {Multi-Complexity-Loss {DNAS} for Energy-Efficient and Memory-Constrained Deep Neural Networks},
author = {Matteo Risso and Alessio Burrello and Luca Benini and Enrico Macii and Massimo Poncino and Daniele Jahier Pagliari},
url = {https://doi.org/10.48550/arXiv.2206.00302},
doi = {10.48550/arXiv.2206.00302},
eprint = {2206.00302},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.00302},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Li, Yanyu; Yuan, Geng; Wen, Yang; Hu, Eric; Evangelidis, Georgios; Tulyakov, Sergey; Wang, Yanzhi; Ren, Jian
EfficientFormer: Vision Transformers at MobileNet Speed Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-01191,
title = {{EfficientFormer}: Vision Transformers at {MobileNet} Speed},
author = {Yanyu Li and Geng Yuan and Yang Wen and Eric Hu and Georgios Evangelidis and Sergey Tulyakov and Yanzhi Wang and Jian Ren},
url = {https://doi.org/10.48550/arXiv.2206.01191},
doi = {10.48550/arXiv.2206.01191},
eprint = {2206.01191},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.01191},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Zheng, Xiawu; Fei, Xiang; Zhang, Lei; Wu, Chenglin; Chao, Fei; Liu, Jianzhuang; Zeng, Wei; Tian, Yonghong; Ji, Rongrong
Neural Architecture Search With Representation Mutual Information Proceedings Article
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11912–11921, 2022.
@inproceedings{zheng2022neural,
  title     = {Neural Architecture Search With Representation Mutual Information},
  author    = {Xiawu Zheng and Xiang Fei and Lei Zhang and Chenglin Wu and Fei Chao and Jianzhuang Liu and Wei Zeng and Yonghong Tian and Rongrong Ji},
  booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages     = {11912--11921},
  year      = {2022},
  date      = {2022-01-01},
  url       = {https://openaccess.thecvf.com/content/CVPR2022/papers/Zheng_Neural_Architecture_Search_With_Representation_Mutual_Information_CVPR_2022_paper.pdf},
  urldate   = {2022-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Pan, Junyi; Sun, Chong; Zhou, Yizhou; Zhang, Ying; Li, Chen
Distribution Consistent Neural Architecture Search Proceedings Article
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10884–10893, 2022.
@inproceedings{pan2022distribution,
  title     = {Distribution Consistent Neural Architecture Search},
  author    = {Junyi Pan and Chong Sun and Yizhou Zhou and Ying Zhang and Chen Li},
  booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages     = {10884--10893},
  year      = {2022},
  date      = {2022-01-01},
  url       = {https://openaccess.thecvf.com/content/CVPR2022/papers/Pan_Distribution_Consistent_Neural_Architecture_Search_CVPR_2022_paper.pdf},
  urldate   = {2022-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Xie, Pengtao; Du, Xuefeng
Performance-Aware Mutual Knowledge Distillation for Improving Neural Architecture Search Proceedings Article
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11922–11932, 2022.
@inproceedings{xie2022performance,
  title     = {Performance-Aware Mutual Knowledge Distillation for Improving Neural Architecture Search},
  author    = {Pengtao Xie and Xuefeng Du},
  booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages     = {11922--11932},
  year      = {2022},
  date      = {2022-01-01},
  url       = {https://openaccess.thecvf.com/content/CVPR2022/papers/Xie_Performance-Aware_Mutual_Knowledge_Distillation_for_Improving_Neural_Architecture_Search_CVPR_2022_paper.pdf},
  urldate   = {2022-01-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Xu, Kepeng; He, Gang
DNAS: A Decoupled Global Neural Architecture Search Method Proceedings Article
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1979–1985, 2022.
@inproceedings{xu2022dnas,
title = {{DNAS}: A Decoupled Global Neural Architecture Search Method},
author = {Kepeng Xu and Gang He},
url = {https://openaccess.thecvf.com/content/CVPR2022W/NAS/papers/Xu_DNASA_Decoupled_Global_Neural_Architecture_Search_Method_CVPRW_2022_paper.pdf},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops},
pages = {1979--1985},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Shang, Ronghua; Zhu, Songling; Ren, Jinhong; Liu, Hangcheng; Jiao, Licheng
Evolutionary neural architecture search based on evaluation correction and functional units Journal Article
In: Knowledge-Based Systems, vol. 251, pp. 109206, 2022, ISSN: 0950-7051.
@article{SHANG2022109206,
title = {Evolutionary neural architecture search based on evaluation correction and functional units},
author = {Ronghua Shang and Songling Zhu and Jinhong Ren and Hangcheng Liu and Licheng Jiao},
url = {https://www.sciencedirect.com/science/article/pii/S0950705122006001},
doi = {10.1016/j.knosys.2022.109206},
issn = {0950-7051},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Knowledge-Based Systems},
volume = {251},
pages = {109206},
abstract = {Neural architecture search (NAS) has been a great success in the automated design of deep neural networks. However, neural architecture search using evolutionary algorithms is challenging due to the diverse structure of neural networks and the difficulty in performance evaluation. To this end, this paper proposes an evolutionary neural architecture search algorithm (called EF-ENAS) based on evaluation corrections and functional units. First, a mating selection operation based on evaluation correction is developed, which can help EF-ENAS discriminate high-performance network architectures and reduce the harmful effects of low fidelity accuracy evaluation methods. Then, a functional unit-based network architecture crossover operation is designed, which divides the neural network into different functional units for crossover and protects valuable network architectures from destruction. Finally, the idea of species protection is introduced into the traditional environmental selection operation and a species protection-based environmental selection operation is designed, which can improve the diversity of network architectures in a population. The EF-ENAS is tested on ten benchmark datasets with varying complexities. In addition, the proposed algorithm is compared with 44 state-of-the-art algorithms, including DARTS, EvoCNN, CNN-GA, AE-CNN, etc. The experimental results show that the proposed algorithm11The code of EF-ENAS is available at https://github.com/codesl173/EF-ENAS. can automatically design neural networks and perform better.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Liu, Shiqing; Zhang, Haoyu; Jin, Yaochu
A Survey on Surrogate-assisted Efficient Neural Architecture Search Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-01520,
title = {A Survey on Surrogate-assisted Efficient Neural Architecture Search},
author = {Shiqing Liu and Haoyu Zhang and Yaochu Jin},
url = {https://doi.org/10.48550/arXiv.2206.01520},
doi = {10.48550/arXiv.2206.01520},
eprint = {2206.01520},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.01520},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Kim, Youngkee; Jung, Soyi; Choi, Minseok; Kim, Joongheon
Search Space Adaptation for Differentiable Neural Architecture Search in Image Classification Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-02098,
title = {Search Space Adaptation for Differentiable Neural Architecture Search in Image Classification},
author = {Youngkee Kim and Soyi Jung and Minseok Choi and Joongheon Kim},
url = {https://doi.org/10.48550/arXiv.2206.02098},
doi = {10.48550/arXiv.2206.02098},
eprint = {2206.02098},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.02098},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Li, Wenshuo; Chen, Xinghao; Bai, Jinyu; Ning, Xuefei; Wang, Yunhe
Searching for Energy-Efficient Hybrid Adder-Convolution Neural Networks Proceedings Article
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1943–1952, 2022.
@inproceedings{li2022searching,
title = {Searching for Energy-Efficient Hybrid Adder-Convolution Neural Networks},
author = {Wenshuo Li and Xinghao Chen and Jinyu Bai and Xuefei Ning and Yunhe Wang},
url = {https://openaccess.thecvf.com/content/CVPR2022W/NAS/papers/Li_Searching_for_Energy-Efficient_Hybrid_Adder-Convolution_Neural_Networks_CVPRW_2022_paper.pdf},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops},
pages = {1943--1952},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ferjani, Imen; Hidri, Minyar Sassi; Frihida, Ali
SiNoptiC: swarm intelligence optimisation of convolutional neural network architectures for text classification Journal Article
In: International Journal of Computer Applications in Technology, vol. 68, no. 1, pp. 82-100, 2022.
@article{doi:10.1504/IJCAT.2022.123237,
title = {{SiNoptiC}: swarm intelligence optimisation of convolutional neural network architectures for text classification},
author = {Imen Ferjani and Minyar Sassi Hidri and Ali Frihida},
url = {https://www.inderscienceonline.com/doi/abs/10.1504/IJCAT.2022.123237},
doi = {10.1504/IJCAT.2022.123237},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {International Journal of Computer Applications in Technology},
volume = {68},
number = {1},
pages = {82--100},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Liu, Jinyuan; Wu, Yuhui; Wu, Guanyao; Liu, Risheng; Fan, Xin
Learn to Search a Lightweight Architecture for Target-aware Infrared and Visible Image Fusion Journal Article
In: IEEE Signal Processing Letters, pp. 1-5, 2022.
@article{9789723,
title = {Learn to Search a Lightweight Architecture for Target-aware Infrared and Visible Image Fusion},
author = {Jinyuan Liu and Yuhui Wu and Guanyao Wu and Risheng Liu and Xin Fan},
url = {https://ieeexplore.ieee.org/abstract/document/9789723},
doi = {10.1109/LSP.2022.3180672},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Signal Processing Letters},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Li, Zhuowei; Gao, Yibo; Zha, Zhenzhou; Hu, Zhiqiang; Xia, Qing; Zhang, Shaoting; Metaxas, Dimitris N.
Towards Self-supervised and Weight-preserving Neural Architecture Search Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-04125,
title = {Towards Self-supervised and Weight-preserving Neural Architecture Search},
author = {Zhuowei Li and Yibo Gao and Zhenzhou Zha and Zhiqiang Hu and Qing Xia and Shaoting Zhang and Dimitris N. Metaxas},
url = {https://doi.org/10.48550/arXiv.2206.04125},
doi = {10.48550/arXiv.2206.04125},
eprint = {2206.04125},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.04125},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Du, Yipeng; Liu, Jian; Wang, Xiang; Wang, Peng
SSVEP based Emotion Recognition for IoT via Multiobjective Neural Architecture Search Journal Article
In: IEEE Internet of Things Journal, pp. 1-1, 2022.
@article{9793561,
title = {{SSVEP} based Emotion Recognition for {IoT} via Multiobjective Neural Architecture Search},
author = {Yipeng Du and Jian Liu and Xiang Wang and Peng Wang},
url = {https://ieeexplore.ieee.org/abstract/document/9793561},
doi = {10.1109/JIOT.2022.3180215},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Internet of Things Journal},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhang, Yuanhan; Zhou, Kaiyang; Liu, Ziwei
Neural Prompt Search Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-04673,
title = {Neural Prompt Search},
author = {Yuanhan Zhang and Kaiyang Zhou and Ziwei Liu},
url = {https://doi.org/10.48550/arXiv.2206.04673},
doi = {10.48550/arXiv.2206.04673},
eprint = {2206.04673},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.04673},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Huang, Mingqiang; Liu, Yucen; Man, Changhai; Li, Kai; Cheng, Quan; Mao, Wei; Yu, Hao
A High Performance Multi-Bit-Width Booth Vector Systolic Accelerator for NAS Optimized Deep Learning Neural Networks Journal Article
In: IEEE Transactions on Circuits and Systems I: Regular Papers, pp. 1-13, 2022.
@article{9793397,
title = {A High Performance Multi-Bit-Width Booth Vector Systolic Accelerator for {NAS} Optimized Deep Learning Neural Networks},
author = {Mingqiang Huang and Yucen Liu and Changhai Man and Kai Li and Quan Cheng and Wei Mao and Hao Yu},
url = {https://ieeexplore.ieee.org/abstract/document/9793397},
doi = {10.1109/TCSI.2022.3178474},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Circuits and Systems I: Regular Papers},
pages = {1--13},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Xue, Xizhe; Zhang, Haokui; Fang, Bei; Bai, Zongwen; Li, Ying
Grafting Transformer on Automatically Designed Convolutional Neural Network for Hyperspectral Image Classification Journal Article
In: IEEE Transactions on Geoscience and Remote Sensing, vol. 60, pp. 1-16, 2022.
@article{9791305,
title = {Grafting Transformer on Automatically Designed Convolutional Neural Network for Hyperspectral Image Classification},
author = {Xizhe Xue and Haokui Zhang and Bei Fang and Zongwen Bai and Ying Li},
url = {https://ieeexplore.ieee.org/abstract/document/9791305},
doi = {10.1109/TGRS.2022.3180685},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Geoscience and Remote Sensing},
volume = {60},
pages = {1--16},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Aradhya, Abhay M. S.; Ashfahani, Andri; Angelina, Fienny; Pratama, Mahardhika; Mello, Rodrigo Fernandes; Sundaram, Suresh
Autonomous CNN (AutoCNN): A data-driven approach to network architecture determination Journal Article
In: Information Sciences, vol. 607, pp. 638-653, 2022, ISSN: 0020-0255.
@article{ARADHYA2022638,
title = {Autonomous {CNN} ({AutoCNN}): A data-driven approach to network architecture determination},
author = {Abhay M. S. Aradhya and Andri Ashfahani and Fienny Angelina and Mahardhika Pratama and Rodrigo Fernandes Mello and Suresh Sundaram},
url = {https://www.sciencedirect.com/science/article/pii/S0020025522005370},
doi = {10.1016/j.ins.2022.05.100},
issn = {0020-0255},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Information Sciences},
volume = {607},
pages = {638--653},
abstract = {Designing a Convolutional Neural Networks (CNN) is a complex task and requires expert knowledge to optimize the performance and network architecture. In this paper, a novel data-driven approach is proposed to determine the architecture of CNN models. The proposed Autonomous Convolutional Neural Networks (AutoCNNThe executable code and original numerical results can be downloaded from (https://tinyurl.com/AutoCNN)) algorithm introduces data driven strategies for addition of new convolutional layers, pruning of redundant filters and training cycle optimization. AutoCNN is evaluated using MNIST, MNIST-rot-back-image, Fashion MNIST and the ADHD200 datasets to measure the performance on small datasets with varied feature distributions. The results indicate that AutoCNN optimizes the CNN network architecture and helps maximise the classification performance. The data-driven network determination approach introduced in this paper was found to not only provides competitive performance similar to existing evolutionary computation based network determination algorithms in literature, but was found to be an effective optimization tool to improve the performance of existing CNN architectures. Further, the AutoCNN was found to highly immune to noise in the dataset and has proven to be effective method to transfer knowledge between related datasets. Therefore, the AutoCNN is a highly versatile CNN architecture determination tool that has a wide range of applications in the field of autonomous driving, medical image analysis, image enhancement, camera based security monitoring and image based fault detection.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Shahawy, Mohamed; Benkhelifa, Elhadj; White, David
A Review on Plastic Artificial Neural Networks: Exploring the Intersection between Neural Architecture Search and Continual Learning Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-05625,
title = {A Review on Plastic Artificial Neural Networks: Exploring the Intersection between Neural Architecture Search and Continual Learning},
author = {Mohamed Shahawy and Elhadj Benkhelifa and David White},
url = {https://doi.org/10.48550/arXiv.2206.05625},
doi = {10.48550/arXiv.2206.05625},
eprint = {2206.05625},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.05625},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Franken, G. G. H.; Singh, Prabhant; Vanschoren, Joaquin
EmProx: Neural Network Performance Estimation For Neural Architecture Search Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-05972,
title = {{EmProx}: Neural Network Performance Estimation For Neural Architecture Search},
author = {G. G. H. Franken and Prabhant Singh and Joaquin Vanschoren},
url = {https://doi.org/10.48550/arXiv.2206.05972},
doi = {10.48550/arXiv.2206.05972},
eprint = {2206.05972},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.05972},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Lu, Qing; Xu, Xiaowei; Dong, Shunjie; Hao, Cong; Yang, Lei; Zhuo, Cheng; Shi, Yiyu
RT-DNAS: Real-time Constrained Differentiable Neural Architecture Search for 3D Cardiac Cine MRI Segmentation Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-04682,
title = {{RT-DNAS}: Real-time Constrained Differentiable Neural Architecture Search for {3D} Cardiac Cine {MRI} Segmentation},
author = {Qing Lu and Xiaowei Xu and Shunjie Dong and Cong Hao and Lei Yang and Cheng Zhuo and Yiyu Shi},
url = {https://doi.org/10.48550/arXiv.2206.04682},
doi = {10.48550/arXiv.2206.04682},
eprint = {2206.04682},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.04682},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Liu, Jiawei; Zhang, Kaiyu; Hu, Weitai; Yang, Qing
Improve Ranking Correlation of Super-net through Training Scheme from One-shot NAS to Few-shot NAS Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-05896,
title = {Improve Ranking Correlation of Super-net through Training Scheme from One-shot {NAS} to Few-shot {NAS}},
author = {Jiawei Liu and Kaiyu Zhang and Weitai Hu and Qing Yang},
url = {https://doi.org/10.48550/arXiv.2206.05896},
doi = {10.48550/arXiv.2206.05896},
eprint = {2206.05896},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.05896},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Eslahi, Samira Vafay; Tao, Jian; Ji, Jim
ERNAS: An Evolutionary Neural Architecture Search for Magnetic Resonance Image Reconstructions Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-07280,
title = {{ERNAS}: An Evolutionary Neural Architecture Search for Magnetic Resonance Image Reconstructions},
author = {Samira Vafay Eslahi and Jian Tao and Jim Ji},
url = {https://doi.org/10.48550/arXiv.2206.07280},
doi = {10.48550/arXiv.2206.07280},
eprint = {2206.07280},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.07280},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Yang, Yingguang; Yang, Renyu; Li, Yangyang; Cui, Kai; Yang, Zhiqin; Wang, Yue; Xu, Jie; Xie, Haiyong
RoSGAS: Adaptive Social Bot Detection with Reinforced Self-Supervised GNN Architecture Search Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-06757,
title = {{RoSGAS}: Adaptive Social Bot Detection with Reinforced Self-Supervised {GNN} Architecture Search},
author = {Yingguang Yang and Renyu Yang and Yangyang Li and Kai Cui and Zhiqin Yang and Yue Wang and Jie Xu and Haiyong Xie},
url = {https://doi.org/10.48550/arXiv.2206.06757},
doi = {10.48550/arXiv.2206.06757},
eprint = {2206.06757},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.06757},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Ren, Xuhong; Chen, Jianlang; Juefei-Xu, Felix; Xue, Wanli; Guo, Qing; Ma, Lei; Zhao, Jianjun; Chen, Shengyong
DARTSRepair: Core-failure-set guided DARTS for network robustness to common corruptions Journal Article
In: Pattern Recognition, vol. 131, pp. 108864, 2022, ISSN: 0031-3203.
@article{REN2022108864,
title = {{DARTSRepair}: Core-failure-set guided {DARTS} for network robustness to common corruptions},
author = {Xuhong Ren and Jianlang Chen and Felix Juefei-Xu and Wanli Xue and Qing Guo and Lei Ma and Jianjun Zhao and Shengyong Chen},
url = {https://www.sciencedirect.com/science/article/pii/S0031320322003454},
doi = {10.1016/j.patcog.2022.108864},
issn = {0031-3203},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Pattern Recognition},
volume = {131},
pages = {108864},
abstract = {Network architecture search (NAS), in particular the differentiable architecture search (DARTS) method, has shown a great power to learn excellent model architectures on the specific dataset of interest. In contrast to using a fixed dataset, in this work, we focus on a different but important scenario for NAS: how to refine a deployed network’s model architecture to enhance its robustness with the guidance of a few collected and misclassified examples that are degraded by some real-world unknown corruptions having a specific pattern (e.g., noise, blur, etc..). To this end, we first conduct an empirical study to validate that the model architectures can be definitely related to the corruption patterns. Surprisingly, by just adding a few corrupted and misclassified examples (e.g., 103 examples) to the clean training dataset (e.g., 5.0×104 examples), we can refine the model architecture and enhance the robustness significantly. To make it more practical, the key problem, i.e., how to select the proper failure examples for the effective NAS guidance, should be carefully investigated. Then, we propose a novel core-failure-set guided DARTS that embeds a K-center-greedy algorithm for DARTS to select suitable corrupted failure examples to refine the model architecture. We use our method for DARTS-refined DNNs on the clean as well as 15 corruptions with the guidance of four specific real-world corruptions. Compared with the state-of-the-art NAS as well as data-augmentation-based enhancement methods, our final method can achieve higher accuracy on both corrupted datasets and the original clean dataset. On some of the corruption patterns, we can achieve as high as over 45% absolute accuracy improvements.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Dervisi, Foteini; Kyriakides, George; Margaritis, Konstantinos
Evaluating Acceleration Techniques for Genetic Neural Architecture Search Proceedings Article
In: Iliadis, Lazaros; Jayne, Chrisina; Tefas, Anastasios; Pimenidis, Elias (Ed.): Engineering Applications of Neural Networks, pp. 3–14, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-08223-8.
@inproceedings{10.1007/978-3-031-08223-8_1,
  title     = {Evaluating Acceleration Techniques for Genetic Neural Architecture Search},
  author    = {Foteini Dervisi and George Kyriakides and Konstantinos Margaritis},
  editor    = {Lazaros Iliadis and Chrisina Jayne and Anastasios Tefas and Elias Pimenidis},
  booktitle = {Engineering Applications of Neural Networks},
  pages     = {3--14},
  publisher = {Springer International Publishing},
  address   = {Cham},
  isbn      = {978-3-031-08223-8},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  abstract  = {The increase in the available data and computational power has led to the rapid evolution of the field of deep learning over the last few years. However, the success of deep learning methods relies on making appropriate neural architecture choices, which is not a straightforward task and usually requires a time-consuming trial-and-error procedure. Neural architecture search is the process of automating the design of neural network architectures capable of performing well on specific tasks. It is a field that has emerged in order to address the problem of designing efficient neural architectures and is gaining popularity due to the rapid evolution of deep learning, which has led to an increasing need for the discovery of high-performing neural architectures. This paper focuses on evolutionary neural architecture search, which is an efficient but also time-consuming and computationally expensive neural architecture search approach, and aims to pave the way for speeding up such algorithms by assessing the effect of acceleration methods on the overall performance of the neural architecture search procedure as well as on the produced architectures.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Zhang, Wentao; Lin, Zheyu; Shen, Yu; Li, Yang; Yang, Zhi; Cui, Bin
DFG-NAS: Deep and Flexible Graph Neural Architecture Search Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-08582,
title = {{DFG-NAS}: Deep and Flexible Graph Neural Architecture Search},
author = {Wentao Zhang and Zheyu Lin and Yu Shen and Yang Li and Zhi Yang and Bin Cui},
url = {https://doi.org/10.48550/arXiv.2206.08582},
doi = {10.48550/arXiv.2206.08582},
eprint = {2206.08582},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.08582},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Xue, Yu; Qin, Jiafeng
Partial Connection Based on Channel Attention for Differentiable Neural Architecture Search Journal Article
In: IEEE Transactions on Industrial Informatics, pp. 1-10, 2022.
@article{9802692,
title = {Partial Connection Based on Channel Attention for Differentiable Neural Architecture Search},
author = {Yu Xue and Jiafeng Qin},
url = {https://ieeexplore.ieee.org/abstract/document/9802692},
doi = {10.1109/TII.2022.3184700},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Industrial Informatics},
pages = {1--10},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Qin, Yijian; Zhang, Ziwei; Wang, Xin; Zhang, Zeyang; Zhu, Wenwu
NAS-Bench-Graph: Benchmarking Graph Neural Architecture Search Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-09166,
title = {{NAS-Bench-Graph}: Benchmarking Graph Neural Architecture Search},
author = {Yijian Qin and Ziwei Zhang and Xin Wang and Zeyang Zhang and Wenwu Zhu},
url = {https://doi.org/10.48550/arXiv.2206.09166},
doi = {10.48550/arXiv.2206.09166},
eprint = {2206.09166},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.09166},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Jung, Harim; Oh, Myeong-Seok; Yang, Cheoljong; Lee, Seong-Whan
Neural Architecture Adaptation for Object Detection by Searching Channel Dimensions and Mapping Pre-trained Parameters Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-08509,
title = {Neural Architecture Adaptation for Object Detection by Searching Channel Dimensions and Mapping Pre-trained Parameters},
author = {Harim Jung and Myeong-Seok Oh and Cheoljong Yang and Seong-Whan Lee},
url = {https://doi.org/10.48550/arXiv.2206.08509},
doi = {10.48550/arXiv.2206.08509},
eprint = {2206.08509},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.08509},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Risso, Matteo; Burrello, Alessio; Benini, Luca; Macii, Enrico; Poncino, Massimo; Pagliari, Daniele Jahier
Channel-wise Mixed-precision Assignment for DNN Inference on Constrained Edge Nodes Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-08852,
title = {Channel-wise Mixed-precision Assignment for {DNN} Inference on Constrained Edge Nodes},
author = {Matteo Risso and Alessio Burrello and Luca Benini and Enrico Macii and Massimo Poncino and Daniele Jahier Pagliari},
url = {https://doi.org/10.48550/arXiv.2206.08852},
doi = {10.48550/arXiv.2206.08852},
eprint = {2206.08852},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.08852},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Hasan, Noha W.; Saudi, Ali S.; Khalil, Mahmoud I.; Abbas, Hazem M.
A Genetic Algorithm Approach to Automate Architecture Design for Acoustic Scene Classification Journal Article
In: IEEE Transactions on Evolutionary Computation, pp. 1-1, 2022.
@article{9803192,
title = {A Genetic Algorithm Approach to Automate Architecture Design for Acoustic Scene Classification},
author = {Noha W. Hasan and Ali S. Saudi and Mahmoud I. Khalil and Hazem M. Abbas},
doi = {10.1109/TEVC.2022.3185543},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Evolutionary Computation},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lou, Xiaoxuan; Guo, Shangwei; Li, Jiwei; Zhang, Tianwei
Ownership Verification of DNN Architectures via Hardware Cache Side Channels Journal Article
In: IEEE Transactions on Circuits and Systems for Video Technology, pp. 1-1, 2022.
@article{9801864,
title = {Ownership Verification of DNN Architectures via Hardware Cache Side Channels},
author = {Xiaoxuan Lou and Shangwei Guo and Jiwei Li and Tianwei Zhang},
url = {https://ieeexplore.ieee.org/abstract/document/9801864},
doi = {10.1109/TCSVT.2022.3184644},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Circuits and Systems for Video Technology},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Perego, Riccardo; Candelieri, Antonio; Archetti, Francesco; Pau, Danilo
AutoTinyML for microcontrollers: Dealing with black-box deployability Journal Article
In: Expert Systems with Applications, vol. 207, pp. 117876, 2022, ISSN: 0957-4174.
@article{PEREGO2022117876,
title = {AutoTinyML for microcontrollers: Dealing with black-box deployability},
author = {Riccardo Perego and Antonio Candelieri and Francesco Archetti and Danilo Pau},
url = {https://www.sciencedirect.com/science/article/pii/S0957417422011289},
doi = {10.1016/j.eswa.2022.117876},
issn = {0957-4174},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Expert Systems with Applications},
volume = {207},
pages = {117876},
abstract = {While many companies are currently leveraging on Cloud, data centres and specialized hardware (e.g., GPUs and TPUs) to train very accurate Machine Learning models, the need to deploy and run these models on tiny devices is emerging as the most relevant challenge, with a massive untapped market. Although Automated Machine Learning and Neural Architecture Search frameworks are successfully used to find accurate models by trying a small number of alternatives, they are typically performed on large computational platforms and they cannot directly deal with deployability, leading to an accurate model which could result undeployable on a tiny device. To bridge the gap between these two worlds, we present an approach extending these frameworks to include the constraints related to the limited hardware resources of the tiny device which the trained model has to run on. Experimental results on two benchmark classification tasks and two microcontrollers prove that our AutoTinyML framework can efficiently identify models which are both accurate and deployable, in case accepting a reasonable reduction in accuracy compared to a significant reduction in hardware usages, without applying any quantization techniques of the model.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gridin, Ivan
One-Shot Neural Architecture Search Book Chapter
In: Automated Deep Learning Using Neural Network Intelligence: Develop and Design PyTorch and TensorFlow Models Using Python, pp. 257–318, Apress, Berkeley, CA, 2022, ISBN: 978-1-4842-8149-9.
@inbook{Gridin2022,
title = {One-Shot Neural Architecture Search},
author = {Ivan Gridin},
url = {https://doi.org/10.1007/978-1-4842-8149-9_5},
doi = {10.1007/978-1-4842-8149-9_5},
isbn = {978-1-4842-8149-9},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Automated Deep Learning Using Neural Network Intelligence: Develop and Design PyTorch and TensorFlow Models Using Python},
pages = {257--318},
publisher = {Apress},
address = {Berkeley, CA},
abstract = {In the previous chapter, we explored Multi-trial Neural Architecture Search, which is a very promising approach. And the reader might wonder why Multi-trial NAS is called that way. Are there any other non-Multi-trial NAS approaches, and is it really possible to search for the optimal neural network architecture in some other way without trying it? It looks pretty natural that the only way to find the optimal solution is to try different elements in the search space. In fact, it turns out that this is not entirely true. There is an approach that allows you to find the best architecture by training some Supernet. And this approach is called One-shot Neural Architecture Search. As the name ``one-shot'' implies, this approach involves only one try or shot. Of course, this ``shot'' is much longer than single neural network training, but nevertheless, it saves a lot of time. In this chapter, we will study what One-shot NAS is and how to design architectures for this approach. We will examine two popular One-shot algorithms: Efficient Neural Architecture Search via Parameter Sharing (ENAS) and Differentiable Architecture Search (DARTS). Of course, we will apply these algorithms to solve practical problems.},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}
Gridin, Ivan
Multi-trial Neural Architecture Search Book Chapter
In: Automated Deep Learning Using Neural Network Intelligence: Develop and Design PyTorch and TensorFlow Models Using Python, pp. 185–256, Apress, Berkeley, CA, 2022, ISBN: 978-1-4842-8149-9.
@inbook{Gridin2022b,
title = {Multi-trial Neural Architecture Search},
author = {Ivan Gridin},
url = {https://doi.org/10.1007/978-1-4842-8149-9_4},
doi = {10.1007/978-1-4842-8149-9_4},
isbn = {978-1-4842-8149-9},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Automated Deep Learning Using Neural Network Intelligence: Develop and Design PyTorch and TensorFlow Models Using Python},
pages = {185--256},
publisher = {Apress},
address = {Berkeley, CA},
abstract = {And now we come to the most exciting part of this book. As we noted at the end of the last chapter, HPO methods are pretty limited for automating the search for the optimal deep learning models, but Neural Architecture Search (NAS) dispels these limits. This chapter focuses on NAS, one of the most promising areas of automated deep learning. Automatic Neural Architecture Search is increasingly important in finding appropriate deep learning models. Recent researches have proven the NAS effectiveness and found some models that could beat manually tuned ones. NAS is a fairly young discipline in machine learning. It took shape as a separate discipline in 2018. Since then, it has made a significant breakthrough in automating neural network architecture construction that solves a specific problem. The most manual design of neural networks can be replaced by automated architecture search soon, so this area is very up and coming for all data scientists. NAS produced many top computer vision architectures. Architectures like NASNet, EfficientNet, and MobileNet are the result of automated Neural Architecture Search.},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}
Dudziak, Lukasz; Laskaridis, Stefanos; Fernández-Marqués, Javier
FedorAS: Federated Architecture Search under system heterogeneity Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-11239,
title = {FedorAS: Federated Architecture Search under system heterogeneity},
author = {Lukasz Dudziak and Stefanos Laskaridis and Javier Fernández-Marqués},
url = {https://doi.org/10.48550/arXiv.2206.11239},
doi = {10.48550/arXiv.2206.11239},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.11239},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Duan, Fenxia; Cao, Chunhong; Gao, Xieping
SA-NAS-BFNR: Spatiotemporal Attention Neural Architecture Search for Task-Based Brain Functional Network Representation Proceedings Article
In: Proceedings of the 2022 International Conference on Multimedia Retrieval, pp. 661–667, Association for Computing Machinery, Newark, NJ, USA, 2022, ISBN: 9781450392389.
@inproceedings{10.1145/3512527.3531421,
title = {SA-NAS-BFNR: Spatiotemporal Attention Neural Architecture Search for Task-Based Brain Functional Network Representation},
author = {Fenxia Duan and Chunhong Cao and Xieping Gao},
url = {https://doi.org/10.1145/3512527.3531421},
doi = {10.1145/3512527.3531421},
isbn = {9781450392389},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the 2022 International Conference on Multimedia Retrieval},
pages = {661--667},
publisher = {Association for Computing Machinery},
address = {Newark, NJ, USA},
series = {ICMR '22},
abstract = {The spatiotemporal representation of task-based brain functional networks is a key topic in functional magnetic resonance image (fMRI) research. At present, deep learning has been more powerful and flexible in brain functional network research than traditional methods. However, the dominant deep learning models failed in capturing the long-distance dependency (LDD) in task-based fMRI images (tfMRI) due to the time correlation among different task stimuli, the nature between temporal and spatial dimensions, which resulting in inaccurate brain pattern extraction. To address this issue, this paper proposes a spatiotemporal attention neural architecture search (NAS) model for task-based brain functional networks representation (SA-NAS-BFNR), where attention mechanism and gate recurrent unit (GRU) are integrated into a novel framework and GRU structure is searched by the differentiable neural architecture search. This model can not only achieve meaningful brain functional networks (BFNs) by addressing the LDD, but also simplify the existing recurrent structure models in tfMRI. Experiments show that the proposed model is capable of improving the fitting ability between time series and task stimulus sequence, and extracting the BFNs effectively as well.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chitty-Venkata, Krishna Teja; Emani, Murali; Vishwanath, Venkatram; Somani, Arun K.
Efficient Design Space Exploration for Sparse Mixed Precision Neural Architectures Proceedings Article
In: Proceedings of the 31st International Symposium on High-Performance Parallel and Distributed Computing, pp. 265–276, Association for Computing Machinery, Minneapolis, MN, USA, 2022, ISBN: 9781450391993.
@inproceedings{10.1145/3502181.3531463,
title = {Efficient Design Space Exploration for Sparse Mixed Precision Neural Architectures},
author = {Krishna Teja Chitty-Venkata and Murali Emani and Venkatram Vishwanath and Arun K. Somani},
url = {https://doi.org/10.1145/3502181.3531463},
doi = {10.1145/3502181.3531463},
isbn = {9781450391993},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the 31st International Symposium on High-Performance Parallel and Distributed Computing},
pages = {265--276},
publisher = {Association for Computing Machinery},
address = {Minneapolis, MN, USA},
series = {HPDC '22},
abstract = {Pruning and Quantization are two effective Deep Neural Network (DNN) compression methods for efficient inference on various hardware platforms. Pruning refers to removing unimportant weights or nodes, whereas Quantization converts the floating-point parameters to low-bit fixed integer representation. The pruned and low precision models result in smaller and faster inference models on hardware platforms with almost the same accuracy as the unoptimized network. Tensor Cores in Nvidia Ampere 100 (A100) GPU supports (1) 2:4 fine-grained sparse pruning where 2 out of every 4 elements are pruned, and (2) traditional dense multiplication to achieve a good accuracy and performance trade-off. The A100 Tensor Core also takes advantage of 1-bit, 4-bit, and 8-bit multiplication to speed up the inference of a model. Hence, finding the right matrix type (dense or 2:4 sparse) along with the precision for each layer becomes a combinatorial problem. Neural Architecture Search (NAS) can alleviate such problems by automating the architecture design process instead of a brute-force search. In this paper, we propose (i) Mixed Sparse and Precision Search (MSPS), a NAS framework to search for efficient sparse and mixed-precision quantized model within the predefined search space and fixed backbone neural network (Eg. ResNet50), and (ii) Architecture, Sparse and Precision Search (ASPS) to jointly search for kernel size and number of filters, and sparse-precision combination of each layer. We illustrate the effectiveness of our methods targeting A100 Tensor Core on Nvidia GPUs by searching efficient sparse-mixed precision networks on ResNet50 and achieving better accuracy-latency trade-off models compared to the manually designed Uniform Sparse Int8 networks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gesmundo, Andrea; Dean, Jeff
muNet: Evolving Pretrained Deep Neural Networks into Scalable Auto-tuning Multitask Systems Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2205-10937,
title = {muNet: Evolving Pretrained Deep Neural Networks into Scalable Auto-tuning Multitask Systems},
author = {Andrea Gesmundo and Jeff Dean},
url = {https://doi.org/10.48550/arXiv.2205.10937},
doi = {10.48550/arXiv.2205.10937},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2205.10937},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Han, Zhu; Hong, Danfeng; Gao, Lianru; Zhang, Bing; Huang, Min; Chanussot, Jocelyn
AutoNAS: Automatic Neural Architecture Search for Hyperspectral Unmixing Journal Article
In: IEEE Transactions on Geoscience and Remote Sensing, vol. 60, pp. 1-14, 2022.
@article{9807268,
title = {AutoNAS: Automatic Neural Architecture Search for Hyperspectral Unmixing},
author = {Zhu Han and Danfeng Hong and Lianru Gao and Bing Zhang and Min Huang and Jocelyn Chanussot},
url = {https://ieeexplore.ieee.org/abstract/document/9807268},
doi = {10.1109/TGRS.2022.3186480},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Geoscience and Remote Sensing},
volume = {60},
pages = {1--14},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Seng, Jonas; Prasad, Pooja; Dhami, Devendra Singh; Kersting, Kristian
HANF: Hyperparameter And Neural Architecture Search in Federated Learning Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-12342,
title = {HANF: Hyperparameter And Neural Architecture Search in Federated Learning},
author = {Jonas Seng and Pooja Prasad and Devendra Singh Dhami and Kristian Kersting},
url = {https://doi.org/10.48550/arXiv.2206.12342},
doi = {10.48550/arXiv.2206.12342},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.12342},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Yu, Yanjiang; Zhang, Puyang; Zhang, Kaihao; Luo, Wenhan; Li, Changsheng; Yuan, Ye; Wang, Guoren
Multi-Prior Learning via Neural Architecture Search for Blind Face Restoration Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-13962,
title = {Multi-Prior Learning via Neural Architecture Search for Blind Face Restoration},
author = {Yanjiang Yu and Puyang Zhang and Kaihao Zhang and Wenhan Luo and Changsheng Li and Ye Yuan and Guoren Wang},
url = {https://doi.org/10.48550/arXiv.2206.13962},
doi = {10.48550/arXiv.2206.13962},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.13962},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Dong, Peijie; Niu, Xin; Li, Lujun; Xie, Linzhen; Zou, Wenbin; Ye, Tian; Wei, Zimian; Pan, Hengyue
Prior-Guided One-shot Neural Architecture Search Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-13329,
title = {Prior-Guided One-shot Neural Architecture Search},
author = {Peijie Dong and Xin Niu and Lujun Li and Linzhen Xie and Wenbin Zou and Tian Ye and Zimian Wei and Hengyue Pan},
url = {https://doi.org/10.48550/arXiv.2206.13329},
doi = {10.48550/arXiv.2206.13329},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.13329},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Benmeziane, Hadjer; Niar, Smail; Ouarnoughi, Hamza; Maghraoui, Kaoutar El
Pareto Rank Surrogate Model for Hardware-aware Neural Architecture Search Proceedings Article
In: 2022 IEEE International Symposium on Performance Analysis of Systems and Software (ISPASS), pp. 267-276, 2022.
@inproceedings{9804643,
title = {Pareto Rank Surrogate Model for Hardware-aware Neural Architecture Search},
author = {Hadjer Benmeziane and Smail Niar and Hamza Ouarnoughi and Kaoutar El Maghraoui},
doi = {10.1109/ISPASS55109.2022.00040},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {2022 IEEE International Symposium on Performance Analysis of Systems and Software (ISPASS)},
pages = {267--276},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Tianzi; Deng, Jiajun; Geng, Mengzhe; Ye, Zi; Hu, Shoukang; Wang, Yi; Cui, Mingyu; Jin, Zengrui; Liu, Xunying; Meng, Helen
Conformer Based Elderly Speech Recognition System for Alzheimer's Disease Detection Journal Article
In: CoRR, vol. abs/2206.13232, 2022.
@article{DBLP:journals/corr/abs-2206-13232,
title = {Conformer Based Elderly Speech Recognition System for Alzheimer's Disease Detection},
author = {Tianzi Wang and Jiajun Deng and Mengzhe Geng and Zi Ye and Shoukang Hu and Yi Wang and Mingyu Cui and Zengrui Jin and Xunying Liu and Helen Meng},
url = {https://doi.org/10.48550/arXiv.2206.13232},
doi = {10.48550/arXiv.2206.13232},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2206.13232},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Shen, Junge; Cao, Bin; Zhang, Chi; Wang, Ruxin; Wang, Qi
Remote Sensing Scene Classification Based on Attention-Enabled Progressively Searching Journal Article
In: IEEE Transactions on Geoscience and Remote Sensing, pp. 1-1, 2022.
@article{9807377,
title = {Remote Sensing Scene Classification Based on Attention-Enabled Progressively Searching},
author = {Junge Shen and Bin Cao and Chi Zhang and Ruxin Wang and Qi Wang},
url = {https://ieeexplore.ieee.org/abstract/document/9807377},
doi = {10.1109/TGRS.2022.3186588},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Geoscience and Remote Sensing},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Dahouda, Mwamba Kasongo; Joe, Inwhee
Neural Architecture Search Net-based Feature Extraction with Modular Neural Network for Image Classification of Copper/Cobalt Raw Minerals Journal Article
In: IEEE Access, pp. 1-1, 2022.
@article{9810927,
title = {Neural Architecture Search Net-based Feature Extraction with Modular Neural Network for Image Classification of Copper/Cobalt Raw Minerals},
author = {Mwamba Kasongo Dahouda and Inwhee Joe},
doi = {10.1109/ACCESS.2022.3187420},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Access},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lin, Zhiwei; Liang, Tingting; Xiao, Taihong; Wang, Yongtao; Tang, Zhi; Yang, Ming-Hsuan
FlowNAS: Neural Architecture Search for Optical Flow Estimation Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2207-01271,
title = {FlowNAS: Neural Architecture Search for Optical Flow Estimation},
author = {Zhiwei Lin and Tingting Liang and Taihong Xiao and Yongtao Wang and Zhi Tang and Ming-Hsuan Yang},
url = {https://doi.org/10.48550/arXiv.2207.01271},
doi = {10.48550/arXiv.2207.01271},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2207.01271},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}