Maintained by Difan Deng and Marius Lindauer.
The following list considers papers related to neural architecture search. It is by no means complete. If a paper is missing from the list, please let us know.
Please note that although NAS methods steadily improve, the quality of empirical evaluations in this field is still lagging behind that of other areas in machine learning, AI, and optimization. We would therefore like to share some best practices for empirical evaluations of NAS methods, which we believe will facilitate sustained and measurable progress in the field. If you are interested in a teaser, please read our blog post or jump directly to our checklist.
Transformers have gained increasing popularity in different domains. For a comprehensive list of papers focusing on Neural Architecture Search for Transformer-Based spaces, the awesome-transformer-search repo is all you need.
2022
Gao, Zejun; Cao, Fei; He, Chuan; Feng, Xiaowei; Xu, Jianfeng; Qin, Jianqiang
Network Optimization Algorithm for Radar Active Jamming Identification Based on Neural Architecture Search Proceedings Article
In: Progress In Electromagnetics Research, 2022.
@inproceedings{GaoIER2022,
title = {Network Optimization Algorithm for Radar Active Jamming Identification Based on Neural Architecture Search},
author = {Zejun Gao and Fei Cao and Chuan He and Xiaowei Feng and Jianfeng Xu and Jianqiang Qin},
url = {https://www.jpier.org/ac_api/download.php?id=22081806},
year = {2022},
date = {2022-11-10},
urldate = {2022-11-10},
booktitle = {Progress In Electromagnetics Research},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Raj, Aditya; Sathish, Rakshith; Sarkar, Tandra; Sethuraman, Ramanathan; Sheet, Debdoot
Designing Deep Neural High-Density Compression Engines for Radiology Images Journal Article
In: Circuits, Systems, and Signal Processing, 2022.
@article{RajCSSP22,
title = {Designing Deep Neural High-Density Compression Engines for Radiology Images},
author = {Aditya Raj and Rakshith Sathish and Tandra Sarkar and Ramanathan Sethuraman and Debdoot Sheet},
url = {https://link.springer.com/article/10.1007/s00034-022-02222-0},
year = {2022},
date = {2022-11-10},
urldate = {2022-11-10},
journal = {Circuits, Systems, and Signal Processing},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wan, Yuting; Zhong, Yanfei; Ma, Ailong; Wang, Junjue; Zhang, Liangpei
E2SCNet: Efficient Multiobjective Evolutionary Automatic Search for Remote Sensing Image Scene Classification Network Architecture Proceedings Article
In: IEEE Transactions on Neural Networks and Learning Systems, 2022.
@inproceedings{WanNNLS22,
title = {E2SCNet: Efficient Multiobjective Evolutionary Automatic Search for Remote Sensing Image Scene Classification Network Architecture},
author = {Yuting Wan and Yanfei Zhong and Ailong Ma and Junjue Wang and Liangpei Zhang},
url = {https://www.researchgate.net/profile/Yuting-Wan/publication/365479807_E2SCNet_Efficient_Multiobjective_Evolutionary_Automatic_Search_for_Remote_Sensing_Image_Scene_Classification_Network_Architecture/links/637728f11766b34c54345fe1/E2SCNet-Efficient-Multiobjective-Evolutionary-Automatic-Search-for-Remote-Sensing-Image-Scene-Classification-Network-Architecture.pdf},
year = {2022},
date = {2022-11-10},
urldate = {2022-11-10},
booktitle = {IEEE Transactions on Neural Networks and Learning Systems},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhang, Xindong; Zeng, Hui; Zhang, Lei
Efficient Hardware-aware Neural Architecture Search for Image Super-resolution on Mobile Device Proceedings Article
In: ACCV 2022, 2022.
@inproceedings{ZhangACCV22,
title = {Efficient Hardware-aware Neural Architecture Search for Image Super-resolution on Mobile Device},
author = {Xindong Zhang and Hui Zeng and Lei Zhang},
url = {https://openaccess.thecvf.com/content/ACCV2022/papers/Zhang_Efficient_Hardware-aware_Neural_Architecture_Search_for_Image_Super-resolution_on_Mobile_ACCV_2022_paper.pdf},
year = {2022},
date = {2022-11-10},
booktitle = {ACCV 2022},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Tang, Yi; Iwaguchi, Takafumi; Kawasaki, Hiroshi; Sagawa, Ryusuke; Furukawa, Ryo
AutoEnhancer: Transformer on U-Net Architecture search for Underwater Image Enhancement Proceedings Article
In: ACCV 2022, 2022.
@inproceedings{TangACCV22,
title = {AutoEnhancer: Transformer on U-Net Architecture search for Underwater Image Enhancement},
author = {Yi Tang and Takafumi Iwaguchi and Hiroshi Kawasaki and Ryusuke Sagawa and Ryo Furukawa},
url = {https://openaccess.thecvf.com/content/ACCV2022/papers/Tang_AutoEnhancer_Transformer_on_U-Net_Architecture_search_for_Underwater_Image_Enhancement_ACCV_2022_paper.pdf},
year = {2022},
date = {2022-11-10},
booktitle = {ACCV 2022},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Jia
Evolving deep neural networks for robust learning PhD Thesis
2022.
@phdthesis{LiuPhD22,
title = { Evolving deep neural networks for robust learning },
author = {Jia Liu},
url = {https://ethos.bl.uk/OrderDetails.do?uin=uk.bl.ethos.865567},
year = {2022},
date = {2022-11-10},
urldate = {2022-11-10},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
Lu, Zhichao; Cheng, Ran; Huang, Shihua; Zhang, Haoming; Qiu, Changxiao; Yang, Fan
Surrogate-assisted Multiobjective Neural Architecture Search for Real-time Semantic Segmentation Technical Manual
2022.
@manual{LuTAI22,
title = {Surrogate-assisted Multiobjective Neural Architecture Search for Real-time Semantic Segmentation},
author = {Zhichao Lu and Ran Cheng and Shihua Huang and Haoming Zhang and Changxiao Qiu and Fan Yang},
url = {https://www.zhichaolu.com/assets/paper/22_tai_RealTimeSeg.pdf},
year = {2022},
date = {2022-11-07},
urldate = {2022-11-07},
keywords = {},
pubstate = {published},
tppubtype = {manual}
}
Yang, Zhao; Sun, Qingshuang
Toward efficient neural architecture search with dynamic mapping-adaptive sampling for resource-limited edge device Proceedings Article
In: Neural Computing and Applications, 2022.
@inproceedings{YangNCA22,
title = {Toward efficient neural architecture search with dynamic mapping-adaptive sampling for resource-limited edge device},
author = {Zhao Yang and Qingshuang Sun},
url = {https://link.springer.com/article/10.1007/s00521-022-07984-x},
year = {2022},
date = {2022-11-05},
urldate = {2022-11-05},
booktitle = {Neural Computing and Applications },
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ning, Xuefei; Zhou, Zixuan; Zhao, Junbo; Zhao, Tianchen; Deng, Yiping; Tang, Changcheng; Liang, Shuang; Yang, Huazhong; Wang, Yu
TA-GATES: An Encoding Scheme for Neural Network Architectures Proceedings Article
In: 36th Conference on Neural Information Processing Systems (NeurIPS 2022), 2022.
@inproceedings{NingNeurIPS22,
title = {TA-GATES: An Encoding Scheme for Neural Network Architectures},
author = {Xuefei Ning and Zixuan Zhou and Junbo Zhao and Tianchen Zhao and Yiping Deng and Changcheng Tang and Shuang Liang and Huazhong Yang and Yu Wang},
url = {https://nicsefc.ee.tsinghua.edu.cn/nics_file/pdf/3a73d11c-a922-419f-838a-5e2b69f4187c.pdf},
year = {2022},
date = {2022-11-05},
booktitle = {36th Conference on Neural Information Processing Systems (NeurIPS 2022)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nalluri, Sravani; Sasikala, R.
A deep neural architecture for SOTA pneumonia detection from chest X-rays Proceedings Article
In: International Journal of System Assurance Engineering and Management, 2022.
@inproceedings{NalluriIJSAEM22,
title = {A deep neural architecture for SOTA pneumonia detection from chest X-rays},
author = {Sravani Nalluri and R. Sasikala},
url = {https://link.springer.com/article/10.1007/s13198-022-01788-x},
year = {2022},
date = {2022-11-03},
urldate = {2022-11-03},
booktitle = {International Journal of System Assurance Engineering and Management },
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Park, Jae Bok; Lee, Kyung Hee; Kwak, Ji Young; Cho, Chang Sik
Deployment Framework Design Techniques for Optimized Neural Network Applicati Technical Manual
2022.
@manual{ParkICC22,
title = {Deployment Framework Design Techniques for Optimized Neural Network Applicati},
author = {Jae Bok Park and Kyung Hee Lee and Ji Young Kwak and Chang Sik Cho},
url = {https://journal-home.s3.ap-northeast-2.amazonaws.com/site/ictc2022/abs/P7-4.pdf},
year = {2022},
date = {2022-11-01},
urldate = {2022-11-01},
keywords = {},
pubstate = {published},
tppubtype = {manual}
}
Lu, Bingqian
Towards Ultra-Efficient Machine Learning for Edge Inference PhD Thesis
2022.
@phdthesis{LuPhD2022,
title = {Towards Ultra-Efficient Machine Learning for Edge Inference},
author = {Bingqian Lu},
url = {https://escholarship.org/uc/item/0wg9692z#main},
year = {2022},
date = {2022-11-01},
urldate = {2022-11-01},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
Lu, Zexin; Xia, Wenjun; Huang, Yongqiang; Hou, Mingzheng; Chen, Hu; Zhou, Jiliu; Shan, Hongming; Zhang, Yi
M³NAS: Multi-Scale and Multi-Level Memory-Efficient Neural Architecture Search for Low-Dose CT Denoising Journal Article
In: IEEE transactions on medical imaging, vol. PP, 2022, ISSN: 0278-0062.
@article{PMID:36327187,
title = {M^{3}NAS: Multi-Scale and Multi-Level Memory-Efficient Neural Architecture Search for Low-Dose CT Denoising},
author = {Zexin Lu and Wenjun Xia and Yongqiang Huang and Mingzheng Hou and Hu Chen and Jiliu Zhou and Hongming Shan and Yi Zhang},
url = {https://doi.org/10.1109/TMI.2022.3219286},
doi = {10.1109/tmi.2022.3219286},
issn = {0278-0062},
year = {2022},
date = {2022-11-01},
urldate = {2022-11-01},
journal = {IEEE transactions on medical imaging},
volume = {PP},
abstract = {Lowering the radiation dose in computed tomography (CT) can greatly reduce the potential risk to public health. However, the reconstructed images from dose-reduced CT or low-dose CT (LDCT) suffer from severe noise which compromises the subsequent diagnosis and analysis. Recently, convolutional neural networks have achieved promising results in removing noise from LDCT images. The network architectures that are used are either handcrafted or built on top of conventional networks such as ResNet and U-Net. Recent advances in neural network architecture search (NAS) have shown that the network architecture has a dramatic effect on the model performance. This indicates that current network architectures for LDCT may be suboptimal. Therefore, in this paper, we make the first attempt to apply NAS to LDCT and propose a multi-scale and multi-level memory-efficient NAS for LDCT denoising, termed M^{3}NAS. On the one hand, the proposed M^{3}NAS fuses features extracted by different scale cells to capture multi-scale image structural details. On the other hand, the proposed M^{3}NAS can search a hybrid cell- and network-level structure for better performance. In addition, M^{3}NAS can effectively reduce the number of model parameters and increase the speed of inference. Extensive experimental results on two different datasets demonstrate that the proposed M^{3}NAS can achieve better performance and fewer parameters than several state-of-the-art methods. In addition, we also validate the effectiveness of the multi-scale and multi-level architecture for LDCT denoising, and present further analysis for different configurations of super-net.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhang, Yang; Zong, Ruohan; Kou, Ziyi; Shang, Lanyu; Wang, Dong
CrowdNAS: A Crowd-Guided Neural Architecture Searching Approach to Disaster Damage Assessment Journal Article
In: Proc. ACM Hum.-Comput. Interact., vol. 6, no. CSCW2, 2022.
@article{10.1145/3555179,
title = {CrowdNAS: A Crowd-Guided Neural Architecture Searching Approach to Disaster Damage Assessment},
author = {Yang Zhang and Ruohan Zong and Ziyi Kou and Lanyu Shang and Dong Wang},
url = {https://doi.org/10.1145/3555179},
doi = {10.1145/3555179},
year = {2022},
date = {2022-11-01},
urldate = {2022-11-01},
journal = {Proc. ACM Hum.-Comput. Interact.},
volume = {6},
number = {CSCW2},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {Disaster damage assessment (DDA) has emerged as an important application in disaster response and management, which aims to assess the damage severity of an affected area by leveraging AI (e.g., deep learning) techniques to examine the imagery data posted on social media during a disaster event. In this paper, we focus on a crowd-guided neural architecture searching (NAS) problem in DDA applications. Our goal is to leverage human intelligence from crowdsourcing systems to guide the discovery of the optimal neural network architecture in the design space to achieve the desirable damage assessment performance. Our work is motivated by the limitation that the deep neural network architectures in current DDA solutions are mainly designed by AI experts, which is known to be both time-consuming and error-prone. Two critical technical challenges exist in solving our problem: i) it is challenging to design a manageable NAS space for crowd-based solutions; ii) it is non-trivial to transfer the imperfect crowd knowledge to effective decisions in identifying the optimal neural network architecture of a DDA application. To address the above challenges, we develop CrowdNAS, a crowd-guided NAS framework that develops novel techniques inspired by AI, crowdsourcing, and estimation theory to address the NAS problem. The evaluation results from two real-world DDA applications show that CrowdNAS consistently outperforms the state-of-the-art AI-only, crowd-AI, and NAS baselines by achieving the highest classification accuracy in the damage assessment while maintaining a low computational cost under various evaluation scenarios.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Jin, Charles; Phothilimthana, Phitchaya Mangpo; Roy, Sudip
Neural Architecture Search Using Property Guided Synthesis Journal Article
In: Proc. ACM Program. Lang., vol. 6, no. OOPSLA2, 2022.
@article{10.1145/3563329,
title = {Neural Architecture Search Using Property Guided Synthesis},
author = {Charles Jin and Phitchaya Mangpo Phothilimthana and Sudip Roy},
url = {https://doi.org/10.1145/3563329},
doi = {10.1145/3563329},
year = {2022},
date = {2022-10-01},
urldate = {2022-10-01},
journal = {Proc. ACM Program. Lang.},
volume = {6},
number = {OOPSLA2},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {Neural architecture search (NAS) has become an increasingly important tool within the deep learning community in recent years, yielding many practical advancements in the design of deep neural network architectures. However, most existing approaches operate within highly structured design spaces, and hence (1) explore only a small fraction of the full search space of neural architectures while also (2) requiring significant manual effort from domain experts. In this work, we develop techniques that enable efficient NAS in a significantly larger design space. In particular, we propose to perform NAS in an abstract search space of program properties. Our key insights are as follows: (1) an abstract search space can be significantly smaller than the original search space, and (2) architectures with similar program properties should also have similar performance; thus, we can search more efficiently in the abstract search space. To enable this approach, we also introduce a novel efficient synthesis procedure, which performs the role of concretizing a set of promising program properties into a satisfying neural architecture. We implement our approach, αNAS, within an evolutionary framework, where the mutations are guided by the program properties. Starting with a ResNet-34 model, αNAS produces a model with slightly improved accuracy on CIFAR-10 but 96% fewer parameters. On ImageNet, αNAS is able to improve over Vision Transformer (30% fewer FLOPS and parameters), ResNet-50 (23% fewer FLOPS, 14% fewer parameters), and EfficientNet (7% fewer FLOPS and parameters) without any degradation in accuracy.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Stolle, Kurt; Vogel, Sebastian; van der Sommen, Fons; Sanberg, Willem
Block-Level Surrogate Models for Inference Time Estimation in Hardware-Aware Neural Architecture Search Proceedings Article
In: ECML PKDD 22, 2022.
@inproceedings{StolleECML22,
title = {Block-Level Surrogate Models for Inference Time Estimation in Hardware-Aware Neural Architecture Search},
author = {Kurt Stolle and Sebastian Vogel and Fons van der Sommen and Willem Sanberg},
url = {https://2022.ecmlpkdd.org/wp-content/uploads/2022/09/sub_737.pdf},
year = {2022},
date = {2022-09-19},
urldate = {2022-09-19},
booktitle = {ECML PKDD 22},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nowak, Aleksandra I.; Janik, Romuald A.
Discovering wiring patterns influencing neural network performance Proceedings Article
In: ECML PKDD 22, 2022.
@inproceedings{NowakECML22,
title = {Discovering wiring patterns influencing neural network performance},
author = {Aleksandra I. Nowak and Romuald A. Janik},
url = {https://2022.ecmlpkdd.org/wp-content/uploads/2022/09/sub_1358.pdf},
year = {2022},
date = {2022-09-19},
urldate = {2022-09-19},
booktitle = {ECML PKDD 22},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Franken, Gideon; Singh, Prabhant; Vanschoren, Joaquin
Faster Performance Estimation for NAS with Embedding Proximity Score Proceedings Article
In: ECMLPKDD Workshop on Meta-Knowledge Transfer, 2022.
@inproceedings{FrankenECMLMeta22,
title = {Faster Performance Estimation for NAS with Embedding Proximity Score},
author = {Gideon Franken and Prabhant Singh and Joaquin Vanschoren},
url = {https://proceedings.mlr.press/v191/franken22a/franken22a.pdf},
year = {2022},
date = {2022-09-16},
urldate = {2022-09-16},
booktitle = {ECMLPKDD Workshop on Meta-Knowledge Transfer},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Yuan, Wanqi; Fu, Chenping; Liu, Risheng; Fan, Xin
SSoB: searching a scene-oriented architecture for underwater object detection Journal Article
In: The Visual Computer, 2022.
@article{YuanTVC22,
title = {SSoB: searching a scene-oriented architecture for underwater object detection},
author = {Wanqi Yuan and Chenping Fu and Risheng Liu and Xin Fan},
url = {https://link.springer.com/article/10.1007/s00371-022-02654-4},
year = {2022},
date = {2022-09-10},
urldate = {2022-09-10},
journal = {The Visual Computer},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Bhowmik, Pritom
Machine Learning in Production: From Experimented ML Model to System Journal Article
In: Journal of Robotics and Automation Research, 2022.
@article{Bhowmik22,
title = {Machine Learning in Production: From Experimented ML Model to System},
author = {Pritom Bhowmik},
url = {https://opastpublishers.com/open-access/machine-learning-in-production-from-experimented-ml-model-to-system.pdf},
year = {2022},
date = {2022-09-10},
urldate = {2022-09-10},
journal = {Journal of Robotics and Automation Research},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Li, Yilan; Lu, Yantao; Cui, Helei; Velipasalar, Senem
Improving robustness and efficiency of edge computing models Journal Article
In: Wireless Networks, 2022.
@article{LiWirelessNetworks22,
title = {Improving robustness and efficiency of edge computing models},
author = {Yilan Li and Yantao Lu and Helei Cui and Senem Velipasalar},
url = {https://link.springer.com/article/10.1007/s11276-022-03115-5},
year = {2022},
date = {2022-09-05},
urldate = {2022-09-05},
journal = {Wireless Networks },
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Arhore, Edore G.; Yasaee, Mehdi; Dayyani, Iman
Optimisation of convolutional neural network architecture using genetic algorithm for the prediction of adhesively bonded joint strength Journal Article
In: Structural and Multidisciplinary Optimization, 2022.
@article{ArhoreSMO22,
title = {Optimisation of convolutional neural network architecture using genetic algorithm for the prediction of adhesively bonded joint strength},
author = {Edore G. Arhore and Mehdi Yasaee and Iman Dayyani},
url = {https://link.springer.com/article/10.1007/s00158-022-03359-x},
year = {2022},
date = {2022-09-02},
urldate = {2022-09-02},
journal = {Structural and Multidisciplinary Optimization},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Liu, Yang; Liu, Jing; Li, Yifan
Automatic search of architecture and hyperparameters of graph convolutional networks for node classification Journal Article
In: Applied Intelligence, 2022.
@article{LiuAppliedIntelligence2022,
title = {Automatic search of architecture and hyperparameters of graph convolutional networks for node classification},
author = {Yang Liu and Jing Liu and Yifan Li},
url = {https://link.springer.com/article/10.1007/s10489-022-04096-w},
year = {2022},
date = {2022-08-31},
urldate = {2022-08-31},
journal = {Applied Intelligence},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Phan, Quan Minh; Luong, Ngoc Hoang
Enhancing multi-objective evolutionary neural architecture search with training-free Pareto local search Journal Article
In: Applied Intelligence, 2022.
@article{PhanApppliedInteligence22,
title = {Enhancing multi-objective evolutionary neural architecture search with training-free Pareto local search},
author = {
Quan Minh Phan and Ngoc Hoang Luong
},
url = {https://link.springer.com/article/10.1007/s10489-022-04032-y},
year = {2022},
date = {2022-08-24},
journal = {Applied Intelligence},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Fadzail, N F; Zali, S Mat; Mid, E C; Jailani, R
Application of Automated Machine Learning (AutoML) Method in Wind Turbine Fault Detection Journal Article
In: Journal of Physics: Conference Series, 2022.
@article{FadzailJPCS2022,
title = {Application of Automated Machine Learning (AutoML) Method in Wind Turbine Fault Detection},
author = {N F Fadzail and S Mat Zali and E C Mid and R Jailani},
url = {https://iopscience.iop.org/article/10.1088/1742-6596/2312/1/012074/pdf},
year = {2022},
date = {2022-08-16},
urldate = {2022-08-16},
journal = {Journal of Physics: Conference Series},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ke, Songyu; Song, Li; Bao, Kainan; Pan, Zheyi; Zhang, Junbo; Zheng, Yu
EAST: An Enhanced Automated Machine Learning Library for Spatio-Temporal Forecasting Proceedings Article
In: DeepSpatial ’22, Washington, DC, USA, 2022.
@inproceedings{KeEAST2022,
title = {EAST: An Enhanced Automated Machine Learning Library for Spatio-Temporal Forecasting},
author = {Songyu Ke and Li Song and Kainan Bao and Zheyi Pan and Junbo Zhang and Yu Zheng},
year = {2022},
date = {2022-08-15},
urldate = {2022-08-15},
booktitle = {DeepSpatial ’22},
address = {Washington, DC, USA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bin, Wang; Hui, Ning
Notebook for PAN at CLEF 2022: Profiling Irony and Stereotype Spreaders on Twitter Proceedings Article
In: CLEF 2022: Conference and Labs of the Evaluation Forum, 2022.
@inproceedings{BinPAN2022,
title = {Notebook for PAN at CLEF 2022: Profiling Irony and Stereotype Spreaders on Twitter},
author = {Wang Bin and Ning Hui},
url = {http://ceur-ws.org/Vol-3180/paper-225.pdf},
year = {2022},
date = {2022-08-12},
urldate = {2022-08-12},
booktitle = {CLEF 2022: Conference and Labs of the Evaluation Forum},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Arasakumaran, Umamageswari; Johnson, Shiny Duela; Sara, Dioline; Kothandaraman, Raja
An Enhanced Identification and Classification Algorithm for Plant Leaf Diseases Based on Deep Learning. Journal Article
In: Traitement du Signal, vol. 39, iss. 3, 2022.
@article{Umamageswari22,
title = {An Enhanced Identification and Classification Algorithm for Plant Leaf Diseases Based on Deep Learning.},
author = {Arasakumaran, Umamageswari and Johnson, Shiny Duela and Sara, Dioline and Kothandaraman, Raja},
url = {https://web.s.ebscohost.com/ehost/detail/detail?vid=0&sid=9bd58607-e82f-47ed-b179-299ab049b566%40redis&bdata=JnNpdGU9ZWhvc3QtbGl2ZQ%3d%3d#db=bsh&AN=158329091},
year = {2022},
date = {2022-08-10},
urldate = {2022-08-10},
journal = {Traitement du Signal},
volume = {39},
issue = {3},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chen, Shengbo; Jiang, Kai; Liu, Xianrui; Yang, Kangkang; Lei, Zhou
TGAS-ReID: Efficient architecture search for person re-identification via greedy decisions with topological order Journal Article
In: Applied Intelligence, 2022.
@article{ChenAI2022,
title = {TGAS-ReID: Efficient architecture search for person re-identification via greedy decisions with topological order},
author = {Shengbo Chen and Kai Jiang and Xianrui Liu and Kangkang Yang and Zhou Lei},
url = {https://link.springer.com/article/10.1007/s10489-021-03097-5},
year = {2022},
date = {2022-08-01},
urldate = {2022-08-01},
journal = {Applied Intelligence },
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zheng, Kai; Zhao, Haochen; Zhao, Qichang; Wang, Bin; Gao, Xin; Wang, Jianxin
NASMDR: a framework for miRNA-drug resistance prediction using efficient neural architecture search and graph isomorphism networks Journal Article
In: Brief Bioinform., 2022.
@article{ZhengBriefBIio2022,
title = {NASMDR: a framework for miRNA-drug resistance prediction using efficient neural architecture search and graph isomorphism networks },
author = {Kai Zheng and Haochen Zhao and Qichang Zhao and Bin Wang and Xin Gao and Jianxin Wang},
url = {https://pubmed.ncbi.nlm.nih.gov/35998922/},
doi = {10.1093/bib/bbac338},
year = {2022},
date = {2022-08-01},
urldate = {2022-08-01},
journal = {Brief Bioinform.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Turner, Jack
Efficient Neural Networks PhD Thesis
2022.
@phdthesis{TurnerPhD2022,
title = {Efficient Neural Networks},
author = {Jack Turner },
url = {https://era.ed.ac.uk/bitstream/handle/1842/39326/TurnerJ_2022.pdf?sequence=1&isAllowed=y},
year = {2022},
date = {2022-08-01},
urldate = {2022-08-01},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
Fakieh, Bahjat; Ragab, Mahmoud
Automated COVID-19 Classification Using Heap-Based Optimization with the Deep Transfer Learning Model Journal Article Forthcoming
In: Computational Intelligence and its Applications in Biomedical Engineering, vol. 2022, Forthcoming.
@article{FakiehCIN2022,
title = {Automated COVID-19 Classification Using Heap-Based Optimization with the Deep Transfer Learning Model},
author = {Bahjat Fakieh and Mahmoud Ragab},
url = {https://www.hindawi.com/journals/cin/2022/7508836/},
doi = {10.1155/2022/7508836},
year = {2022},
date = {2022-08-01},
urldate = {2022-08-01},
journal = {Computational Intelligence and its Applications in Biomedical Engineering},
volume = {2022},
keywords = {},
pubstate = {forthcoming},
tppubtype = {article}
}
Chen, J.; Gao, J.; Lyu, T.; Oloulade, B.; Hu, X.
AutoMSR: Auto Molecular Structure Representation Learning for Multi-label Metabolic Pathway Prediction Journal Article
In: IEEE/ACM Transactions on Computational Biology and Bioinformatics, no. 01, pp. 1-11, 2022, ISSN: 1557-9964.
@article{9864145,
title = {AutoMSR: Auto Molecular Structure Representation Learning for Multi-label Metabolic Pathway Prediction},
author = {J. Chen and J. Gao and T. Lyu and B. Oloulade and X. Hu},
doi = {10.1109/TCBB.2022.3198119},
issn = {1557-9964},
year = {2022},
date = {2022-08-01},
urldate = {2022-08-01},
journal = {IEEE/ACM Transactions on Computational Biology and Bioinformatics},
number = {01},
pages = {1-11},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {It is significant to comprehend the relationship between metabolic pathway and molecular pathway for synthesizing new molecules, for instance optimizing drug metabolization. In bioinformatics fields, multi-label prediction of metabolic pathways is a typical manner to understand this relationship. Graph neural networks (GNNs) have become an effective method to extract molecular structure's features for multi-label prediction of metabolic pathways. Though GNNs can effectively capture structural features from molecular structure graphs, building a well-performed GNN model for a given molecular structure data set requires the manual design of the GNN architecture and fine-tuning of the hyperparameters, which are time-consuming and rely on expert experience. To address the above challenge, we design an end-to-end automatic molecular structure representation learning framework named AutoMSR that can design the optimal GNN model based on a given molecular structure data set without manual intervention. We propose a multi-seed age evolution (MSAE) search algorithm to identify the optimal GNN architecture from the GNN architecture subspace. For a given molecular structure data set, AutoMSR first uses MSAE to search the GNN architecture, and then it adopts a tree-structured parzen estimator to obtain the best hyperparameters in the hyperparameters subspace. Finally, AutoMSR automatically constructs the optimal GNN model based on the best GNN architecture and hyperparameters to extract the molecular structure features for multi-label metabolic pathway prediction. We test the performance of AutoMSR on the real data set KEGG. The experiment results show that AutoMSR outperforms baseline methods on different multi-label classification evaluation metrics.},
keywords = {computer architecture, task analysis, feature extraction, graph neural networks, prediction algorithms, drugs, architecture},
pubstate = {published},
tppubtype = {article}
}
Bechtel, M.; Weng, Q.; Yun, H.
DeepPicarMicro: Applying TinyML to Autonomous Cyber Physical Systems Proceedings Article
In: 2022 IEEE 28th International Conference on Embedded and Real-Time Computing Systems and Applications (RTCSA), pp. 120-127, IEEE Computer Society, Los Alamitos, CA, USA, 2022.
@inproceedings{9904754,
title = {DeepPicarMicro: Applying TinyML to Autonomous Cyber Physical Systems},
author = {M. Bechtel and Q. Weng and H. Yun},
url = {https://doi.ieeecomputersociety.org/10.1109/RTCSA55878.2022.00019},
doi = {10.1109/RTCSA55878.2022.00019},
year = {2022},
date = {2022-08-01},
urldate = {2022-08-01},
booktitle = {2022 IEEE 28th International Conference on Embedded and Real-Time Computing Systems and Applications (RTCSA)},
pages = {120-127},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
abstract = {Running deep neural networks (DNNs) on tiny Micro-controller Units (MCUs) is challenging due to their limitations in computing, memory, and storage capacity. Fortunately, recent advances in both MCU hardware and machine learning software frameworks make it possible to run fairly complex neural networks on modern MCUs, resulting in a new field of study widely known as TinyML. However, there have been few studies to show the potential for TinyML applications in cyber physical systems (CPS).In this paper, we present DeepPicarMicro, a small self-driving RC car testbed, which runs a convolutional neural network (CNN) on a Raspberry Pi Pico MCU. We apply a state-of-the-art DNN optimization to successfully fit the well-known PilotNet CNN architecture, which was used to drive NVIDIA’s real self-driving car, on the MCU. We apply a state-of-art network architecture search (NAS) approach to find further optimized networks that can effectively control the car in real-time in an end-to-end manner. From an extensive systematic experimental evaluation study, we observe an interesting relationship between the accuracy, latency, and control performance of a system. From this, we propose a joint optimization strategy that takes both accuracy and latency of a model in the network architecture search process for AI enabled CPS.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Li, Jolen; Galazis, Christopher; Popov, Illarion; Ovchinnikov, Lev; Vesnin, Sergey; Losev, Alexander; Goryanin, Igor
Dynamic Weight Agnostic Neural Networks and Medical Microwave Radiometry (MWR) for Breast Cancer Diagnostics Technical Report
2022.
@techreport{LiPrePrint2022,
title = {Dynamic Weight Agnostic Neural Networks and Medical Microwave Radiometry (MWR) for Breast Cancer Diagnostics},
author = { Jolen Li and Christopher Galazis and Illarion Popov and Lev Ovchinnikov and Sergey Vesnin and Alexander Losev and Igor Goryanin},
url = {https://www.preprints.org/manuscript/202207.0370/v1},
doi = {10.20944/preprints202207.0370.v1},
year = {2022},
date = {2022-07-29},
urldate = {2022-07-29},
howpublished = {Preprints 2022},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Jeong, Joonhyun; Yu, Joonsang; Han, Dongyoon; Yoo, YoungJoon
Neural Architecture Search with Loss Flatness-aware Measure Proceedings Article
In: DyNN workshop at the 39th International Conference on Machine Learning, 2022.
@inproceedings{JeongDyNNICML2022,
title = {Neural Architecture Search with Loss Flatness-aware Measure},
author = {Joonhyun Jeong and Joonsang Yu and Dongyoon Han and YoungJoon Yoo},
url = {https://dynn-icml2022.github.io/papers/paper_11.pdf},
year = {2022},
date = {2022-07-26},
urldate = {2022-07-26},
booktitle = {DyNN workshop at the 39th International Conference on Machine Learning},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Peng, Cheng; Li, Yangyang; Shang, Ronghua; Jiao, Licheng
ReCNAS: Resource-Constrained Neural Architecture Search Based on Differentiable Annealing and Dynamic Pruning Journal Article
In: IEEE Transactions on Neural Networks and Learning Systems, 2022.
@article{PengTNLS2022,
title = {ReCNAS: Resource-Constrained Neural Architecture Search Based on Differentiable Annealing and Dynamic Pruning},
author = {Cheng Peng and Yangyang Li and Ronghua Shang and Licheng Jiao},
url = {https://pubmed.ncbi.nlm.nih.gov/35862327/},
doi = {10.1109/TNNLS.2022.3192169},
year = {2022},
date = {2022-07-26},
urldate = {2022-07-26},
journal = {IEEE Transactions on Neural Networks and Learning Systems},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Suchopárová, Gabriela; Neruda, Roman
Graph Embedding for Neural Architecture Search with Input-Output Information Proceedings Article
In: 1st International Conference on Automated Machine Learning, Late-Breaking Workshop, 2022.
@inproceedings{SuchopárováAML22,
title = {Graph Embedding for Neural Architecture Search with Input-Output Information},
author = {Gabriela Suchopárová and Roman Neruda},
url = {https://automl.cc/wp-content/uploads/2022/07/graph_embedding_for_neural_arc.pdf},
year = {2022},
date = {2022-07-21},
urldate = {2022-07-21},
booktitle = {1st International Conference on Automated Machine Learning, Late-Breaking Workshop},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
van Gastel, Rob; Vanschoren, Joaquin
Regularized Meta-Learning for Neural Architecture Search Proceedings Article
In: 1st International Conference on Automated Machine Learning, Late-Breaking Workshop, 2022.
@inproceedings{GastelAML22,
title = {Regularized Meta-Learning for Neural Architecture Search},
author = {Rob van Gastel and Joaquin Vanschoren},
url = {https://automl.cc/wp-content/uploads/2022/07/regularized_meta_learning_for_.pdf},
year = {2022},
date = {2022-07-21},
urldate = {2022-07-21},
booktitle = {1st International Conference on Automated Machine Learning, Late-Breaking Workshop},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chatzimichailidis, Avraam; Zela, Arber; Keuper, Janis; Yang, Yang
GSparsity: Unifying Network Pruning and Neural Architecture Search by Group Sparsity Proceedings Article
In: 1st International Conference on Automated Machine Learning, Late-Breaking Workshop, 2022.
@inproceedings{Chatzimichailidisaml22,
title = {GSparsity: Unifying Network Pruning and Neural Architecture Search by Group Sparsity},
author = {Avraam Chatzimichailidis and Arber Zela and Janis Keuper and Yang Yang},
url = {https://automl.cc/wp-content/uploads/2022/07/gsparsity_unifying_network_pru.pdf},
year = {2022},
date = {2022-07-21},
urldate = {2022-07-21},
booktitle = {1st International Conference on Automated Machine Learning, Late-Breaking Workshop},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Panyue; Wang, Rui; Zhao, Ping; Liu, Guanming; Wei, Zhihua
Searching Efficient Dynamic Graph CNN for Point Cloud Processing Proceedings Article
In: 1st International Conference on Automated Machine Learning, Late-Breaking Workshop, 2022.
@inproceedings{ChenAutoML22,
title = {Searching Efficient Dynamic Graph CNN for Point Cloud Processing},
author = {Panyue Chen and Rui Wang and Ping Zhao and Guanming Liu and Zhihua Wei},
url = {https://automl.cc/wp-content/uploads/2022/07/searching_efficient_dynamic_gr.pdf},
year = {2022},
date = {2022-07-21},
urldate = {2022-07-21},
booktitle = {1st International Conference on Automated Machine Learning, Late-Breaking Workshop},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhang, Tianning; Kee, Chun Yun; Ang, Yee Sin; Li, Erping; Ang, Lay Kee
Symmetry Enhanced Network Architecture Search for Complex Metasurface Design Journal Article
In: IEEE Access, 2022.
@article{ZhangIEEEACCESS2022,
title = {Symmetry Enhanced Network Architecture Search for Complex Metasurface Design},
author = {Tianning Zhang and Chun Yun Kee and Yee Sin Ang and Erping Li and Lay Kee Ang},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9826756},
year = {2022},
date = {2022-07-18},
urldate = {2022-07-18},
journal = {IEEE Access},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gupta, Abhibha; Sheth, Parth; Xie, Pengtao
Neural architecture search for pneumonia diagnosis from chest X-rays Journal Article
In: Scientific Reports, vol. 12, 2022.
@article{Gupta2022,
title = {Neural architecture search for pneumonia diagnosis from chest X-rays},
author = {Abhibha Gupta and Parth Sheth and Pengtao Xie},
url = {https://www.nature.com/articles/s41598-022-15341-0},
year = {2022},
date = {2022-07-04},
urldate = {2022-07-04},
journal = {Scientific Reports},
volume = {12},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gridin, Ivan
Automated Deep Learning Using Neural Network Intelligence Book
2022, ISBN: 978-1-4842-8148-2.
@book{GridinADLbook,
title = {Automated Deep Learning Using Neural Network Intelligence},
author = { Ivan Gridin },
url = {https://link.springer.com/content/pdf/10.1007/978-1-4842-8149-9.pdf},
isbn = {978-1-4842-8148-2},
year = {2022},
date = {2022-07-01},
urldate = {2022-07-01},
keywords = {},
pubstate = {published},
tppubtype = {book}
}
Liu, Jia; Jin, Yaochu
Bi-fidelity Multi-objective Neural Architecture Search for Adversarial Robustness with Surrogate as a Helper-objective Proceedings Article
In: 2022.
@inproceedings{LiIjCAI2022,
title = {Bi-fidelity Multi-objective Neural Architecture Search for Adversarial Robustness with Surrogate as a Helper-objective},
author = {Jia Liu and Yaochu Jin},
url = {https://federated-learning.org/fl-ijcai-2022/Papers/FL-IJCAI-22_paper_22.pdf},
year = {2022},
date = {2022-07-01},
urldate = {2022-07-01},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Fayyazifar, Najmeh
Deep learning and neural architecture search for cardiac arrhythmias classification PhD Thesis
2022.
@phdthesis{FayyazifarPhD,
title = {Deep learning and neural architecture search for cardiac arrhythmias classification},
author = {Najmeh Fayyazifar},
url = {https://ro.ecu.edu.au/theses/2553/},
year = {2022},
date = {2022-07-01},
urldate = {2022-07-01},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
Wen, Long; Wang, You; Li, Xinyu
A new automatic convolutional neural network based on deep reinforcement learning for fault diagnosis Journal Article
In: Frontiers of Mechanical Engineering, vol. 17, 2022.
@article{WenFME2022,
title = {A new automatic convolutional neural network based on deep reinforcement learning for fault diagnosis},
author = { Long Wen and You Wang and Xinyu Li },
url = {https://link.springer.com/article/10.1007/s11465-022-0673-7},
year = {2022},
date = {2022-07-01},
urldate = {2022-07-01},
journal = {Frontiers of Mechanical Engineering},
volume = {17},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kyriakides, George
Parallel and Distributed Methods for Autonomous Design of Artificial Neural Networks PhD Thesis
2022.
@phdthesis{GeorgeKyriakides,
title = {Parallel and Distributed Methods for Autonomous Design of Artificial Neural Networks},
author = {George Kyriakides},
url = {https://dspace.lib.uom.gr/bitstream/2159/27216/5/KyriakidesGeorgePhD2022.pdf},
year = {2022},
date = {2022-07-01},
urldate = {2022-07-01},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}
Sun, Zhenhong; Lin, Ming; Sun, Xiuyu; Tan, Zhiyu; Li, Hao; Jin, Rong
MAE-DET: Revisiting Maximum Entropy Principle in Zero-Shot NAS for Efficient Object Detection Proceedings Article
In: Chaudhuri, Kamalika; Jegelka, Stefanie; Song, Le; Szepesvari, Csaba; Niu, Gang; Sabato, Sivan (Ed.): Proceedings of the 39th International Conference on Machine Learning, pp. 20810–20826, PMLR, 2022.
@inproceedings{pmlr-v162-sun22c,
title = {MAE-DET: Revisiting Maximum Entropy Principle in Zero-Shot NAS for Efficient Object Detection},
author = {Zhenhong Sun and Ming Lin and Xiuyu Sun and Zhiyu Tan and Hao Li and Rong Jin},
editor = {Kamalika Chaudhuri and Stefanie Jegelka and Le Song and Csaba Szepesvari and Gang Niu and Sivan Sabato},
url = {https://proceedings.mlr.press/v162/sun22c.html},
year = {2022},
date = {2022-07-01},
urldate = {2022-07-01},
booktitle = {Proceedings of the 39th International Conference on Machine Learning},
volume = {162},
pages = {20810--20826},
publisher = {PMLR},
series = {Proceedings of Machine Learning Research},
abstract = {In object detection, the detection backbone consumes more than half of the overall inference cost. Recent researches attempt to reduce this cost by optimizing the backbone architecture with the help of Neural Architecture Search (NAS). However, existing NAS methods for object detection require hundreds to thousands of GPU hours of searching, making them impractical in fast-paced research and development. In this work, we propose a novel zero-shot NAS method to address this issue. The proposed method, named MAE-DET, automatically designs efficient detection backbones via the Maximum Entropy Principle without training network parameters, reducing the architecture design cost to nearly zero yet delivering the state-of-the-art (SOTA) performance. Under the hood, MAE-DET maximizes the differential entropy of detection backbones, leading to a better feature extractor for object detection under the same computational budgets. After merely one GPU day of fully automatic design, MAE-DET innovates SOTA detection backbones on multiple detection benchmark datasets with little human intervention. Comparing to ResNet-50 backbone, MAE-DET is $+2.0%$ better in mAP when using the same amount of FLOPs/parameters, and is $1.54$ times faster on NVIDIA V100 at the same mAP. Code and pre-trained models are available here (https://github.com/alibaba/lightweight-neural-architecture-search).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
