Maintained by Difan Deng and Marius Lindauer.
The following list considers papers related to neural architecture search. It is by no means complete. If you miss a paper on the list, please let us know.
Please note that although NAS methods steadily improve, the quality of empirical evaluations in this field is still lagging behind compared to other areas in machine learning, AI and optimization. We would therefore like to share some best practices for empirical evaluations of NAS methods, which we believe will facilitate sustained and measurable progress in the field. If you are interested in a teaser, please read our blog post or directly jump to our checklist.
Transformers have gained increasing popularity in different domains. For a comprehensive list of papers focusing on Neural Architecture Search for Transformer-Based spaces, the awesome-transformer-search repo is all you need.
2022
Chen, Jiamin; Gao, Jianliang; Chen, Yibo; Oloulade, Babatounde MOCTARD; Lyu, Tengfei; Li, Zhao
Auto-GNAS: A Parallel Graph Neural Architecture Search Framework Journal Article
In: IEEE Transactions on Parallel and Distributed Systems, pp. 1-1, 2022.
@article{9714826,
  title     = {{Auto-GNAS}: A Parallel Graph Neural Architecture Search Framework},
  author    = {Jiamin Chen and Jianliang Gao and Yibo Chen and Babatounde MOCTARD Oloulade and Tengfei Lyu and Zhao Li},
  url       = {https://ieeexplore.ieee.org/abstract/document/9714826},
  doi       = {10.1109/TPDS.2022.3151895},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {IEEE Transactions on Parallel and Distributed Systems},
  pages     = {1--1},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Kim, Youngkee; Yun, Won Joon; Lee, Youn Kyu; Kim, Joongheon
Two-Stage Architectural Fine-Tuning with Neural Architecture Search using Early-Stopping in Image Classification Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2202-08604,
  title         = {Two-Stage Architectural Fine-Tuning with Neural Architecture Search using Early-Stopping in Image Classification},
  author        = {Youngkee Kim and Won Joon Yun and Youn Kyu Lee and Joongheon Kim},
  url           = {https://arxiv.org/abs/2202.08604},
  year          = {2022},
  date          = {2022-01-01},
  urldate       = {2022-01-01},
  institution   = {arXiv},
  eprint        = {2202.08604},
  archiveprefix = {arXiv},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {techreport}
}
Kim, Jae Kwan; Ahn, Wonbin; Park, Sangin; Lee, Soo-Hong; Kim, Laehyun
Early Prediction of Sepsis Onset Using Neural Architecture Search Based on Genetic Algorithms Journal Article
In: International Journal of Environmental Research and Public Health, vol. 19, no. 4, 2022, ISSN: 1660-4601.
@article{ijerph19042349,
  title     = {Early Prediction of Sepsis Onset Using Neural Architecture Search Based on Genetic Algorithms},
  author    = {Jae Kwan Kim and Wonbin Ahn and Sangin Park and Soo-Hong Lee and Laehyun Kim},
  url       = {https://www.mdpi.com/1660-4601/19/4/2349},
  doi       = {10.3390/ijerph19042349},
  issn      = {1660-4601},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {International Journal of Environmental Research and Public Health},
  volume    = {19},
  number    = {4},
  pages     = {2349},
  abstract  = {Sepsis is a life-threatening condition with a high mortality rate. Early prediction and treatment are the most effective strategies for increasing survival rates. This paper proposes a neural architecture search (NAS) model to predict the onset of sepsis with a low computational cost and high search performance by applying a genetic algorithm (GA). The proposed model shares the weights of all possible connection nodes internally within the neural network. Externally, the search cost is reduced through the weight-sharing effect between the genotypes of the GA. A predictive analysis was performed using the Medical Information Mart for Intensive Care III (MIMIC-III), a medical time-series dataset, with the primary objective of predicting sepsis onset 3 h before occurrence. In addition, experiments were conducted under various prediction times (0-12 h) for comparison. The proposed model exhibited an area under the receiver operating characteristic curve (AUROC) score of 0.94 (95% CI: 0.92-0.96) for 3 h, which is 0.31-0.26 higher than the scores obtained using the Sequential Organ Failure Assessment (SOFA), quick SOFA (qSOFA), and Simplified Acute Physiology Score (SAPS) II scoring systems. Furthermore, the proposed model exhibited a 12% improvement in the AUROC value over a simple model based on the long short-term memory neural network. Additionally, it is not only optimally searchable for sepsis onset prediction, but also outperforms conventional models that use similar predictive purposes and datasets. Notably, it is sufficiently robust to shape changes in the input data and has less structural dependence.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Dai, Liuyao; Cheng, Quan; Wang, Yuhang; Huang, Gengbin; Zhou, Junzhuo; Li, Kai; Mao, Wei; Yu, Hao
An Energy-Efficient Bit-Split-and-Combination Systolic Accelerator for NAS-Based Multi-Precision Convolution Neural Networks Proceedings Article
In: 2022 27th Asia and South Pacific Design Automation Conference (ASP-DAC), pp. 448-453, 2022.
@inproceedings{9712509,
  title     = {An Energy-Efficient Bit-Split-and-Combination Systolic Accelerator for {NAS}-Based Multi-Precision Convolution Neural Networks},
  author    = {Liuyao Dai and Quan Cheng and Yuhang Wang and Gengbin Huang and Junzhuo Zhou and Kai Li and Wei Mao and Hao Yu},
  url       = {https://ieeexplore.ieee.org/abstract/document/9712509},
  doi       = {10.1109/ASP-DAC52403.2022.9712509},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  booktitle = {2022 27th Asia and South Pacific Design Automation Conference (ASP-DAC)},
  pages     = {448--453},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lee, Jooyeon; Park, Junsang; Lee, Seunghyun; Kung, Jaeha
Implication of Optimizing NPU Dataflows on Neural Architecture Search for Mobile Devices Journal Article
In: ACM Trans. Des. Autom. Electron. Syst., 2022, ISSN: 1084-4309, (Just Accepted).
@article{10.1145/3513085,
  title     = {Implication of Optimizing {NPU} Dataflows on Neural Architecture Search for Mobile Devices},
  author    = {Jooyeon Lee and Junsang Park and Seunghyun Lee and Jaeha Kung},
  url       = {https://doi.org/10.1145/3513085},
  doi       = {10.1145/3513085},
  issn      = {1084-4309},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {ACM Trans. Des. Autom. Electron. Syst.},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  abstract  = {Recent advances in deep learning have made it possible to implement artificial intelligence in mobile devices. Many studies have put a lot of effort into developing lightweight deep learning models optimized for mobile devices. To overcome the performance limitations of manually designed deep learning models, an automated search algorithm, called neural architecture search (NAS), has been proposed. However, studies on the effect of hardware architecture of the mobile device on the performance of NAS have been less explored. In this paper, we show the importance of optimizing a hardware architecture, namely NPU dataflow, when searching for a more accurate yet fast deep learning model. To do so, we first implement an optimization framework, named FlowOptimizer, for generating a best possible NPU dataflow for a given deep learning operator. Then, we utilize this framework during the latency-aware NAS to find the model with the highest accuracy satisfying the latency constraint. As a result, we show that the searched model with FlowOptimizer outperforms the performance by 87.1% and 92.3% on average compared to the searched model with NVDLA and Eyeriss, respectively, with better accuracy on a proxy dataset. We also show that the searched model can be transferred to a larger model to classify a more complex image dataset, i.e., ImageNet, achieving 0.2%/5.4% higher Top-1/Top-5 accuracy compared to MobileNetV2-1.0 with 3.6$\times$ lower latency.},
  note      = {Just Accepted},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Zhang, Chunhui; Yuan, Xiaoming; Zhang, Qianyun; Zhu, Guangxu; Cheng, Lei; Zhang, Ning
Towards Tailored Models on Private AIoT Devices: Federated Direct Neural Architecture Search Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2202-11490,
  title         = {Towards Tailored Models on Private {AIoT} Devices: Federated Direct Neural Architecture Search},
  author        = {Chunhui Zhang and Xiaoming Yuan and Qianyun Zhang and Guangxu Zhu and Lei Cheng and Ning Zhang},
  url           = {https://arxiv.org/abs/2202.11490},
  year          = {2022},
  date          = {2022-01-01},
  urldate       = {2022-01-01},
  institution   = {arXiv},
  eprint        = {2202.11490},
  archiveprefix = {arXiv},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {techreport}
}
Bosma, Martijn M. A.; Dushatskiy, Arkadiy; Grewal, Monika; Alderliesten, Tanja; Bosman, Peter A. N.
Mixed-Block Neural Architecture Search for Medical Image Segmentation Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2202-11401,
  title         = {Mixed-Block Neural Architecture Search for Medical Image Segmentation},
  author        = {Martijn M. A. Bosma and Arkadiy Dushatskiy and Monika Grewal and Tanja Alderliesten and Peter A. N. Bosman},
  url           = {https://arxiv.org/abs/2202.11401},
  year          = {2022},
  date          = {2022-01-01},
  urldate       = {2022-01-01},
  institution   = {arXiv},
  eprint        = {2202.11401},
  archiveprefix = {arXiv},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {techreport}
}
Sheng, Yi; Yang, Junhuan; Wu, Yawen; Mao, Kevin; Shi, Yiyu; Hu, Jingtong; Jiang, Weiwen; Yang, Lei
The Larger The Fairer? Small Neural Networks Can Achieve Fairness for Edge Devices Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2202-11317,
  title         = {The Larger The Fairer? Small Neural Networks Can Achieve Fairness for Edge Devices},
  author        = {Yi Sheng and Junhuan Yang and Yawen Wu and Kevin Mao and Yiyu Shi and Jingtong Hu and Weiwen Jiang and Lei Yang},
  url           = {https://arxiv.org/abs/2202.11317},
  year          = {2022},
  date          = {2022-01-01},
  urldate       = {2022-01-01},
  institution   = {arXiv},
  eprint        = {2202.11317},
  archiveprefix = {arXiv},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {techreport}
}
Zhao, Shixiong; Li, Fanxin; Chen, Xusheng; Shen, Tianxiang; Chen, Li; Wang, Sen; Zhang, Nicholas; Li, Cheng; Cui, Heming
NASPipe: High Performance and Reproducible Pipeline Parallel Supernet Training via Causal Synchronous Parallelism Proceedings Article
In: Proceedings of the 27th ACM International Conference on Architectural Support for Programming Languages and Operating Systems, pp. 374–387, Association for Computing Machinery, Lausanne, Switzerland, 2022, ISBN: 9781450392051.
@inproceedings{10.1145/3503222.3507735,
  title     = {{NASPipe}: High Performance and Reproducible Pipeline Parallel Supernet Training via Causal Synchronous Parallelism},
  author    = {Shixiong Zhao and Fanxin Li and Xusheng Chen and Tianxiang Shen and Li Chen and Sen Wang and Nicholas Zhang and Cheng Li and Heming Cui},
  url       = {https://doi.org/10.1145/3503222.3507735},
  doi       = {10.1145/3503222.3507735},
  isbn      = {9781450392051},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  booktitle = {Proceedings of the 27th ACM International Conference on Architectural Support for Programming Languages and Operating Systems},
  pages     = {374--387},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  series    = {ASPLOS 2022},
  abstract  = {Supernet training, a prevalent and important paradigm in Neural Architecture Search, embeds the whole DNN architecture search space into one monolithic supernet, iteratively activates a subset of the supernet (i.e., a subnet) for fitting each batch of data, and searches a high-quality subnet which meets specific requirements. Although training subnets in parallel on multiple GPUs is desirable for acceleration, there inherently exists a race hazard that concurrent subnets may access the same DNN layers. Existing systems support neither efficiently parallelizing subnets’ training executions, nor resolving the race hazard deterministically, leading to unreproducible training procedures and potentially non-trivial accuracy loss. We present NASPipe, the first high-performance and reproducible distributed supernet training system via causal synchronous parallel (CSP) pipeline scheduling abstraction: NASPipe partitions a supernet across GPUs and concurrently executes multiple generated sub-tasks (subnets) in a pipelined manner; meanwhile, it oversees the correlations between the subnets and deterministically resolves any causal dependency caused by subnets’ layer sharing. To obtain high performance, NASPipe’s CSP scheduler exploits the fact that the larger a supernet spans, the fewer dependencies manifest between chronologically close subnets; therefore, it aggressively schedules the subnets with larger chronological orders into execution, only if they are not causally dependent on unfinished precedent subnets. Moreover, to relieve the excessive GPU memory burden for holding the whole supernet’s parameters, NASPipe uses a context switch technique that stashes the whole supernet in CPU memory, precisely predicts the subnets’ schedule, and pre-fetches/evicts a subnet before/after its execution. 
The evaluation shows that NASPipe is the only system that retains supernet training reproducibility, while achieving a comparable and even higher performance (up to 7.8X) compared to three recent pipeline training systems (e.g., GPipe).},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Zhang, Wentao; Shen, Yu; Lin, Zheyu; Li, Yang; Li, Xiaosen; Ouyang, Wen; Tao, Yangyu; Yang, Zhi; Cui, Bin
PaSca: a Graph Neural Architecture Search System under the Scalable Paradigm Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2203-00638,
  title         = {{PaSca}: a Graph Neural Architecture Search System under the Scalable Paradigm},
  author        = {Wentao Zhang and Yu Shen and Zheyu Lin and Yang Li and Xiaosen Li and Wen Ouyang and Yangyu Tao and Zhi Yang and Bin Cui},
  url           = {https://doi.org/10.48550/arXiv.2203.00638},
  doi           = {10.48550/arXiv.2203.00638},
  year          = {2022},
  date          = {2022-01-01},
  urldate       = {2022-01-01},
  institution   = {arXiv},
  eprint        = {2203.00638},
  archiveprefix = {arXiv},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {techreport}
}
Zhang, Tianning; Ang, Yee Sin; Li, Erping; Kee, Chun Yun; Ang, L. K.
SUTD-PRCM Dataset and Neural Architecture Search Approach for Complex Metasurface Design Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2203-00002,
  title         = {{SUTD-PRCM} Dataset and Neural Architecture Search Approach for Complex Metasurface Design},
  author        = {Tianning Zhang and Yee Sin Ang and Erping Li and Chun Yun Kee and L. K. Ang},
  url           = {https://doi.org/10.48550/arXiv.2203.00002},
  doi           = {10.48550/arXiv.2203.00002},
  year          = {2022},
  date          = {2022-01-01},
  urldate       = {2022-01-01},
  institution   = {arXiv},
  eprint        = {2203.00002},
  archiveprefix = {arXiv},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {techreport}
}
Huang, Yongdong; Li, Yuanzhan; Cao, Xulong; Zhang, Siyu; Cai, Shen; Lu, Ting; Liu, Yuqi
An Efficient End-to-End 3D Model Reconstruction based on Neural Architecture Search Technical Report
2022.
@techreport{HuangNAS,
  title         = {An Efficient End-to-End {3D} Model Reconstruction based on Neural Architecture Search},
  author        = {Yongdong Huang and Yuanzhan Li and Xulong Cao and Siyu Zhang and Shen Cai and Ting Lu and Yuqi Liu},
  url           = {https://arxiv.org/abs/2202.13313},
  doi           = {10.48550/ARXIV.2202.13313},
  year          = {2022},
  date          = {2022-01-01},
  urldate       = {2022-01-01},
  institution   = {arXiv},
  eprint        = {2202.13313},
  archiveprefix = {arXiv},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {techreport}
}
Seong, Jaeho; Lee, Chaehyun; Han, Dong Seog
Neural Architecture Search for Real-Time Driver Behavior Recognition Proceedings Article
In: 2022 International Conference on Artificial Intelligence in Information and Communication (ICAIIC), pp. 104-108, 2022.
@inproceedings{9722706,
  title     = {Neural Architecture Search for Real-Time Driver Behavior Recognition},
  author    = {Jaeho Seong and Chaehyun Lee and Dong Seog Han},
  url       = {https://ieeexplore.ieee.org/abstract/document/9722706},
  doi       = {10.1109/ICAIIC54071.2022.9722706},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  booktitle = {2022 International Conference on Artificial Intelligence in Information and Communication (ICAIIC)},
  pages     = {104--108},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Cummings, Daniel; Sridhar, Sharath Nittur; Sarah, Anthony; Szankin, Maciej
Accelerating Neural Architecture Exploration Across Modalities Using Genetic Algorithms Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2202-12934,
  title         = {Accelerating Neural Architecture Exploration Across Modalities Using Genetic Algorithms},
  author        = {Daniel Cummings and Sharath Nittur Sridhar and Anthony Sarah and Maciej Szankin},
  url           = {https://arxiv.org/abs/2202.12934},
  year          = {2022},
  date          = {2022-01-01},
  urldate       = {2022-01-01},
  institution   = {arXiv},
  eprint        = {2202.12934},
  archiveprefix = {arXiv},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {techreport}
}
Zhao, Yaqin; Feng, Liqi; Tang, Jiaxi; Zhao, Wenxuan; Ding, Zhipeng; Li, Ao; Zheng, Zhaoxiang
Automatically recognizing four-legged animal behaviors to enhance welfare using spatial temporal graph convolutional networks Journal Article
In: Applied Animal Behaviour Science, vol. 249, pp. 105594, 2022, ISSN: 0168-1591.
@article{ZHAO2022105594,
  title     = {Automatically recognizing four-legged animal behaviors to enhance welfare using spatial temporal graph convolutional networks},
  author    = {Yaqin Zhao and Liqi Feng and Jiaxi Tang and Wenxuan Zhao and Zhipeng Ding and Ao Li and Zhaoxiang Zheng},
  url       = {https://www.sciencedirect.com/science/article/pii/S0168159122000521},
  doi       = {10.1016/j.applanim.2022.105594},
  issn      = {0168-1591},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {Applied Animal Behaviour Science},
  volume    = {249},
  pages     = {105594},
  abstract  = {Automatically recognizing animal behaviors in zoos and in national natural reserves can provide valuable insight into their welfare for facilitating scientific decision-making processes in animal management. Due to the difficulty of capturing massive amounts of animal video footage, a few existing methods have identified the behaviors of several different animal species in static images, but little is known about video-based animal behavior recognition. An animal's behavior is carried out in consecutive frames rather than in a single image; thus, image-based animal behavior recognition methods have low recognition accuracy. To address this dilemma, we not only construct the first skeleton-based dynamic multispecies dataset (Animal-Skeleton) but also propose a novel scheme that automatically designs the best spatial-temporal graph convolutional network (GCN) architecture with neural architecture search (NAS) to perform animal behavior recognition, named Animal-Nas for short. This is the first time that GCNs with NAS have been introduced into the animal behavior recognition task. To alleviate the trial-and-error cost of manually designing the network structure, we turn to NAS and design a novel search space with graph-based cells. Furthermore, we adopt a differentiable architecture search strategy to automatically search the cost-efficient spatial-temporal graph convolutional network structure. To evaluate the performance of the proposed model, we conduct extensive experiments on Animal-Skeleton datasets from three perspectives: model accuracy, parameter amount and stability. The results show that our model can achieve state-of-the-art performance with fewer parameters.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Sarah, Anthony; Cummings, Daniel; Sridhar, Sharath Nittur; Sundaresan, Sairam; Szankin, Maciej; Webb, Tristan; Munoz, J. Pablo
A Hardware-Aware System for Accelerating Deep Neural Network Optimization Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2202-12954,
  title         = {A Hardware-Aware System for Accelerating Deep Neural Network Optimization},
  author        = {Anthony Sarah and Daniel Cummings and Sharath Nittur Sridhar and Sairam Sundaresan and Maciej Szankin and Tristan Webb and J. Pablo Munoz},
  url           = {https://arxiv.org/abs/2202.12954},
  year          = {2022},
  date          = {2022-01-01},
  urldate       = {2022-01-01},
  institution   = {arXiv},
  eprint        = {2202.12954},
  archiveprefix = {arXiv},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {techreport}
}
Sinha, Nilotpal; Chen, Kuan-Wen
Neural Architecture Search using Progressive Evolution Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2203-01559,
  title         = {Neural Architecture Search using Progressive Evolution},
  author        = {Nilotpal Sinha and Kuan-Wen Chen},
  url           = {https://doi.org/10.48550/arXiv.2203.01559},
  doi           = {10.48550/arXiv.2203.01559},
  year          = {2022},
  date          = {2022-01-01},
  urldate       = {2022-01-01},
  institution   = {arXiv},
  eprint        = {2203.01559},
  archiveprefix = {arXiv},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {techreport}
}
Ye, Peng; Li, Baopu; Li, Yikang; Chen, Tao; Fan, Jiayuan; Ouyang, Wanli
(beta)-DARTS: Beta-Decay Regularization for Differentiable Architecture Search Proceedings Article
In: CVPR2022, 2022.
@inproceedings{DBLP:journals/corr/abs-2203-01665,
  title     = {{$\beta$}-{DARTS}: Beta-Decay Regularization for Differentiable Architecture Search},
  author    = {Peng Ye and Baopu Li and Yikang Li and Tao Chen and Jiayuan Fan and Wanli Ouyang},
  url       = {https://openaccess.thecvf.com/content/CVPR2022/papers/Ye_b-DARTS_Beta-Decay_Regularization_for_Differentiable_Architecture_Search_CVPR_2022_paper.pdf},
  doi       = {10.48550/arXiv.2203.01665},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  booktitle = {CVPR2022},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Szwarcman, Daniela; Civitarese, Daniel; Vellasco, Marley
Quantum-inspired evolutionary algorithm applied to neural architecture search Journal Article
In: Applied Soft Computing, vol. 120, pp. 108674, 2022, ISSN: 1568-4946.
@article{SZWARCMAN2022108674,
  title     = {Quantum-inspired evolutionary algorithm applied to neural architecture search},
  author    = {Daniela Szwarcman and Daniel Civitarese and Marley Vellasco},
  url       = {https://www.sciencedirect.com/science/article/pii/S1568494622001478},
  doi       = {10.1016/j.asoc.2022.108674},
  issn      = {1568-4946},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {Applied Soft Computing},
  volume    = {120},
  pages     = {108674},
  abstract  = {The success of machine learning models over the last few years is mostly related to the significant progress of deep neural networks. These powerful and flexible models can even surpass human-level performance in tasks such as image recognition and strategy games. However, experts need to spend considerable time and resources to design the network structure. The demand for new architectures drives interest in automating this design process. Researchers have proposed new algorithms to address the neural architecture search (NAS) problem, including efforts to reduce the high computational cost of such methods. A common approach to improve efficiency is to reduce the search space with the help of expert knowledge, searching for cells rather than entire networks. Motivated by the faster convergence promoted by quantum-inspired evolutionary methods, the Q-NAS algorithm was proposed to address the NAS problem without relying on cell search. In this work, we consolidate Q-NAS, adding a new penalization feature, enhancing its retraining scheme, and also investigating more challenging search spaces than before. In CIFAR-10, we reached 93.85% of test accuracy in 67 GPU days, considering the addition of an early-stopping mechanism. We also applied Q-NAS to CIFAR-100, without modifying the parameters, and our best accuracy was 74.23%, which is comparable to ResNet164. The enhancements and results presented in this work show that Q-NAS can automatically generate network architectures that outperform hand-designed models for CIFAR-10 and CIFAR-100. Also, compared to other NAS methods, Q-NAS results are promising regarding the balance between performance, runtime efficiency, and automation. We believe that our results enrich the discussion on this balance, considering alternatives to the cell search approach.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Huynh, Lam; Rahtu, Esa; Matas, Jiri; Heikkilä, Janne
Fast Neural Architecture Search for Lightweight Dense Prediction Networks Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2203-01994,
  title         = {Fast Neural Architecture Search for Lightweight Dense Prediction Networks},
  author        = {Lam Huynh and Esa Rahtu and Jiri Matas and Janne Heikkilä},
  url           = {https://doi.org/10.48550/arXiv.2203.01994},
  doi           = {10.48550/arXiv.2203.01994},
  year          = {2022},
  date          = {2022-01-01},
  urldate       = {2022-01-01},
  institution   = {arXiv},
  eprint        = {2203.01994},
  archiveprefix = {arXiv},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {techreport}
}
Lin, Ke; A, Yong; Gan, Zhuoxin; Jiang, Yingying
WPNAS: Neural Architecture Search by jointly using Weight Sharing and Predictor Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2203-02086,
  title         = {{WPNAS}: Neural Architecture Search by jointly using Weight Sharing and Predictor},
  author        = {Ke Lin and Yong A and Zhuoxin Gan and Yingying Jiang},
  url           = {https://doi.org/10.48550/arXiv.2203.02086},
  doi           = {10.48550/arXiv.2203.02086},
  year          = {2022},
  date          = {2022-01-01},
  urldate       = {2022-01-01},
  institution   = {arXiv},
  eprint        = {2203.02086},
  archiveprefix = {arXiv},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {techreport}
}
Chen, Xuehui; Niu, Xin; Jiang, Jingfei; Pan, Hengyue; Dong, Peijie; Wei, Zimian
Influence of Initialization and Modularization on the Performance of Network Morphism-Based Neural Architecture Search Proceedings Article
In: Yao, Jian; Xiao, Yang; You, Peng; Sun, Guang (Ed.): The International Conference on Image, Vision and Intelligent Systems (ICIVIS 2021), pp. 875–887, Springer Singapore, Singapore, 2022, ISBN: 978-981-16-6963-7.
@inproceedings{10.1007/978-981-16-6963-7_77,
  title     = {Influence of Initialization and Modularization on the Performance of Network Morphism-Based Neural Architecture Search},
  author    = {Xuehui Chen and Xin Niu and Jingfei Jiang and Hengyue Pan and Peijie Dong and Zimian Wei},
  editor    = {Jian Yao and Yang Xiao and Peng You and Guang Sun},
  url       = {https://link.springer.com/chapter/10.1007/978-981-16-6963-7_77},
  doi       = {10.1007/978-981-16-6963-7_77},
  isbn      = {978-981-16-6963-7},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  booktitle = {The International Conference on Image, Vision and Intelligent Systems (ICIVIS 2021)},
  pages     = {875--887},
  publisher = {Springer Singapore},
  address   = {Singapore},
  abstract  = {Neural Architecture Search (NAS), the process of automatic network architecture design, has enabled remarkable progress over the last years on Computer Vision tasks. In this paper, we propose a novel and efficient NAS framework based on network morphism to further improve the performance of NAS algorithms. Firstly, we design four modular structures termed RBNC block, CBNR block, BNRC block and RCBN block which correspond to four initial neural network architectures and four modular network morphism methods. Each block is composed of a ReLU layer, a Batch-Norm layer and a convolutional layer. Then we introduce network morphism to correlate different modular structures for constructing network architectures. Moreover, we study the influence of different initial neural network architectures and modular network morphism methods on the performance of network morphism-based NAS algorithms through comparative experiments and ablation experiments. Finally, we find that the network morphism-based NAS algorithm that uses CBNR block for initialization and modularization is the best method to improve performance. Our proposed method achieves a test accuracy of 95.84% on CIFAR-10 with least parameters (only 2.72 M) and fewer search costs (2 GPU-days) for network architecture search.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Sun, Jialiang; Jiang, Tingsong; Li, Chao; Zhou, Weien; Zhang, Xiaoya; Yao, Wen; Chen, Xiaoqian
Searching for Robust Neural Architectures via Comprehensive and Reliable Evaluation Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2203-03128,
  title         = {Searching for Robust Neural Architectures via Comprehensive and Reliable Evaluation},
  author        = {Jialiang Sun and Tingsong Jiang and Chao Li and Weien Zhou and Xiaoya Zhang and Wen Yao and Xiaoqian Chen},
  url           = {https://doi.org/10.48550/arXiv.2203.03128},
  doi           = {10.48550/arXiv.2203.03128},
  year          = {2022},
  date          = {2022-01-01},
  urldate       = {2022-01-01},
  institution   = {arXiv},
  eprint        = {2203.03128},
  archiveprefix = {arXiv},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {techreport}
}
Chebykin, Alexander; Alderliesten, Tanja; Bosman, Peter A. N.
Evolutionary Neural Cascade Search across Supernetworks Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2203-04011,
  title         = {Evolutionary Neural Cascade Search across Supernetworks},
  author        = {Alexander Chebykin and Tanja Alderliesten and Peter A. N. Bosman},
  url           = {https://doi.org/10.48550/arXiv.2203.04011},
  doi           = {10.48550/arXiv.2203.04011},
  year          = {2022},
  date          = {2022-01-01},
  urldate       = {2022-01-01},
  institution   = {arXiv},
  eprint        = {2203.04011},
  archiveprefix = {arXiv},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {techreport}
}
Xiao, Yuhan; Sun, Shang; Liao, TaoLin
Parameter search-based scaling network for self-supervised depth Proceedings Article
In: Mohiddin, Md Khaja; Chen, Siting; EL-Zoghdy, Said Fathy (Ed.): Third International Conference on Electronics and Communication; Network and Computer Technology (ECNCT 2021), pp. 463 – 467, International Society for Optics and Photonics SPIE, 2022.
@inproceedings{10.1117/12.2629190,
  title        = {Parameter search-based scaling network for self-supervised depth},
  author       = {Yuhan Xiao and Shang Sun and TaoLin Liao},
  editor       = {Md Khaja Mohiddin and Siting Chen and Said Fathy EL-Zoghdy},
  url          = {https://doi.org/10.1117/12.2629190},
  doi          = {10.1117/12.2629190},
  year         = {2022},
  date         = {2022-01-01},
  urldate      = {2022-01-01},
  booktitle    = {Third International Conference on Electronics and Communication; Network and Computer Technology (ECNCT 2021)},
  volume       = {12167},
  pages        = {463--467},
  publisher    = {SPIE},
  organization = {International Society for Optics and Photonics},
  keywords     = {},
  pubstate     = {published},
  tppubtype    = {inproceedings}
}
Javaheripi, Mojan; Shah, Shital; Mukherjee, Subhabrata; Religa, Tomasz L.; Mendes, Caio C. T.; Rosa, Gustavo H.; Bubeck, Sébastien; Koushanfar, Farinaz; Dey, Debadeepta
LiteTransformerSearch: Training-free On-device Search for Efficient Autoregressive Language Models Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2203-02094,
  title         = {{LiteTransformerSearch}: Training-free On-device Search for Efficient Autoregressive Language Models},
  author        = {Mojan Javaheripi and Shital Shah and Subhabrata Mukherjee and Tomasz L. Religa and Caio C. T. Mendes and Gustavo H. Rosa and Sébastien Bubeck and Farinaz Koushanfar and Debadeepta Dey},
  url           = {https://doi.org/10.48550/arXiv.2203.02094},
  doi           = {10.48550/arXiv.2203.02094},
  year          = {2022},
  date          = {2022-01-01},
  urldate       = {2022-01-01},
  institution   = {arXiv},
  eprint        = {2203.02094},
  archiveprefix = {arXiv},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {techreport}
}
Lopes, Vasco; Alexandre, Luís A.
Towards Less Constrained Macro-Neural Architecture Search Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2203-05508,
  title         = {Towards Less Constrained Macro-Neural Architecture Search},
  author        = {Vasco Lopes and Luís A. Alexandre},
  url           = {https://doi.org/10.48550/arXiv.2203.05508},
  doi           = {10.48550/arXiv.2203.05508},
  year          = {2022},
  date          = {2022-01-01},
  urldate       = {2022-01-01},
  institution   = {arXiv},
  eprint        = {2203.05508},
  archiveprefix = {arXiv},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {techreport}
}
Wu, Xixin; Hu, Shoukang; Wu, Zhiyong; Liu, Xunying; Meng, Helen
Neural Architecture Search for Speech Emotion Recognition Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2203-16928,
title = {Neural Architecture Search for Speech Emotion Recognition},
author = {Xixin Wu and Shoukang Hu and Zhiyong Wu and Xunying Liu and Helen Meng},
url = {https://doi.org/10.48550/arXiv.2203.16928},
doi = {10.48550/arXiv.2203.16928},
eprint = {2203.16928},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2203.16928},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Wei, Zimian; Pan, Hengyue; Niu, Xin; Dong, Peijie; Li, Dongsheng
UENAS: A Unified Evolution-based NAS Framework Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2203-04300,
title = {{UENAS}: A Unified Evolution-based {NAS} Framework},
author = {Zimian Wei and Hengyue Pan and Xin Niu and Peijie Dong and Dongsheng Li},
url = {https://doi.org/10.48550/arXiv.2203.04300},
doi = {10.48550/arXiv.2203.04300},
eprint = {2203.04300},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2203.04300},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Xiang, Tiange; Zhang, Chaoyi; Wang, Xinyi; Song, Yang; Liu, Dongnan; Huang, Heng; Cai, Weidong
Towards bi-directional skip connections in encoder-decoder architectures and beyond Journal Article
In: Medical Image Analysis, vol. 78, pp. 102420, 2022, ISSN: 1361-8415.
@article{XIANG2022102420,
title = {Towards bi-directional skip connections in encoder-decoder architectures and beyond},
author = {Tiange Xiang and Chaoyi Zhang and Xinyi Wang and Yang Song and Dongnan Liu and Heng Huang and Weidong Cai},
url = {https://www.sciencedirect.com/science/article/pii/S1361841522000718},
doi = {10.1016/j.media.2022.102420},
issn = {1361-8415},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Medical Image Analysis},
volume = {78},
pages = {102420},
abstract = {U-Net, as an encoder-decoder architecture with forward skip connections, has achieved promising results in various medical image analysis tasks. Many recent approaches have also extended U-Net with more complex building blocks, which typically increase the number of network parameters considerably. Such complexity makes the inference stage highly inefficient for clinical applications. Towards an effective yet economic segmentation network design, in this work, we propose backward skip connections that bring decoded features back to the encoder. Our design can be jointly adopted with forward skip connections in any encoder-decoder architecture forming a recurrence structure without introducing extra parameters. With the backward skip connections, we propose a U-Net based network family, namely Bi-directional O-shape networks, which set new benchmarks on multiple public medical imaging segmentation datasets. On the other hand, with the most plain architecture (BiO-Net), network computations inevitably increase along with the pre-set recurrence time. We have thus studied the deficiency bottleneck of such recurrent design and propose a novel two-phase Neural Architecture Search (NAS) algorithm, namely BiX-NAS, to search for the best multi-scale bi-directional skip connections. The ineffective skip connections are then discarded to reduce computational costs and speed up network inference. The finally searched BiX-Net yields the least network complexity and outperforms other state-of-the-art counterparts by large margins. We evaluate our methods on both 2D and 3D segmentation tasks in a total of six datasets. Extensive ablation studies have also been conducted to provide a comprehensive analysis for our proposed methods.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Sengar, Neha; Singh, Akriti; Yadav, Saumya; Dutta, Malay Kishore
Automated System for Face-Mask Detection Using Convolutional Neural Network Proceedings Article
In: Giri, Debasis; Choo, Kim-Kwang Raymond; Ponnusamy, Saminathan; Meng, Weizhi; Akleylek, Sedat; Maity, Santi Prasad (Ed.): Proceedings of the Seventh International Conference on Mathematics and Computing, pp. 373–380, Springer Singapore, Singapore, 2022, ISBN: 978-981-16-6890-6.
@inproceedings{10.1007/978-981-16-6890-6_28,
title = {Automated System for Face-Mask Detection Using Convolutional Neural Network},
author = {Neha Sengar and Akriti Singh and Saumya Yadav and Malay Kishore Dutta},
editor = {Debasis Giri and Kim-Kwang Raymond Choo and Saminathan Ponnusamy and Weizhi Meng and Sedat Akleylek and Santi Prasad Maity},
url = {https://link.springer.com/chapter/10.1007/978-981-16-6890-6_28},
isbn = {978-981-16-6890-6},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the Seventh International Conference on Mathematics and Computing},
pages = {373--380},
publisher = {Springer Singapore},
address = {Singapore},
abstract = {Coronavirus Disease 2019 (COVID-19) pandemic is affecting the health of the global population severely. It is one of the deadliest diseases in history and has severely affected all the countries. The only way to prevent the spread of corona is to cover faces and follow social distancing norms until a vaccine is developed. The face mask is effective in blocking the droplets that contain the COVID-19 virus. Hence, it is necessary to wear a face mask as a precautionary measure against it. In the proposed work, the face mask detection model is generated using an optimized neural network architecture for performing the classification task (mask or no mask). For training and model assessment, a dataset of 8695 images has been taken from four different sources. The model achieves a validation accuracy of 99.52%.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Girish, Sharath; Dey, Debadeepta; Joshi, Neel; Vineet, Vibhav; Shah, Shital; Mendes, Caio Cesar Teodoro; Shrivastava, Abhinav; Song, Yale
One Network Doesn't Rule Them All: Moving Beyond Handcrafted Architectures in Self-Supervised Learning Technical Report
2022.
@techreport{girish2022one,
title = {One Network Doesn't Rule Them All: Moving Beyond Handcrafted Architectures in Self-Supervised Learning},
author = {Sharath Girish and Debadeepta Dey and Neel Joshi and Vibhav Vineet and Shital Shah and Caio Cesar Teodoro Mendes and Abhinav Shrivastava and Yale Song},
url = {https://arxiv.org/abs/2203.08130},
eprint = {2203.08130},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {arXiv preprint arXiv:2203.08130},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Li, Zi; Li, Ziyang; Liu, Risheng; Luo, Zhongxuan; Fan, Xin
Automated Learning for Deformable Medical Image Registration by Jointly Optimizing Network Architectures and Objective Functions Technical Report
2022.
@techreport{li2022automated,
title = {Automated Learning for Deformable Medical Image Registration by Jointly Optimizing Network Architectures and Objective Functions},
author = {Zi Li and Ziyang Li and Risheng Liu and Zhongxuan Luo and Xin Fan},
url = {https://arxiv.org/abs/2203.06810},
eprint = {2203.06810},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {arXiv preprint arXiv:2203.06810},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Wang, Haoxiang; Wang, Yite; Sun, Ruoyu; Li, Bo
Global Convergence of MAML and Theory-Inspired Neural Architecture Search for Few-Shot Learning Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2203-09137,
title = {Global Convergence of {MAML} and Theory-Inspired Neural Architecture Search for Few-Shot Learning},
author = {Haoxiang Wang and Yite Wang and Ruoyu Sun and Bo Li},
url = {https://doi.org/10.48550/arXiv.2203.09137},
doi = {10.48550/arXiv.2203.09137},
eprint = {2203.09137},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2203.09137},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Shi, Jiachen; Zhou, Guoqiang; Bao, Shudi; Shen, Jun
Multi-SelfGAN: A Self-Guiding Neural Architecture Search Method for Generative Adversarial Networks with Multi-Controllers Journal Article
In: IEEE Transactions on Cognitive and Developmental Systems, pp. 1-1, 2022.
@article{9737565,
title = {{Multi-SelfGAN}: A Self-Guiding Neural Architecture Search Method for Generative Adversarial Networks with Multi-Controllers},
author = {Jiachen Shi and Guoqiang Zhou and Shudi Bao and Jun Shen},
url = {https://ieeexplore.ieee.org/abstract/document/9737565},
doi = {10.1109/TCDS.2022.3160475},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Cognitive and Developmental Systems},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Dong, Junwei; Hou, Boyu; Feng, Liang; Tang, Huajin; Tan, Kay Chen; Ong, Yew-Soon
A Cell-Based Fast Memetic Algorithm for Automated Convolutional Neural Architecture Design Journal Article
In: IEEE Transactions on Neural Networks and Learning Systems, pp. 1-14, 2022.
@article{9737315,
title = {A Cell-Based Fast Memetic Algorithm for Automated Convolutional Neural Architecture Design},
author = {Junwei Dong and Boyu Hou and Liang Feng and Huajin Tang and Kay Chen Tan and Yew-Soon Ong},
url = {https://ieeexplore.ieee.org/abstract/document/9737315},
doi = {10.1109/TNNLS.2022.3155230},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Neural Networks and Learning Systems},
pages = {1--14},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Mi, Jian-Xun; Feng, Jie; Huang, Ke-Yang
Designing efficient convolutional neural network structure: A survey Journal Article
In: Neurocomputing, vol. 489, pp. 139-156, 2022, ISSN: 0925-2312.
@article{MI2022139,
title = {Designing efficient convolutional neural network structure: A survey},
author = {Jian-Xun Mi and Jie Feng and Ke-Yang Huang},
url = {https://www.sciencedirect.com/science/article/pii/S0925231222003162},
doi = {10.1016/j.neucom.2021.08.158},
issn = {0925-2312},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Neurocomputing},
volume = {489},
pages = {139--156},
abstract = {As a powerful machine learning method, deep learning has attracted the attention of numerous researchers. While exploring a high-performance neural network model, the floating-point operations of a neural network model are also increasing. In recent years, many researchers have noticed that efficiency is also one of important indicators to measure the property of neural network models. Obviously, the efficient neural network model is more helpful to deploy on mobile and embedded devices. Therefore, the efficient neural network model becomes a hot research spot. In this paper, we review the methods related to the structural design of efficient convolution neural networks in recent years. According to the characteristics of these methods, we divide them into three kinds of methods: model pruning, efficient architecture, and neural architecture search. Detailed analyses of each method are presented to demonstrate their advantages and disadvantages. Then, we comprehensively compare them in detail and propose many suggestions about the design of the efficient convolution neural network model structure. Inspired by these suggestions, we built a new efficient neural network model, SharedNet. And the SharedNet obtains the best accuracy of manually-designed efficient CNN models on the ImageNet dataset.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Blumberg, Stefano B.; Lin, Hongxiang; Grussu, Francesco; Zhou, Yukun; Figini, Matteo; Alexander, Daniel C.
Progressive Subsampling for Oversampled Data - Application to Quantitative MRI Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2203-09268,
title = {Progressive Subsampling for Oversampled Data - Application to Quantitative {MRI}},
author = {Stefano B. Blumberg and Hongxiang Lin and Francesco Grussu and Yukun Zhou and Matteo Figini and Daniel C. Alexander},
url = {https://doi.org/10.48550/arXiv.2203.09268},
doi = {10.48550/arXiv.2203.09268},
eprint = {2203.09268},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2203.09268},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Vo-Ho, Viet-Khoa; Yamazaki, Kashu; Hoang, Hieu; Tran, Minh-Triet; Le, Ngan
Meta-Learning of NAS for Few-shot Learning in Medical Image Applications Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2203-08951,
title = {Meta-Learning of {NAS} for Few-shot Learning in Medical Image Applications},
author = {Viet-Khoa Vo-Ho and Kashu Yamazaki and Hieu Hoang and Minh-Triet Tran and Ngan Le},
url = {https://doi.org/10.48550/arXiv.2203.08951},
doi = {10.48550/arXiv.2203.08951},
eprint = {2203.08951},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2203.08951},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Chang, Qing; Peng, Junran; Xie, Lingxi; Sun, Jiajun; Yin, Haoran; Tian, Qi; Zhang, Zhaoxiang
DATA: Domain-Aware and Task-Aware Self-supervised Learning Proceedings Article
In: CVPR2022, 2022.
@inproceedings{DBLP:journals/corr/abs-2203-09041,
title = {{DATA}: Domain-Aware and Task-Aware Self-supervised Learning},
author = {Qing Chang and Junran Peng and Lingxi Xie and Jiajun Sun and Haoran Yin and Qi Tian and Zhaoxiang Zhang},
url = {https://openaccess.thecvf.com/content/CVPR2022/papers/Chang_DATA_Domain-Aware_and_Task-Aware_Self-Supervised_Learning_CVPR_2022_paper.pdf},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {CVPR2022},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lu, Zhenyu; Liang, Shaoyang; Yang, Qiang; Du, Bo
Evolving Block-Based Convolutional Neural Network for Hyperspectral Image Classification Journal Article
In: IEEE Transactions on Geoscience and Remote Sensing, vol. 60, pp. 1-21, 2022.
@article{9737511,
title = {Evolving Block-Based Convolutional Neural Network for Hyperspectral Image Classification},
author = {Zhenyu Lu and Shaoyang Liang and Qiang Yang and Bo Du},
url = {https://ieeexplore.ieee.org/abstract/document/9737511},
doi = {10.1109/TGRS.2022.3160513},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Geoscience and Remote Sensing},
volume = {60},
pages = {1--21},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lukasik, Jovita; Jung, Steffen; Keuper, Margret
Learning Where To Look - Generative NAS is Surprisingly Efficient Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2203-08734,
title = {Learning Where To Look - Generative {NAS} is Surprisingly Efficient},
author = {Jovita Lukasik and Steffen Jung and Margret Keuper},
url = {https://doi.org/10.48550/arXiv.2203.08734},
doi = {10.48550/arXiv.2203.08734},
eprint = {2203.08734},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2203.08734},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Yan, Chenqian; Zhang, Yuge; Zhang, Quanlu; Yang, Yaming; Jiang, Xinyang; Yang, Yuqing; Wang, Baoyuan
Privacy-preserving Online AutoML for Domain-Specific Face Detection Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2203-08399,
title = {Privacy-preserving Online {AutoML} for Domain-Specific Face Detection},
author = {Chenqian Yan and Yuge Zhang and Quanlu Zhang and Yaming Yang and Xinyang Jiang and Yuqing Yang and Baoyuan Wang},
url = {https://doi.org/10.48550/arXiv.2203.08399},
doi = {10.48550/arXiv.2203.08399},
eprint = {2203.08399},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2203.08399},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Yang, Sen; Yang, Wankou; Cui, Zhen
Searching part-specific neural fabrics for human pose estimation Journal Article
In: Pattern Recognition, vol. 128, pp. 108652, 2022, ISSN: 0031-3203.
@article{YANG2022108652,
title = {Searching part-specific neural fabrics for human pose estimation},
author = {Sen Yang and Wankou Yang and Zhen Cui},
url = {https://www.sciencedirect.com/science/article/pii/S0031320322001339},
doi = {10.1016/j.patcog.2022.108652},
issn = {0031-3203},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Pattern Recognition},
volume = {128},
pages = {108652},
abstract = {Neural architecture search (NAS) has emerged in many domains to jointly learn the architectures and weights of neural networks. The core spirit behind NAS is to automatically search neural architectures for target tasks with better performance-efficiency trade-offs. However, existing approaches emphasize on only searching a single architecture with less human intervention to replace a human-designed neural network, yet making the search process almost independent of the domain knowledge. In this paper, we aim to apply NAS for human pose estimation and we ask: when NAS meets this localization task, can the articulated human body structure help to search better task-specific architectures? To this end, we first design a new neural architecture search space, Cell-based Neural Fabric (CNF), to learn micro as well as macro neural architecture using a differentiable search strategy. Then, by viewing locating human parts as multiple disentangled prediction sub-tasks, we exploit the compositionality of human body structure as guidance to search multiple part-specific CNFs specialized for different human parts. After the search, all these part-specific neural fabrics have been tailored with distinct micro and macro architecture parameters. The results show that such knowledge-guided NAS-based model outperforms a hand-crafted part-based baseline model, and the resulting multiple part-specific architectures gain significant performance improvement against a single NAS-based architecture for the whole body. The experiments on MPII and COCO datasets show that our models11Code is available at https://github.com/yangsenius/PoseNFS. achieve comparable performance against the state-of-the-art methods while being relatively lightweight.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhang, Haichao; Hao, Kuangrong; Pedrycz, Witold; Gao, Lei; Tang, Xue-Song; Wei, Bing
Vision Transformer with Convolutions Architecture Search Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2203-10435,
title = {Vision Transformer with Convolutions Architecture Search},
author = {Haichao Zhang and Kuangrong Hao and Witold Pedrycz and Lei Gao and Xue-Song Tang and Bing Wei},
url = {https://doi.org/10.48550/arXiv.2203.10435},
doi = {10.48550/arXiv.2203.10435},
eprint = {2203.10435},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2203.10435},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Hu, Yiming; Wang, Xingang; Gu, Qingyi
PWSNAS: Powering Weight Sharing NAS With General Search Space Shrinking Framework Journal Article
In: IEEE Transactions on Neural Networks and Learning Systems, pp. 1-14, 2022.
@article{9739130,
title = {{PWSNAS}: Powering Weight Sharing {NAS} With General Search Space Shrinking Framework},
author = {Yiming Hu and Xingang Wang and Qingyi Gu},
url = {https://ieeexplore.ieee.org/abstract/document/9739130},
doi = {10.1109/TNNLS.2022.3156373},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Neural Networks and Learning Systems},
pages = {1--14},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wang, Xiaoxing; Lin, Jiale; Yan, Junchi; Zhao, Juanping; Yang, Xiaokang
EAutoDet: Efficient Architecture Search for Object Detection Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2203-10747,
title = {{EAutoDet}: Efficient Architecture Search for Object Detection},
author = {Xiaoxing Wang and Jiale Lin and Junchi Yan and Juanping Zhao and Xiaokang Yang},
url = {https://doi.org/10.48550/arXiv.2203.10747},
doi = {10.48550/arXiv.2203.10747},
eprint = {2203.10747},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2203.10747},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Habibian, Amirhossein; Yahia, Haitam Ben; Abati, Davide; Gavves, Efstratios; Porikli, Fatih
Delta Distillation for Efficient Video Processing Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2203-09594,
title = {Delta Distillation for Efficient Video Processing},
author = {Amirhossein Habibian and Haitam Ben Yahia and Davide Abati and Efstratios Gavves and Fatih Porikli},
url = {https://doi.org/10.48550/arXiv.2203.09594},
doi = {10.48550/arXiv.2203.09594},
eprint = {2203.09594},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2203.09594},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Arora, Parul; Jalali, Seyed Mohammad Jafar; Ahmadian, Sajad; Panigrahi, Bijaya Ketan; Suganthan, P. N.; Khosravi, Abbas
Probabilistic Wind Power Forecasting Using Optimised Deep Auto-Regressive Recurrent Neural Networks Journal Article
In: IEEE Transactions on Industrial Informatics, pp. 1-1, 2022.
@article{9739990,
title = {Probabilistic Wind Power Forecasting Using Optimised Deep Auto-Regressive Recurrent Neural Networks},
author = {Parul Arora and Seyed Mohammad Jafar Jalali and Sajad Ahmadian and Bijaya Ketan Panigrahi and P. N. Suganthan and Abbas Khosravi},
url = {https://ieeexplore.ieee.org/abstract/document/9739990},
doi = {10.1109/TII.2022.3160696},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Industrial Informatics},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Yüzügüler, Ahmet Caner; Dimitriadis, Nikolaos; Frossard, Pascal
U-Boost NAS: Utilization-Boosted Differentiable Neural Architecture Search Technical Report
2022.
@techreport{yuzuguler2022u,
title = {{U-Boost NAS}: Utilization-Boosted Differentiable Neural Architecture Search},
author = {Ahmet Caner Yüzügüler and Nikolaos Dimitriadis and Pascal Frossard},
url = {https://arxiv.org/abs/2203.12412},
eprint = {2203.12412},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {arXiv preprint arXiv:2203.12412},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}