Maintained by Difan Deng and Marius Lindauer.
The following list considers papers related to neural architecture search. It is by no means complete. If you miss a paper on the list, please let us know.
Please note that although NAS methods steadily improve, the quality of empirical evaluations in this field is still lagging behind compared to other areas in machine learning, AI and optimization. We would therefore like to share some best practices for empirical evaluations of NAS methods, which we believe will facilitate sustained and measurable progress in the field. If you are interested in a teaser, please read our blog post or directly jump to our checklist.
Transformers have gained increasing popularity in different domains. For a comprehensive list of papers focusing on Neural Architecture Search for Transformer-Based spaces, the awesome-transformer-search repo is all you need.
2022
Liu, Dingbang; Zhou, Haoxiang; Mao, Wei; Liu, Jun; Han, Yuliang; Man, Changhai; Wu, Qiuping; Guo, Zhiru; Huang, Mingqiang; Luo, Shaobo; Lv, Mingsong; Chen, Quan; Yu, Hao
An Energy-Efficient Mixed-Bit CNN Accelerator with Column Parallel Readout for ReRAM-based In-memory Computing Journal Article
In: IEEE Journal on Emerging and Selected Topics in Circuits and Systems, pp. 1-1, 2022.
@article{9911654,
title = {An Energy-Efficient Mixed-Bit {CNN} Accelerator with Column Parallel Readout for {ReRAM}-based In-memory Computing},
author = {Dingbang Liu and Haoxiang Zhou and Wei Mao and Jun Liu and Yuliang Han and Changhai Man and Qiuping Wu and Zhiru Guo and Mingqiang Huang and Shaobo Luo and Mingsong Lv and Quan Chen and Hao Yu},
url = {https://ieeexplore.ieee.org/abstract/document/9911654},
doi = {10.1109/JETCAS.2022.3212314},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Journal on Emerging and Selected Topics in Circuits and Systems},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Dirks, Matthew C.; Poole, David L.
Automatic Neural Network Hyperparameter Optimization for Extrapolation: Lessons Learned from Visible and Near-Infrared Spectroscopy of Mango Fruit Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-01124,
title = {Automatic Neural Network Hyperparameter Optimization for Extrapolation: Lessons Learned from Visible and Near-Infrared Spectroscopy of Mango Fruit},
author = {Matthew C. Dirks and David L. Poole},
url = {https://doi.org/10.48550/arXiv.2210.01124},
doi = {10.48550/arXiv.2210.01124},
eprint = {2210.01124},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2210.01124},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Chitty-Venkata, Krishna Teja; Emani, Murali; Vishwanath, Venkatram; Somani, Arun K.
Neural Architecture Search for Transformers: A Survey Journal Article
In: IEEE Access, vol. 10, pp. 108374–108412, 2022.
@article{DBLP:journals/access/Chitty-VenkataE22,
title = {Neural Architecture Search for {Transformers}: A Survey},
author = {Krishna Teja Chitty-Venkata and Murali Emani and Venkatram Vishwanath and Arun K. Somani},
url = {https://doi.org/10.1109/ACCESS.2022.3212767},
doi = {10.1109/ACCESS.2022.3212767},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Access},
volume = {10},
pages = {108374--108412},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wang, Zihan; Gupta, Akshat; Meng, Qi; Lan, Haifeng; Zhang, Xinrui; Guo, Kehao
Multilingual Speech Emotion Recognition With Multi-Gating Mechanism and Neural Architecture Search Proceedings Article
In: SLT'22, 2022.
@inproceedings{inproceedings,
title = {Multilingual Speech Emotion Recognition With Multi-Gating Mechanism and Neural Architecture Search},
author = {Zihan Wang and Akshat Gupta and Qi Meng and Haifeng Lan and Xinrui Zhang and Kehao Guo},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {SLT'22},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Li, Zhuojin; Paolieri, Marco; Golubchik, Leana
Inference Latency Prediction at the Edge Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-02620,
title = {Inference Latency Prediction at the Edge},
author = {Zhuojin Li and Marco Paolieri and Leana Golubchik},
url = {https://doi.org/10.48550/arXiv.2210.02620},
doi = {10.48550/arXiv.2210.02620},
eprint = {2210.02620},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2210.02620},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Zhou, Qin; Cao, Jiajia; Yin, Ling; Zhang, Fei; Li, Yun
INAS: Incremental Neural Architecture Search Proceedings Article
In: 2022 27th International Conference on Automation and Computing (ICAC), pp. 1-6, 2022.
@inproceedings{9911083,
title = {{INAS}: Incremental Neural Architecture Search},
author = {Qin Zhou and Jiajia Cao and Ling Yin and Fei Zhang and Yun Li},
url = {https://ieeexplore.ieee.org/abstract/document/9911083},
doi = {10.1109/ICAC55051.2022.9911083},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {2022 27th International Conference on Automation and Computing (ICAC)},
pages = {1--6},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhang, Xinbang; Jin, Qizhao; Xiang, Shiming; Pan, Chunhong
MFNet: The Spatio-Temporal Network for Meteorological Forecasting With Architecture Search Journal Article
In: IEEE Geoscience and Remote Sensing Letters, vol. 19, pp. 1-5, 2022.
@article{9915588,
title = {{MFNet}: The Spatio-Temporal Network for Meteorological Forecasting With Architecture Search},
author = {Xinbang Zhang and Qizhao Jin and Shiming Xiang and Chunhong Pan},
url = {https://ieeexplore.ieee.org/abstract/document/9915588},
doi = {10.1109/LGRS.2022.3213618},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Geoscience and Remote Sensing Letters},
volume = {19},
pages = {1--5},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kao, Sheng-Chun; Parashar, Angshuman; Tsai, Po-An; Krishna, Tushar
Demystifying Map Space Exploration for NPUs Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-03731,
title = {Demystifying Map Space Exploration for {NPUs}},
author = {Sheng-Chun Kao and Angshuman Parashar and Po-An Tsai and Tushar Krishna},
url = {https://doi.org/10.48550/arXiv.2210.03731},
doi = {10.48550/arXiv.2210.03731},
eprint = {2210.03731},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2210.03731},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Duan, Yiqun; Wang, Zhen; Li, Yi; Tang, Jianhang; Wang, Yu-Kai; Lin, Chin-Teng
Cross Task Neural Architecture Search for EEG Signal Classifications Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-06298,
title = {Cross Task Neural Architecture Search for {EEG} Signal Classifications},
author = {Yiqun Duan and Zhen Wang and Yi Li and Jianhang Tang and Yu-Kai Wang and Chin-Teng Lin},
url = {https://doi.org/10.48550/arXiv.2210.06298},
doi = {10.48550/arXiv.2210.06298},
eprint = {2210.06298},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2210.06298},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Lv, Zeqiong; Qian, Chao; Yen, Gary G.; Sun, Yanan
Analysis of Expected Hitting Time for Designing Evolutionary Neural Architecture Search Algorithms Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-05397,
title = {Analysis of Expected Hitting Time for Designing Evolutionary Neural Architecture Search Algorithms},
author = {Zeqiong Lv and Chao Qian and Gary G. Yen and Yanan Sun},
url = {https://doi.org/10.48550/arXiv.2210.05397},
doi = {10.48550/arXiv.2210.05397},
eprint = {2210.05397},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2210.05397},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Marchisio, Alberto; Mrazek, Vojtech; Massa, Andrea; Bussolino, Beatrice; Martina, Maurizio; Shafique, Muhammad
RoHNAS: A Neural Architecture Search Framework With Conjoint Optimization for Adversarial Robustness and Hardware Efficiency of Convolutional and Capsule Networks Journal Article
In: IEEE Access, vol. 10, pp. 109043–109055, 2022.
@article{DBLP:journals/access/MarchisioMMBMS22,
title = {{RoHNAS}: A Neural Architecture Search Framework With Conjoint Optimization for Adversarial Robustness and Hardware Efficiency of Convolutional and Capsule Networks},
author = {Alberto Marchisio and Vojtech Mrazek and Andrea Massa and Beatrice Bussolino and Maurizio Martina and Muhammad Shafique},
url = {https://doi.org/10.1109/ACCESS.2022.3214312},
doi = {10.1109/ACCESS.2022.3214312},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Access},
volume = {10},
pages = {109043--109055},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhou, Yuwei; Wang, Xin; Chen, Hong; Duan, Xuguang; Guan, Chaoyu; Zhu, Wenwu
Curriculum-NAS: Curriculum Weight-Sharing Neural Architecture Search Proceedings Article
In: Proceedings of the 30th ACM International Conference on Multimedia, pp. 6792–6801, Association for Computing Machinery, Lisboa, Portugal, 2022, ISBN: 9781450392037.
@inproceedings{10.1145/3503161.3548271,
title = {{Curriculum-NAS}: Curriculum Weight-Sharing Neural Architecture Search},
author = {Yuwei Zhou and Xin Wang and Hong Chen and Xuguang Duan and Chaoyu Guan and Wenwu Zhu},
url = {https://doi.org/10.1145/3503161.3548271},
doi = {10.1145/3503161.3548271},
isbn = {9781450392037},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the 30th ACM International Conference on Multimedia},
pages = {6792--6801},
publisher = {Association for Computing Machinery},
address = {Lisboa, Portugal},
series = {MM '22},
abstract = {Neural Architecture Search (NAS) is an effective way to automatically design neural architectures for various multimedia applications. Weight-sharing, as one of the most popular NAS strategies, has been widely adopted due to its search efficiency. Existing weight-sharing NAS methods overlook the influence of data distribution and treat each data sample equally. Contrastively, in this paper, we empirically discover that different data samples have different influences on architectures, e.g., some data samples are easy to fit by certain architectures but hard by others. Hence, there exist architectures with better performances on early data samples being more likely to be discovered in the whole NAS searching process, which leads to a suboptimal searching result. To tackle this problem, we propose Curriculum-NAS, a curriculum training framework on weight-sharing NAS, which dynamically changes the training data weights during the searching process. In particular, Curriculum-NAS utilizes the multiple subnets included in weight-sharing NAS to jointly assess data uncertainty, which serves as the difficulty criterion in a curriculum manner, so that the potentially optimal architectures can obtain higher probability of being fully trained and discovered. Extensive experiments on several image and text datasets demonstrate that our Curriculum-NAS can bring consistent improvement over existing weight-sharing NAS. The code is available online at https://github.com/zhouyw16/curriculum-nas.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bakhtiarifard, Pedram; Igel, Christian; Selvan, Raghavendra
Energy Consumption-Aware Tabular Benchmarks for Neural Architecture Search Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-06015,
title = {Energy Consumption-Aware Tabular Benchmarks for Neural Architecture Search},
author = {Pedram Bakhtiarifard and Christian Igel and Raghavendra Selvan},
url = {https://doi.org/10.48550/arXiv.2210.06015},
doi = {10.48550/arXiv.2210.06015},
eprint = {2210.06015},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2210.06015},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Premchandar, Sumegha; Madireddy, Sandeep; Jantre, Sanket; Balaprakash, Prasanna
Unified Probabilistic Neural Architecture and Weight Ensembling Improves Model Robustness Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-04083,
title = {Unified Probabilistic Neural Architecture and Weight Ensembling Improves Model Robustness},
author = {Sumegha Premchandar and Sandeep Madireddy and Sanket Jantre and Prasanna Balaprakash},
url = {https://doi.org/10.48550/arXiv.2210.04083},
doi = {10.48550/arXiv.2210.04083},
eprint = {2210.04083},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2210.04083},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Guo, Bicheng; Chen, Tao; He, Shibo; Liu, Haoyu; Xu, Lilin; Ye, Peng; Chen, Jiming
Generalized Global Ranking-Aware Neural Architecture Ranker for Efficient Image Classifier Search Proceedings Article
In: Proceedings of the 30th ACM International Conference on Multimedia, pp. 3730–3741, Association for Computing Machinery, Lisboa, Portugal, 2022, ISBN: 9781450392037.
@inproceedings{10.1145/3503161.3548149,
title = {Generalized Global Ranking-Aware Neural Architecture Ranker for Efficient Image Classifier Search},
author = {Bicheng Guo and Tao Chen and Shibo He and Haoyu Liu and Lilin Xu and Peng Ye and Jiming Chen},
url = {https://doi.org/10.1145/3503161.3548149},
doi = {10.1145/3503161.3548149},
isbn = {9781450392037},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the 30th ACM International Conference on Multimedia},
pages = {3730--3741},
publisher = {Association for Computing Machinery},
address = {Lisboa, Portugal},
series = {MM '22},
abstract = {Neural Architecture Search (NAS) is a powerful tool for automating effective image processing DNN designing. The ranking has been advocated to design an efficient performance predictor for NAS. The previous contrastive method solves the ranking problem by comparing pairs of architectures and predicting their relative performance. However, it only focuses on the rankings between two involved architectures and neglects the overall quality distributions of the search space, which may suffer generalization issues. A predictor, namely Neural Architecture Ranker (NAR) which concentrates on the global quality tier of specific architecture, is proposed to tackle such problems caused by the local perspective. The NAR explores the quality tiers of the search space globally and classifies each individual to the tier they belong to according to its global ranking. Thus, the predictor gains the knowledge of the performance distributions of the search space which helps to generalize its ranking ability to the datasets more easily. Meanwhile, the global quality distribution facilitates the search phase by directly sampling candidates according to the statistics of quality tiers, which is free of training a search algorithm, e.g., Reinforcement Learning (RL) or Evolutionary Algorithm (EA), thus it simplifies the NAS pipeline and saves the computational overheads. The proposed NAR achieves better performance than the state-of-the-art methods on two widely used datasets for NAS research. On the vast search space of NAS-Bench-101, the NAR easily finds the architecture with top 0.01 performance only by sampling. It also generalizes well to different image datasets of NAS-Bench-201, i.e., CIFAR-10, CIFAR-100, and ImageNet-16-120 by identifying the optimal architectures for each of them.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Chenxi; Leng, Zhaoqi; Sun, Pei; Cheng, Shuyang; Qi, Charles R.; Zhou, Yin; Tan, Mingxing; Anguelov, Dragomir
LidarNAS: Unifying and Searching Neural Architectures for 3D Point Clouds Proceedings Article
In: Avidan, Shai; Brostow, Gabriel J.; Cissé, Moustapha; Farinella, Giovanni Maria; Hassner, Tal (Ed.): Computer Vision - ECCV 2022 - 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXI, pp. 158–175, Springer, 2022.
@inproceedings{DBLP:conf/eccv/LiuLSCQZTA22,
title = {{LidarNAS}: Unifying and Searching Neural Architectures for {3D} Point Clouds},
author = {Chenxi Liu and Zhaoqi Leng and Pei Sun and Shuyang Cheng and Charles R. Qi and Yin Zhou and Mingxing Tan and Dragomir Anguelov},
editor = {Shai Avidan and Gabriel J. Brostow and Moustapha Cissé and Giovanni Maria Farinella and Tal Hassner},
url = {https://doi.org/10.1007/978-3-031-19803-8_10},
doi = {10.1007/978-3-031-19803-8_10},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Computer Vision - ECCV 2022 - 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXI},
volume = {13681},
pages = {158--175},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Maile, Kaitlin; Wilson, Dennis G.; Forré, Patrick
Architectural Optimization over Subgroups for Equivariant Neural Networks Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-05484,
title = {Architectural Optimization over Subgroups for Equivariant Neural Networks},
author = {Kaitlin Maile and Dennis G. Wilson and Patrick Forré},
url = {https://doi.org/10.48550/arXiv.2210.05484},
doi = {10.48550/arXiv.2210.05484},
eprint = {2210.05484},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2210.05484},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Lin, Haojia; Li, Lijiang; Zheng, Xiawu; Chao, Fei; Ji, Rongrong
Searching Lightweight Neural Network for Image Signal Processing Proceedings Article
In: Proceedings of the 30th ACM International Conference on Multimedia, pp. 2825–2833, Association for Computing Machinery, Lisboa, Portugal, 2022, ISBN: 9781450392037.
@inproceedings{10.1145/3503161.3548288,
title = {Searching Lightweight Neural Network for Image Signal Processing},
author = {Haojia Lin and Lijiang Li and Xiawu Zheng and Fei Chao and Rongrong Ji},
url = {https://doi.org/10.1145/3503161.3548288},
doi = {10.1145/3503161.3548288},
isbn = {9781450392037},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the 30th ACM International Conference on Multimedia},
pages = {2825--2833},
publisher = {Association for Computing Machinery},
address = {Lisboa, Portugal},
series = {MM '22},
abstract = {Recently, it has been shown that the traditional Image Signal Processing (ISP) can be replaced by deep neural networks due to their superior performance. However, most of these networks require heavy computation burden and thus are far from sufficient to be deployed on resource-limited platforms, including but not limited to mobile devices and FPGA. To tackle this challenge, we propose an automated search framework that derives ISP models with high image quality while satisfying the low-computation requirement. To reduce the search cost, we adopt the weight-sharing strategy by introducing a supernet and decouple the architecture search into two stages, supernet training and hard-aware evolutionary search. With the proposed framework, we can train the ISP model once and quickly find high-performance but low-computation models on multiple devices. Experiments demonstrate that the searched ISP models have an excellent trade-off between image quality and model complexity, i.e., achieve compelling reconstruction quality with more than 90% reduction in FLOPs as compared to the state-of-the-art networks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hosseini, Ramtin; Xie, Pengtao
Image Understanding by Captioning with Differentiable Architecture Search Proceedings Article
In: Proceedings of the 30th ACM International Conference on Multimedia, pp. 4665–4673, Association for Computing Machinery, Lisboa, Portugal, 2022, ISBN: 9781450392037.
@inproceedings{10.1145/3503161.3548150,
title = {Image Understanding by Captioning with Differentiable Architecture Search},
author = {Ramtin Hosseini and Pengtao Xie},
url = {https://doi.org/10.1145/3503161.3548150},
doi = {10.1145/3503161.3548150},
isbn = {9781450392037},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the 30th ACM International Conference on Multimedia},
pages = {4665--4673},
publisher = {Association for Computing Machinery},
address = {Lisboa, Portugal},
series = {MM '22},
abstract = {In deep learning applications, image understanding is a crucial task, where several techniques such as image captioning and visual question answering have been widely studied to improve and evaluate the performances of deep neural networks (DNN) in this area. In image captioning, models have encoder-decoder architectures, where the encoders take the input images, produce embeddings, and feed them into the decoders to generate textual descriptions. Designing a proper image captioning encoder-decoder architecture manually is a difficult challenge due to the complexity of recognizing the critical objects of the input images and their relationships to generate caption descriptions. To address this issue, we propose a three-level optimization method that employs differentiable architecture search strategies to seek the most suitable architecture for image captioning automatically. Our optimization framework involves three stages, which are performed end-to-end. In the first stage, an image captioning model learns and updates the weights of its encoder and decoder to create image captions. At the next stage, the trained encoder-decoder generates a pseudo image captioning dataset from unlabeled images, and the predictive model trains on the generated dataset to update its weights. Finally, the trained model validates its performance on the validation set and updates the encoder-decoder architecture by minimizing the validation loss. Experiments and studies on the COCO image captions datasets demonstrate that our method performs significantly better than the baselines and can achieve state-of-the-art results in image understanding tasks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wen, Long; Gao, Liang; Li, Xinyu; Li, Hui
A new genetic algorithm based evolutionary neural architecture search for image classification Journal Article
In: Swarm and Evolutionary Computation, vol. 75, pp. 101191, 2022, ISSN: 2210-6502.
@article{WEN2022101191,
title = {A new genetic algorithm based evolutionary neural architecture search for image classification},
author = {Long Wen and Liang Gao and Xinyu Li and Hui Li},
url = {https://www.sciencedirect.com/science/article/pii/S2210650222001547},
doi = {10.1016/j.swevo.2022.101191},
issn = {2210-6502},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Swarm and Evolutionary Computation},
volume = {75},
pages = {101191},
abstract = {Deep Learning (DL) has achieved the great breakthrough in image classification. As DL structure is problem-dependent and it has the crucial impact on its performance, it is still necessary to re-design the structures of DL according to the actual needs, even there exists various benchmark DL structures. Neural Architecture Search (NAS) which can design the DL network automatically has been widely investigated. However, many NAS methods suffer from the huge computation time. To overcome this drawback, this research proposed a new Evolutionary Neural Architecture Search with RepVGG nodes (EvoNAS-Rep). Firstly, a new encoding strategy is developed, which can map the fixed-length encoding individual to DL structure with variable depth using RepVGG nodes. Secondly, Genetic Algorithm (GA) is adopted for searching the optimal individual and its corresponding DL model. Thirdly, the iterative training process is designed to train the DL model and to evolve the GA simultaneously. The proposed EvoNAS-Rep is validated on the famous CIFAR 10 and CIFAR 100. The results show that EvoNAS-Rep has obtained 96.35% and 79.82% with only near 0.2 GPU days, which is both effectiveness and efficiency. EvoNAS-Rep is also validated on two real-world applications, including the NEU-CLS and the Chest XRay 2017 datasets, and the results show that EvoNAS-Rep has achieved the competitive results.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Li, Yifan; Liu, Jing; Teng, Yingzhi
A decomposition-based memetic neural architecture search algorithm for univariate time series forecasting Journal Article
In: Applied Soft Computing, vol. 130, pp. 109714, 2022, ISSN: 1568-4946.
@article{LI2022109714,
title = {A decomposition-based memetic neural architecture search algorithm for univariate time series forecasting},
author = {Yifan Li and Jing Liu and Yingzhi Teng},
url = {https://www.sciencedirect.com/science/article/pii/S1568494622007633},
doi = {10.1016/j.asoc.2022.109714},
issn = {1568-4946},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Applied Soft Computing},
volume = {130},
pages = {109714},
abstract = {Although deep learning has made remarkable progress in time series forecasting, enormous hyperparameters consume a lot of effort to tune. Moreover, to further build the forecasting models with better performance, time series decomposition is usually adopted to mine implicit patterns of the data. Inspired by the time series decomposition, automatically searching for a network architecture after decomposing the time series is proposed. The searching process is non-trivial and has two key challenges: 1) impairment of time series information after decomposing and 2) enlarged search space caused by the huge parameters to be optimized. In this paper, a decomposition-based memetic neural architecture search algorithm is proposed for univariate time series forecasting to address these two challenges. For the first challenge, a general univariate time series forecasting paradigm is designed as the building pipeline of the individual in the proposed algorithm, which considers both the decomposed components and the original series as the compensation information to improve the network representation ability. For the second challenge, with the intrinsic property of representation of individuals in mind, we design a decomposition-based memetic algorithm with a discriminative local search operator to automatically optimize the network configurations. The experimental results on nine benchmarks with four horizons and one application of remaining useful forecasting demonstrate that the discovered architectures by the proposed algorithm achieve competitive performance compared with six methods under aligned settings. Codes and models will be released in https://github.com/EavanLi/dMA-NAS-UTSF.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Yang, Fan; Li, Xin; Shen, Jianbing
Nested Architecture Search for Point Cloud Semantic Segmentation Journal Article
In: IEEE Transactions on Image Processing, pp. 1-1, 2022.
@article{9919408,
title = {Nested Architecture Search for Point Cloud Semantic Segmentation},
author = {Fan Yang and Xin Li and Jianbing Shen},
url = {https://ieeexplore.ieee.org/abstract/document/9919408},
doi = {10.1109/TIP.2022.3147983},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Image Processing},
pages = {1--1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Shu, Yao; Chen, Yizhou; Dai, Zhongxiang; Low, Bryan Kian Hsiang
Neural ensemble search via Bayesian sampling Proceedings Article
In: Cussens, James; Zhang, Kun (Ed.): Uncertainty in Artificial Intelligence, Proceedings of the Thirty-Eighth Conference on Uncertainty in Artificial Intelligence, UAI 2022, 1-5 August 2022, Eindhoven, The Netherlands, pp. 1803–1812, PMLR, 2022.
@inproceedings{DBLP:conf/uai/ShuCDL22,
title = {Neural ensemble search via {Bayesian} sampling},
author = {Yao Shu and Yizhou Chen and Zhongxiang Dai and Bryan Kian Hsiang Low},
editor = {James Cussens and Kun Zhang},
url = {https://proceedings.mlr.press/v180/shu22a.html},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Uncertainty in Artificial Intelligence, Proceedings of the Thirty-Eighth Conference on Uncertainty in Artificial Intelligence, UAI 2022, 1-5 August 2022, Eindhoven, The Netherlands},
volume = {180},
pages = {1803--1812},
publisher = {PMLR},
series = {Proceedings of Machine Learning Research},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jawahar, Ganesh; Mukherjee, Subhabrata; Liu, Xiaodong; Kim, Young Jin; Abdul-Mageed, Muhammad; Lakshmanan, Laks V. S.; Awadallah, Ahmed Hassan; Bubeck, Sébastien; Gao, Jianfeng
AutoMoE: Neural Architecture Search for Efficient Sparsely Activated Transformers Journal Article
In: CoRR, vol. abs/2210.07535, 2022.
@article{DBLP:journals/corr/abs-2210-07535,
title = {{AutoMoE}: Neural Architecture Search for Efficient Sparsely Activated Transformers},
author = {Ganesh Jawahar and Subhabrata Mukherjee and Xiaodong Liu and Young Jin Kim and Muhammad Abdul-Mageed and Laks V. S. Lakshmanan and Ahmed Hassan Awadallah and Sébastien Bubeck and Jianfeng Gao},
url = {https://doi.org/10.48550/arXiv.2210.07535},
doi = {10.48550/arXiv.2210.07535},
eprint = {2210.07535},
archiveprefix = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2210.07535},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chen, Hongjiang; Wang, Yang; Liu, Leibo; Wei, Shaojun; Yin, Shouyi
HQNAS: Auto CNN deployment framework for joint quantization and architecture search Journal Article
In: CoRR, vol. abs/2210.08485, 2022.
@article{DBLP:journals/corr/abs-2210-08485,
title = {{HQNAS}: Auto {CNN} deployment framework for joint quantization and architecture search},
author = {Hongjiang Chen and Yang Wang and Leibo Liu and Shaojun Wei and Shouyi Yin},
url = {https://doi.org/10.48550/arXiv.2210.08485},
doi = {10.48550/arXiv.2210.08485},
eprint = {2210.08485},
archiveprefix = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2210.08485},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Guo, Yong; Chen, Yaofo; Zheng, Yin; Chen, Qi; Zhao, Peilin; Chen, Jian; Huang, Junzhou; Tan, Mingkui
Pareto-aware Neural Architecture Generation for Diverse Computational Budgets Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-07634,
title = {Pareto-aware Neural Architecture Generation for Diverse Computational Budgets},
author = {Yong Guo and Yaofo Chen and Yin Zheng and Qi Chen and Peilin Zhao and Jian Chen and Junzhou Huang and Mingkui Tan},
url = {https://doi.org/10.48550/arXiv.2210.07634},
doi = {10.48550/arXiv.2210.07634},
eprint = {2210.07634},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2210.07634},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Chen, Hongjiang; Wang, Yang; Liu, Leibo; Wei, Shaojun; Yin, Shouyi
FAQS: Communication-efficient Federate DNN Architecture and Quantization Co-Search for personalized Hardware-aware Preferences Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-08450,
title = {{FAQS}: Communication-efficient Federate {DNN} Architecture and Quantization Co-Search for personalized Hardware-aware Preferences},
author = {Hongjiang Chen and Yang Wang and Leibo Liu and Shaojun Wei and Shouyi Yin},
url = {https://doi.org/10.48550/arXiv.2210.08450},
doi = {10.48550/arXiv.2210.08450},
eprint = {2210.08450},
archiveprefix = {arXiv},
institution = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2210.08450},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Li, Fuxian; Yan, Huan; Jin, Guangyin; Liu, Yue; Li, Yong; Jin, Depeng
Automated Spatio-Temporal Synchronous Modeling with Multiple Graphs for Traffic Prediction Proceedings Article
In: Proceedings of the 31st ACM International Conference on Information & Knowledge Management, pp. 1084–1093, ACM, 2022.
@inproceedings{DBLP:conf/cikm/LiYJLLJ22,
title = {Automated Spatio-Temporal Synchronous Modeling with Multiple Graphs for Traffic Prediction},
author = {Fuxian Li and Huan Yan and Guangyin Jin and Yue Liu and Yong Li and Depeng Jin},
editor = {Mohammad Al Hasan and Li Xiong},
url = {https://doi.org/10.1145/3511808.3557243},
doi = {10.1145/3511808.3557243},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the 31st ACM International Conference on Information \& Knowledge Management, Atlanta, GA, USA, October 17-21, 2022},
pages = {1084--1093},
publisher = {ACM},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mao, Wei; Dai, Liuyao; Li, Kai; Cheng, Quan; Wang, Yuhang; Du, Laimin; Luo, Shaobo; Huang, Mingqiang; Yu, Hao
An Energy-Efficient Mixed-Bitwidth Systolic Accelerator for NAS-Optimized Deep Neural Networks Journal Article
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, pp. 1-13, 2022.
@article{9920733,
  title     = {An Energy-Efficient Mixed-Bitwidth Systolic Accelerator for NAS-Optimized Deep Neural Networks},
  author    = {Wei Mao and Liuyao Dai and Kai Li and Quan Cheng and Yuhang Wang and Laimin Du and Shaobo Luo and Mingqiang Huang and Hao Yu},
  url       = {https://ieeexplore.ieee.org/abstract/document/9920733},
  doi       = {10.1109/TVLSI.2022.3210069},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {IEEE Transactions on Very Large Scale Integration (VLSI) Systems},
  pages     = {1--13},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Movahedi, Sajad; Adabinejad, Melika; Imani, Ayyoob; Keshavarz, Arezou; Dehghani, Mostafa; Shakery, Azadeh; Araabi, Babak Nadjar
Λ-DARTS: Mitigating Performance Collapse by Harmonizing Operation Selection among Cells Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-07998,
  title     = {Λ-DARTS: Mitigating Performance Collapse by Harmonizing Operation Selection among Cells},
  author    = {Sajad Movahedi and Melika Adabinejad and Ayyoob Imani and Arezou Keshavarz and Mostafa Dehghani and Azadeh Shakery and Babak Nadjar Araabi},
  url       = {https://doi.org/10.48550/arXiv.2210.07998},
  doi       = {10.48550/arXiv.2210.07998},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {CoRR},
  volume    = {abs/2210.07998},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {techreport}
}
}
Hoang, Duc; Wang, Haotao; Zhao, Handong; Rossi, Ryan; Kim, Sungchul; Mahadik, Kanak; Wang, Zhangyang
AutoMARS: Searching to Compress Multi-Modality Recommendation Systems Proceedings Article
In: Proceedings of the 31st ACM International Conference on Information & Knowledge Management, pp. 727–736, Association for Computing Machinery, Atlanta, GA, USA, 2022, ISBN: 9781450392365.
@inproceedings{10.1145/3511808.3557242,
  title     = {AutoMARS: Searching to Compress Multi-Modality Recommendation Systems},
  author    = {Duc Hoang and Haotao Wang and Handong Zhao and Ryan Rossi and Sungchul Kim and Kanak Mahadik and Zhangyang Wang},
  url       = {https://doi.org/10.1145/3511808.3557242},
  doi       = {10.1145/3511808.3557242},
  isbn      = {9781450392365},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  booktitle = {Proceedings of the 31st ACM International Conference on Information & Knowledge Management},
  pages     = {727--736},
  publisher = {Association for Computing Machinery},
  address   = {Atlanta, GA, USA},
  series    = {CIKM '22},
  abstract  = {Web applications utilize Recommendation Systems (RS) to address the problem of consumer over-choices. Recent works have taken advantage of multi-modality or multi-view, input information (such as user interaction, images, texts, rating scores) to boost recommendation system performance compared with using single-modality information. However, the use of multi-modality input demands much higher computational cost and storage capacity. On the other hand, the real-world RS services usually have strict budgets on both time and space for a good customer experience. As a result, the model efficiency of multi-modality recommendation systems has gained increasing importance. While unfortunately, to the best of our knowledge, there is no existing study of a generic compression framework for multi-modality RS. In this paper, we investigate, for the first time, how to compress a multi-modality recommendation system with a fixed budget. Assuming that input information from different modalities are of unequal importance, a good compression algorithm should learn to automatically allocate different resource budgets to each input, based on their importance in maximally preserving recommendation efficacy. To this end, we leverage the tools of neural architecture search (NAS) and distillation and propose Auto Multi-modAlity Recommendation System (AutoMARS), a unified modality-aware model compression framework dedicated to multi-modality recommendation systems. We demonstrate the effectiveness and generality of AutoMARS by testing it on three different Amazon datasets of various sparsity. AutoMARS demonstrates superior multi-modality compression performance than previous state-of-the-art compression methods. For example on the Amazon Beauty dataset, we achieve on average a 20% higher accuracy over previous state-of-the-art methods, while enjoying 65% reduction over baselines. Codes are available at: https://github.com/VITA-Group/AutoMARS.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hoang, Duc; Wang, Haotao; Zhao, Handong; Rossi, Ryan; Kim, Sungchul; Mahadik, Kanak; Wang, Zhangyang
AutoMARS: Searching to Compress Multi-Modality Recommendation Systems Proceedings Article
In: Proceedings of the 31st ACM International Conference on Information & Knowledge Management, pp. 727–736, Association for Computing Machinery, Atlanta, GA, USA, 2022, ISBN: 9781450392365.
@inproceedings{10.1145/3511808.3557242b,
  title     = {AutoMARS: Searching to Compress Multi-Modality Recommendation Systems},
  author    = {Duc Hoang and Haotao Wang and Handong Zhao and Ryan Rossi and Sungchul Kim and Kanak Mahadik and Zhangyang Wang},
  url       = {https://doi.org/10.1145/3511808.3557242},
  doi       = {10.1145/3511808.3557242},
  isbn      = {9781450392365},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  booktitle = {Proceedings of the 31st ACM International Conference on Information & Knowledge Management},
  pages     = {727--736},
  publisher = {Association for Computing Machinery},
  address   = {Atlanta, GA, USA},
  series    = {CIKM '22},
  abstract  = {Web applications utilize Recommendation Systems (RS) to address the problem of consumer over-choices. Recent works have taken advantage of multi-modality or multi-view, input information (such as user interaction, images, texts, rating scores) to boost recommendation system performance compared with using single-modality information. However, the use of multi-modality input demands much higher computational cost and storage capacity. On the other hand, the real-world RS services usually have strict budgets on both time and space for a good customer experience. As a result, the model efficiency of multi-modality recommendation systems has gained increasing importance. While unfortunately, to the best of our knowledge, there is no existing study of a generic compression framework for multi-modality RS. In this paper, we investigate, for the first time, how to compress a multi-modality recommendation system with a fixed budget. Assuming that input information from different modalities are of unequal importance, a good compression algorithm should learn to automatically allocate different resource budgets to each input, based on their importance in maximally preserving recommendation efficacy. To this end, we leverage the tools of neural architecture search (NAS) and distillation and propose Auto Multi-modAlity Recommendation System (AutoMARS), a unified modality-aware model compression framework dedicated to multi-modality recommendation systems. We demonstrate the effectiveness and generality of AutoMARS by testing it on three different Amazon datasets of various sparsity. AutoMARS demonstrates superior multi-modality compression performance than previous state-of-the-art compression methods. For example on the Amazon Beauty dataset, we achieve on average a 20% higher accuracy over previous state-of-the-art methods, while enjoying 65% reduction over baselines. Codes are available at: https://github.com/VITA-Group/AutoMARS.},
  internal-note = {duplicate of entry 10.1145/3511808.3557242 -- consider removing one copy},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Wang, Zhaozhi; Su, Kefan; Zhang, Jian; Jia, Huizhu; Ye, Qixiang; Xie, Xiaodong; Lu, Zongqing
Multi-Agent Automated Machine Learning Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-09084,
  title     = {Multi-Agent Automated Machine Learning},
  author    = {Zhaozhi Wang and Kefan Su and Jian Zhang and Huizhu Jia and Qixiang Ye and Xiaodong Xie and Zongqing Lu},
  url       = {https://doi.org/10.48550/arXiv.2210.09084},
  doi       = {10.48550/arXiv.2210.09084},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {CoRR},
  volume    = {abs/2210.09084},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {techreport}
}
Sun, Jia; Li, Yanfeng; Chen, Houjin; Peng, Yahui
A Person Re-Identification Baseline Based on Attention Block Neural Architecture Search Proceedings Article
In: 2022 IEEE International Conference on Image Processing (ICIP), pp. 841-845, 2022.
@inproceedings{9897906,
  title     = {A Person Re-Identification Baseline Based on Attention Block Neural Architecture Search},
  author    = {Jia Sun and Yanfeng Li and Houjin Chen and Yahui Peng},
  url       = {https://ieeexplore.ieee.org/abstract/document/9897906},
  doi       = {10.1109/ICIP46576.2022.9897906},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  booktitle = {2022 IEEE International Conference on Image Processing (ICIP)},
  pages     = {841--845},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chau, Thomas Chun-Pong; Dudziak, Lukasz; Wen, Hongkai; Lane, Nicholas Donald; Abdelfattah, Mohamed S.
BLOX: Macro Neural Architecture Search Benchmark and Algorithms Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-07271,
  title     = {BLOX: Macro Neural Architecture Search Benchmark and Algorithms},
  author    = {Thomas Chun-Pong Chau and Lukasz Dudziak and Hongkai Wen and Nicholas Donald Lane and Mohamed S. Abdelfattah},
  url       = {https://doi.org/10.48550/arXiv.2210.07271},
  doi       = {10.48550/arXiv.2210.07271},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {CoRR},
  volume    = {abs/2210.07271},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {techreport}
}
}
Cai, He; Zhang, Zhaokai; Feng, Tianpeng; Guo, Yandong
DARTS-PD: Differentiable Architecture Search with Path-Wise Weight Sharing Derivation Proceedings Article
In: 2022 IEEE International Conference on Image Processing (ICIP), pp. 1256-1260, 2022.
@inproceedings{9897275,
  title     = {DARTS-PD: Differentiable Architecture Search with Path-Wise Weight Sharing Derivation},
  author    = {He Cai and Zhaokai Zhang and Tianpeng Feng and Yandong Guo},
  url       = {https://ieeexplore.ieee.org/abstract/document/9897275},
  doi       = {10.1109/ICIP46576.2022.9897275},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  booktitle = {2022 IEEE International Conference on Image Processing (ICIP)},
  pages     = {1256--1260},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
}
Sukthanker, Rhea; Dooley, Samuel; Dickerson, John P.; White, Colin; Hutter, Frank; Goldblum, Micah
On the Importance of Architectures and Hyperparameters for Fairness in Face Recognition Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-09943,
  title     = {On the Importance of Architectures and Hyperparameters for Fairness in Face Recognition},
  author    = {Rhea Sukthanker and Samuel Dooley and John P. Dickerson and Colin White and Frank Hutter and Micah Goldblum},
  url       = {https://doi.org/10.48550/arXiv.2210.09943},
  doi       = {10.48550/arXiv.2210.09943},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {CoRR},
  volume    = {abs/2210.09943},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {techreport}
}
}
Li, Yuhong; Li, Jiajie; Han, Cong; Li, Pan; Xiong, Jinjun; Chen, Deming
Extensible Proxy for Efficient NAS Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-09459,
  title     = {Extensible Proxy for Efficient NAS},
  author    = {Yuhong Li and Jiajie Li and Cong Han and Pan Li and Jinjun Xiong and Deming Chen},
  url       = {https://doi.org/10.48550/arXiv.2210.09459},
  doi       = {10.48550/arXiv.2210.09459},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {CoRR},
  volume    = {abs/2210.09459},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {techreport}
}
}
Shi, Huihong; You, Haoran; Zhao, Yang; Wang, Zhongfeng; Lin, Yingyan
NASA: Neural Architecture Search and Acceleration for Hardware Inspired Hybrid Networks Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-13361,
  title     = {NASA: Neural Architecture Search and Acceleration for Hardware Inspired Hybrid Networks},
  author    = {Huihong Shi and Haoran You and Yang Zhao and Zhongfeng Wang and Yingyan Lin},
  url       = {https://doi.org/10.48550/arXiv.2210.13361},
  doi       = {10.48550/arXiv.2210.13361},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {CoRR},
  volume    = {abs/2210.13361},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {techreport}
}
}
Bober-Irizar, Mikel; Shumailov, Ilia; Zhao, Yiren; Mullins, Robert; Papernot, Nicolas
Architectural Backdoors in Neural Networks Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2206-07840,
  title     = {Architectural Backdoors in Neural Networks},
  author    = {Mikel Bober-Irizar and Ilia Shumailov and Yiren Zhao and Robert Mullins and Nicolas Papernot},
  url       = {https://doi.org/10.48550/arXiv.2206.07840},
  doi       = {10.48550/arXiv.2206.07840},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {CoRR},
  volume    = {abs/2206.07840},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {techreport}
}
}
Koh, Edwin J. Y.; Amini, Eiman; Gaur, Shruti; Maquieira, Miguel Becerra; Heck, Christian Jara; McLachlan, Geoffrey J.; Beaton, Nick
An Automated Machine learning (AutoML) approach to regression models in minerals processing with case studies of developing industrial comminution and flotation models Journal Article
In: Minerals Engineering, vol. 189, pp. 107886, 2022, ISSN: 0892-6875.
@article{KOH2022107886,
  title     = {An Automated Machine learning (AutoML) approach to regression models in minerals processing with case studies of developing industrial comminution and flotation models},
  author    = {Edwin J. Y. Koh and Eiman Amini and Shruti Gaur and Miguel Becerra Maquieira and Christian Jara Heck and Geoffrey J. McLachlan and Nick Beaton},
  url       = {https://www.sciencedirect.com/science/article/pii/S0892687522004964},
  doi       = {10.1016/j.mineng.2022.107886},
  issn      = {0892-6875},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {Minerals Engineering},
  volume    = {189},
  pages     = {107886},
  abstract  = {Deep learning (DL), a subset of machine learning (ML) has been a popular research interest after obtaining remarkable achievements on various tasks like image classification, object detection, language processing, and artificial intelligence. However, the successes of these algorithms were highly dependent on human expertise for hyperparameter optimisation and data preparation. As a result, widespread application of DL systems in minerals processing is still absent despite the increasing ability to collect data from process information (PI) and assay data. Automated Machine Learning (AutoML) is an emerging area of research which aims to automate the development of ready-to-use end-to-end ML models with little to no user ML knowledge. However, existing commercially available AutoML algorithms are not well designed for minerals processing data. In this study, we develop an AutoML algorithm to develop steady-state minerals processing models suitable for mine scheduling and process optimisation. The algorithm consists of data filtering, temporal resolution alignment, feature selection, neural network architecture search, and development. The AutoML algorithm was tested on three case studies of different processes and ore types. These case studies cover the range of difficulties of possible datasets encountered in the mining and processing industry from clean simulated data to noisy data with poor correlation. The algorithm successfully developed neural network models within hours from hourly raw PI and/or daily assay data with no human intervention. These models derived from process data have minimal errors as low as < 3 % for major valuables like Ni and Cu, 6–7 % for by-products like Au, 8–10 % for deleterious minerals like MgO, and 5–8 % for gangue. The algorithm was also designed so that expert minerals processing knowledge can influence the pipeline to improve the quality of models. 
As a result, the AutoML algorithm becomes a powerful tool for mining and mineral processing experts to apply their domain knowledge of the process to develop models of equipment or processing circuits.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Bitzer, Matthias; Meister, Mona; Zimmer, Christoph
Structural Kernel Search via Bayesian Optimization and Symbolical Optimal Transport Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-11836,
  title     = {Structural Kernel Search via Bayesian Optimization and Symbolical Optimal Transport},
  author    = {Matthias Bitzer and Mona Meister and Christoph Zimmer},
  url       = {https://doi.org/10.48550/arXiv.2210.11836},
  doi       = {10.48550/arXiv.2210.11836},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {CoRR},
  volume    = {abs/2210.11836},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {techreport}
}
}
Pujari, Keerthi Nagasree; Miriyala, Srinivas Soumitri; Mittal, Prateek; Mitra, Kishalay
Better Wind forecasting using Evolutionary Neural Architecture Search driven Green Deep Learning Journal Article
In: Expert Systems with Applications, pp. 119063, 2022, ISSN: 0957-4174.
@article{NAGASREEPUJARI2022119063,
  title     = {Better Wind forecasting using Evolutionary Neural Architecture Search driven Green Deep Learning},
  author    = {Keerthi Nagasree Pujari and Srinivas Soumitri Miriyala and Prateek Mittal and Kishalay Mitra},
  url       = {https://www.sciencedirect.com/science/article/pii/S0957417422020814},
  doi       = {10.1016/j.eswa.2022.119063},
  issn      = {0957-4174},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {Expert Systems with Applications},
  pages     = {119063},
  abstract  = {Climate Change heavily impacts global cities, the downsides of which can be minimized by adopting renewables like wind energy. However, despite its advantages, the nonlinear nature of wind renders the forecasting approaches to design and control wind farms ineffective. To expand the research horizon, the current study a) analyses and performs statistical decomposition of real-world wind time-series data, b) presents the application of Long Short-Term Memory (LSTM) networks, Nonlinear Auto-Regressive (NAR) models, and Wavelet Neural Networks (WNN) as efficient models for accurate wind forecasting with a comprehensive comparison among them to justify their application and c) proposes an evolutionary multi-objective strategy for Neural Architecture Search (NAS) to minimize the computational cost associated with training and inferring the networks which form the central theme of Green Deep Learning.Balancing the trade-off between parsimony and prediction accuracy, the proposed NAS strategy could optimally design NAR, WNN, and LSTM models with a mean test accuracy of 99%. The robust methodologies discussed in this work not only accurately model the wind behavior but also provide a green & generic approach for designing Deep Neural Networks.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Zhang, Sheng; Guo, Lixiang; Fan, Jing; Zhang, Xin; Zhang, Weiming
Exploring neural architecture search for text classification Proceedings Article
In: Zhang, Tao (Ed.): 7th International Symposium on Advances in Electrical, Electronics, and Computer Engineering, pp. 122945T, International Society for Optics and Photonics SPIE, 2022.
@inproceedings{10.1117/12.2639851,
  title        = {Exploring neural architecture search for text classification},
  author       = {Sheng Zhang and Lixiang Guo and Jing Fan and Xin Zhang and Weiming Zhang},
  editor       = {Tao Zhang},
  url          = {https://doi.org/10.1117/12.2639851},
  doi          = {10.1117/12.2639851},
  year         = {2022},
  date         = {2022-01-01},
  urldate      = {2022-01-01},
  booktitle    = {7th International Symposium on Advances in Electrical, Electronics, and Computer Engineering},
  volume       = {12294},
  pages        = {122945T},
  publisher    = {SPIE},
  organization = {International Society for Optics and Photonics},
  keywords     = {},
  pubstate     = {published},
  tppubtype    = {inproceedings}
}
}
Ge, J.; Guo, D.; Ye, X.; Song, Y.; Hua, X.; Lu, L.; Lin, C. Y.; Jin, D.; Ho, T. Y.
Dosimetry Validation Study for Automated Head and Neck Cancer Organs at Risk Segmentation Using Stratified Learning and Neural Architecture Search Journal Article
In: International Journal of Radiation Oncology*Biology*Physics, vol. 114, no. 3, Supplement, pp. e583, 2022, ISSN: 0360-3016, (ASTRO Annual 2022 Meeting).
@article{GE2022e583,
  title     = {Dosimetry Validation Study for Automated Head and Neck Cancer Organs at Risk Segmentation Using Stratified Learning and Neural Architecture Search},
  author    = {J. Ge and D. Guo and X. Ye and Y. Song and X. Hua and L. Lu and C. Y. Lin and D. Jin and T. Y. Ho},
  url       = {https://www.sciencedirect.com/science/article/pii/S0360301622030115},
  doi       = {10.1016/j.ijrobp.2022.07.2255},
  issn      = {0360-3016},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {International Journal of Radiation Oncology*Biology*Physics},
  volume    = {114},
  number    = {3, Supplement},
  pages     = {e583},
  abstract  = {Purpose/Objective(s)
Organs at risk (OARs) segmentation is an essential process in head and neck (H&N) cancer radiotherapy. We have reported high automated segmentation geometric accuracy of Stratified Learning and Neural Architecture Search method in terms of Dice Score (DSC) from our previous study. In this study, we would evaluate the dosimetric influence of our automated approach before integrating this method into the clinical workflow.
Materials/Methods
To measure the dosimetric effects brought by the OARs’ variance, the intensity-modulated radiotherapy (IMRT) dose plans of 10 head and neck cancer patients were replanned using the original tumor target volumes and three substitute OAR contours permutations (deep learning generated stratified organs at risk segmentation (SOARS), SOARS revised by physician (SOARS-revised), and OAR delineated from scratch by physician (human reader)). We further examined the clinical dosimetric accuracy and the clinical reference OAR contours were overlaid on top of each replanned dose grid to evaluate the dosimetric differences.
Results
After replanning, SOARS and SOARS-revised contours have slightly smaller Diff (max dose) as compared to human reader contours (3.4%, 3.5% vs. 4.1%). For the Diff (mean dose), human reader, SOARS, SOARS-revised achieves similar results, i.e., 5.3%, 5.0%, and 5.0%, respectively. However, more OARs from the human reader have dose variations larger than 10% or 20% as compared to SOARS and SOARS-revised. Overall, our results indicate that using OAR contours from human reader, SOARS, and SOARS-revised lead to generally comparable dose accuracy in clinical practice. SOAR-related OAR contours have fewer OARs with dose error larger than 10% or 20%.
Conclusion
This study further validates the clinical applicability of a deep learning based automated H&N OAR segmentation method by comparing dosimetry of plans using OAR contours generated automatically and by a human reader to the gold standard contours. The dose variations calculated after planning on automated segmentation contours are less than 5%. Our proposed automated H&N OAR segmentation method not only achieves high geometric accuracy but also helps deliver treatment beams with little variances.},
  note      = {ASTRO Annual 2022 Meeting},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Organs at risk (OARs) segmentation is an essential process in head and neck (H&N) cancer radiotherapy. We have reported high automated segmentation geometric accuracy of Stratified Learning and Neural Architecture Search method in terms of Dice Score (DSC) from our previous study. In this study, we would evaluate the dosimetric influence of our automated approach before integrating this method into the clinical workflow.
Materials/Methods
To measure the dosimetric effects brought by the OARs’ variance, the intensity-modulated radiotherapy (IMRT) dose plans of 10 head and neck cancer patients were replanned using the original tumor target volumes and three substitute OAR contours permutations (deep learning generated stratified organs at risk segmentation (SOARS), SOARS revised by physician (SOARS-revised), and OAR delineated from scratch by physician (human reader)). We further examined the clinical dosimetric accuracy and the clinical reference OAR contours were overlaid on top of each replanned dose grid to evaluate the dosimetric differences.
Results
After replanning, SOARS and SOARS-revised contours have slightly smaller Diff (max dose) as compared to human reader contours (3.4%, 3.5% vs. 4.1%). For the Diff (mean dose), human reader, SOARS, SOARS-revised achieves similar results, i.e., 5.3%, 5.0%, and 5.0%, respectively. However, more OARs from the human reader have dose variations larger than 10% or 20% as compared to SOARS and SOARS-revised. Overall, our results indicate that using OAR contours from human reader, SOARS, and SOARS-revised lead to generally comparable dose accuracy in clinical practice. SOAR-related OAR contours have fewer OARs with dose error larger than 10% or 20%.
Conclusion
This study further validates the clinical applicability of a deep learning based automated H&N OAR segmentation method by comparing dosimetry of plans using OAR contours generated automatically and by a human reader to the gold standard contours. The dose variations calculated after planning on automated segmentation contours are less than 5%. Our proposed automated H&N OAR segmentation method not only achieves high geometric accuracy but also helps deliver treatment beams with little variances.
Huang, Junhao; Xue, Bing; Sun, Yanan; Zhang, Mengjie; Yen, Gary G.
Particle Swarm Optimization for Compact Neural Architecture Search for Image Classification Journal Article
In: IEEE Transactions on Evolutionary Computation, pp. 1-1, 2022.
@article{9930866,
  title     = {Particle Swarm Optimization for Compact Neural Architecture Search for Image Classification},
  author    = {Junhao Huang and Bing Xue and Yanan Sun and Mengjie Zhang and Gary G. Yen},
  url       = {https://ieeexplore.ieee.org/abstract/document/9930866},
  doi       = {10.1109/TEVC.2022.3217290},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {IEEE Transactions on Evolutionary Computation},
  pages     = {1-1},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Liu, Guangyuan; Li, Yangyang; Chen, Yanqiao; Shang, Ronghua; Jiao, Licheng
Pol-NAS: A Neural Architecture Search Method With Feature Selection for PolSAR Image Classification Journal Article
In: IEEE J. Sel. Top. Appl. Earth Obs. Remote. Sens., vol. 15, pp. 9339–9354, 2022.
@article{DBLP:journals/staeors/LiuLCSJ22,
  title     = {Pol-NAS: A Neural Architecture Search Method With Feature Selection for PolSAR Image Classification},
  author    = {Guangyuan Liu and Yangyang Li and Yanqiao Chen and Ronghua Shang and Licheng Jiao},
  url       = {https://doi.org/10.1109/JSTARS.2022.3217047},
  doi       = {10.1109/JSTARS.2022.3217047},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {IEEE J. Sel. Top. Appl. Earth Obs. Remote. Sens.},
  volume    = {15},
  pages     = {9339--9354},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Shu, Xin; Niu, Mengxuan; Zhang, Yi; Zhou, Renjie
NAS-PRNet: Neural Architecture Search generated Phase Retrieval Net for Off-axis Quantitative Phase Imaging Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-14231,
  title     = {NAS-PRNet: Neural Architecture Search generated Phase Retrieval Net for Off-axis Quantitative Phase Imaging},
  author    = {Xin Shu and Mengxuan Niu and Yi Zhang and Renjie Zhou},
  url       = {https://doi.org/10.48550/arXiv.2210.14231},
  doi       = {10.48550/arXiv.2210.14231},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {CoRR},
  volume    = {abs/2210.14231},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {techreport}
}
Qiu, Xin; Miikkulainen, Risto
Shortest Edit Path Crossover: A Theory-driven Solution to the Permutation Problem in Evolutionary Neural Architecture Search Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2210-14016,
  title     = {Shortest Edit Path Crossover: A Theory-driven Solution to the Permutation Problem in Evolutionary Neural Architecture Search},
  author    = {Xin Qiu and Risto Miikkulainen},
  url       = {https://doi.org/10.48550/arXiv.2210.14016},
  doi       = {10.48550/arXiv.2210.14016},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  journal   = {CoRR},
  volume    = {abs/2210.14016},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {techreport}
}
}
Du, Mingyang; Zhong, Ping; Cai, Xiaohao; Bi, Daping; Li, Zhifei
Balanced neural architecture search and optimization for specific emitter identification Proceedings Article
In: 2022 IEEE 12th International Conference on RFID Technology and Applications (RFID-TA), pp. 220-223, 2022.
@inproceedings{9924146,
  title     = {Balanced neural architecture search and optimization for specific emitter identification},
  author    = {Mingyang Du and Ping Zhong and Xiaohao Cai and Daping Bi and Zhifei Li},
  url       = {https://ieeexplore.ieee.org/abstract/document/9924146},
  doi       = {10.1109/RFID-TA54958.2022.9924146},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  booktitle = {2022 IEEE 12th International Conference on RFID Technology and Applications (RFID-TA)},
  pages     = {220--223},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}