@article{MTMT:34687657,
  title         = {{Deep Reinforcement Learning} combined with {RRT} for trajectory tracking of autonomous vehicles},
  url           = {https://m2.mtmt.hu/api/publication/34687657},
  author        = {Kővári, Bálint and Angyal, Balint Gergo and Bécsi, Tamás},
  doi           = {10.1016/j.trpro.2024.02.032},
  journal-iso   = {TRANSP RES PROCEDIA},
  journal       = {Transportation Research Procedia},
  volume        = {78},
  unique-id     = {34687657},
  issn          = {2352-1465},
  abstract      = {Sample inefficiency is a long-standing problem in Deep Reinforcement Learning based algorithms, which shadows the potential of these techniques. So far, the primary approach for tackling this issue is prioritizing the gathered experiences. However, the strategy behind collecting the experiences received less attention, but it is also a legitimate approach for prioritizing. In this paper, the Rapidly exploring Random Trees algorithm and Deep Reinforcement Learning are combined for the trajectory tracking of autonomous vehicles to mitigate the issues regarding sample efficiency. The core of the concept is to utilize the tremendous explorational power of RRT for covering the state space via experiences for the Agent to diversify its training data buffer. The results demonstrate that this approach outperforms the classic trial-and-error-based concept according to several performance indicators. © 2024 The Authors. Published by ELSEVIER B.V.},
  keywords      = {reinforcement learning; Exploration-exploitation trade-off; RRT},
  year          = {2024},
  eissn         = {2352-1457},
  pages         = {246--253},
  orcid-numbers = {Bécsi, Tamás/0000-0002-1487-9672},
}

@article{MTMT:34430090,
  title         = {{Multi-Agent Reinforcement Learning} for Highway Platooning},
  url           = {https://m2.mtmt.hu/api/publication/34430090},
  author        = {Kolat, Máté and Bécsi, Tamás},
  doi           = {10.3390/electronics12244963},
  journal-iso   = {ELECTRONICS (SWITZ)},
  journal       = {Electronics},
  volume        = {12},
  unique-id     = {34430090},
  abstract      = {The advent of autonomous vehicles has opened new horizons for transportation efficiency and safety. Platooning, a strategy where vehicles travel closely together in a synchronized manner, holds promise for reducing traffic congestion, lowering fuel consumption, and enhancing overall road safety. This article explores the application of Multi-Agent Reinforcement Learning (MARL) combined with Proximal Policy Optimization (PPO) to optimize autonomous vehicle platooning. We delve into the world of MARL, which empowers vehicles to communicate and collaborate, enabling real-time decision making in complex traffic scenarios. PPO, a cutting-edge reinforcement learning algorithm, ensures stable and efficient training for platooning agents. The synergy between MARL and PPO enables the development of intelligent platooning strategies that adapt dynamically to changing traffic conditions, minimize inter-vehicle gaps, and maximize road capacity. In addition to these insights, this article introduces a cooperative approach to Multi-Agent Reinforcement Learning (MARL), leveraging Proximal Policy Optimization (PPO) to further optimize autonomous vehicle platooning. This cooperative framework enhances the adaptability and efficiency of platooning strategies, marking a significant advancement in the pursuit of intelligent and responsive autonomous vehicle systems.},
  year          = {2023},
  eissn         = {2079-9292},
  orcid-numbers = {Bécsi, Tamás/0000-0002-1487-9672},
}

@article{MTMT:34418334,
  title         = {Enhanced Experience Prioritization: A Novel {Upper Confidence Bound} Approach},
  url           = {https://m2.mtmt.hu/api/publication/34418334},
  author        = {Kővári, Bálint and Pelenczei, Bálint and Bécsi, Tamás},
  doi           = {10.1109/ACCESS.2023.3339248},
  journal-iso   = {IEEE ACCESS},
  journal       = {IEEE Access},
  volume        = {11},
  unique-id     = {34418334},
  issn          = {2169-3536},
  year          = {2023},
  eissn         = {2169-3536},
  pages         = {138488--138501},
  orcid-numbers = {Bécsi, Tamás/0000-0002-1487-9672},
}

@article{MTMT:34224651,
  title         = {On the relationship between the activity at point of interests and road traffic},
  url           = {https://m2.mtmt.hu/api/publication/34224651},
  author        = {Kolat, Máté and Tettamanti, Tamás and Bécsi, Tamás and Esztergár-Kiss, Domokos},
  doi           = {10.1016/j.commtr.2023.100102},
  journal-iso   = {COMM TRANSPORT RES},
  journal       = {Communications in Transportation Research},
  volume        = {3},
  unique-id     = {34224651},
  issn          = {2772-4247},
  year          = {2023},
  orcid-numbers = {Tettamanti, Tamás/0000-0002-8934-3653; Bécsi, Tamás/0000-0002-1487-9672; Esztergár-Kiss, Domokos/0000-0002-7424-4214},
}

@inproceedings{MTMT:34039486,
  title         = {A Runtime-Efficient Multi-Object Tracking Approach for Automotive Perception Systems},
  url           = {https://m2.mtmt.hu/api/publication/34039486},
  author        = {Lindenmaier, László and Czibere, Balázs and Aradi, Szilárd and Bécsi, Tamás},
  booktitle     = {{IEEE} 17th International Symposium on Applied Computational Intelligence and Informatics ({SACI} 2023): Proceedings},
  doi           = {10.1109/SACI58269.2023.10158542},
  unique-id     = {34039486},
  year          = {2023},
  pages         = {000785--000792},
  orcid-numbers = {Bécsi, Tamás/0000-0002-1487-9672},
}

@article{MTMT:34027317,
  title         = {Object-Level Data-Driven Sensor Simulation for Automotive Environment Perception},
  url           = {https://m2.mtmt.hu/api/publication/34027317},
  author        = {Lindenmaier, László and Aradi, Szilárd and Bécsi, Tamás and Törő, Olivér and Gáspár, Péter},
  doi           = {10.1109/TIV.2023.3287278},
  journal-iso   = {IEEE Transactions on Intelligent Vehicles},
  journal       = {IEEE Transactions on Intelligent Vehicles},
  volume        = {8},
  unique-id     = {34027317},
  issn          = {2379-8858},
  year          = {2023},
  eissn         = {2379-8904},
  pages         = {4341--4356},
  orcid-numbers = {Bécsi, Tamás/0000-0002-1487-9672; Törő, Olivér/0000-0002-7288-5229; Gáspár, Péter/0000-0003-3388-1724},
}

@inproceedings{MTMT:34012750,
  title         = {A Point of Interest ({POI}) adatok és a közúti forgalom közötti összefüggés / Correlation between {POI} and road traffic data},
  url           = {https://m2.mtmt.hu/api/publication/34012750},
  author        = {Kolat, Máté and Esztergár-Kiss, Domokos and Tettamanti, Tamás and Bécsi, Tamás},
  booktitle     = {XIII. International Conference on Transport Sciences / XIII. Nemzetközi Közlekedéstudományi Konferencia, Győr},
  unique-id     = {34012750},
  year          = {2023},
  pages         = {293},
  orcid-numbers = {Esztergár-Kiss, Domokos/0000-0002-7424-4214; Tettamanti, Tamás/0000-0002-8934-3653; Bécsi, Tamás/0000-0002-1487-9672},
}

@article{MTMT:33706849,
  title         = {Traffic Signal Control with Successor Feature-Based {Deep Reinforcement Learning} Agent},
  url           = {https://m2.mtmt.hu/api/publication/33706849},
  author        = {Szőke, László and Aradi, Szilárd and Bécsi, Tamás},
  doi           = {10.3390/electronics12061442},
  journal-iso   = {ELECTRONICS (SWITZ)},
  journal       = {Electronics},
  volume        = {12},
  unique-id     = {33706849},
  abstract      = {In this paper, we study the problem of traffic signal control in general intersections by applying a recent reinforcement learning technique. Nowadays, traffic congestion and road usage are increasing significantly as more and more vehicles enter the same infrastructures. New solutions are needed to minimize travel times or maximize the network capacity (throughput). Recent studies embrace machine learning approaches that have the power to aid and optimize the increasing demands. However, most reinforcement learning algorithms fail to be adaptive regarding goal functions. To this end, we provide a novel successor feature-based solution to control a single intersection to optimize the traffic flow, reduce the environmental impact, and promote sustainability. Our method allows for flexibility and adaptability to changing circumstances and goals. It supports changes in preferences during inference, so the behavior of the trained agent (traffic signal controller) can be changed rapidly during the inference time. By introducing the successor features to the domain, we define the basics of successor features, the base reward functions, and the goal preferences of the traffic signal control system. As our main direction, we tackle environmental impact reduction and support prioritized vehicles’ commutes. We include an evaluation of how our method achieves a more effective operation considering the environmental impact and how adaptive it is compared to a general Deep-Q-Network solution. Aside from this, standard rule-based and adaptive signal-controlling technologies are compared to our method to show its advances. Furthermore, we perform an ablation analysis on the adaptivity of the agent and demonstrate a consistent level of performance under similar circumstances.},
  year          = {2023},
  eissn         = {2079-9292},
  orcid-numbers = {Szőke, László/0000-0001-9926-4054; Bécsi, Tamás/0000-0002-1487-9672},
}

@article{MTMT:33644779,
  title         = {{Multi-Agent Reinforcement Learning} for Traffic Signal Control: A Cooperative Approach},
  url           = {https://m2.mtmt.hu/api/publication/33644779},
  author        = {Kolat, Máté and Kővári, Bálint and Bécsi, Tamás and Aradi, Szilárd},
  doi           = {10.3390/su15043479},
  journal-iso   = {SUSTAINABILITY-BASEL},
  journal       = {Sustainability},
  volume        = {15},
  unique-id     = {33644779},
  abstract      = {The rapid growth of urbanization and the constant demand for mobility have put a great strain on transportation systems in cities. One of the major challenges in these areas is traffic congestion, particularly at signalized intersections. This problem not only leads to longer travel times for commuters, but also results in a significant increase in local and global emissions. The fixed cycle of traffic lights at these intersections is one of the primary reasons for this issue. To address these challenges, applying reinforcement learning to coordinating traffic light controllers has become a highly researched topic in the field of transportation engineering. This paper focuses on the traffic signal control problem, proposing a solution using a multi-agent deep Q-learning algorithm. This study introduces a novel rewarding concept in the multi-agent environment, as the reward schemes have yet to evolve in the following years with the advancement of techniques. The goal of this study is to manage traffic networks in a more efficient manner, taking into account both sustainability and classic measures. The results of this study indicate that the proposed approach can bring about significant improvements in transportation systems. For instance, the proposed approach can reduce fuel consumption by 11\% and average travel time by 13\%. The results of this study demonstrate the potential of reinforcement learning in improving the coordination of traffic light controllers and reducing the negative impacts of traffic congestion in urban areas. The implementation of this proposed solution could contribute to a more sustainable and efficient transportation system in the future.},
  year          = {2023},
  eissn         = {2071-1050},
  orcid-numbers = {Bécsi, Tamás/0000-0002-1487-9672},
}

@article{MTMT:33313067,
  title         = {Analytic solution of the exact {Daum–Huang} flow equation for particle filters},
  url           = {https://m2.mtmt.hu/api/publication/33313067},
  author        = {Törő, Olivér and Bécsi, Tamás},
  doi           = {10.1016/j.inffus.2022.11.027},
  journal-iso   = {INFORM FUSION},
  journal       = {Information Fusion},
  volume        = {92},
  unique-id     = {33313067},
  issn          = {1566-2535},
  abstract      = {State estimation for nonlinear systems, especially in high dimensions, is a generally intractable problem, despite the ever-increasing computing power. Efficient algorithms usually apply a finite-dimensional model for approximating the probability density of the state vector or treat the estimation problem numerically. In 2007 Daum and Huang introduced a novel particle filter approach that uses a homotopy-induced particle flow for the Bayesian update step. Multiple types of particle flows were derived since with different properties. The exact flow considered in this work is a first-order linear ordinary time-varying inhomogeneous differential equation for the particle motion. An analytic solution in the interval [0,1] is derived for the scalar measurement case, which enables significantly faster computation of the Bayesian update step for particle filters.},
  keywords      = {state estimation; Nonlinear filtering; Particle filter; Computer Science, Artificial Intelligence; particle flow},
  year          = {2023},
  eissn         = {1872-6305},
  pages         = {247--255},
  orcid-numbers = {Törő, Olivér/0000-0002-7288-5229; Bécsi, Tamás/0000-0002-1487-9672},
}