@article{MTMT:34760960,
  title       = {Mi is valójában a mesterséges intelligencia?},
  url         = {https://m2.mtmt.hu/api/publication/34760960},
  author      = {Végh, János},
  doi         = {10.53793/RV.2024.1.2},
  journal-iso = {RENDVÉDELEM},
  journal     = {RENDVÉDELEM TUDOMÁNYOS FOLYÓIRAT (ON-LINE)},
  volume      = {13},
  unique-id   = {34760960},
  abstract    = {A mesterséges intelligencia (MI) az emberek által végzett, nem rutinszerű tevékenységek ellátására készített számítógépes rendszerek neve, de ismertté szöveggenerálási képessége (LLM) révén vált. Meg kell értenünk alapfogalmait és működési elveit, valamint használatának következményeit energiafelhasználási, fenntarthatósági és környezetszennyezési szempontból is. Az MI lehetőségei messze állnak attól, amit feltételeznek, de veszélyei sem akkorák; feltéve, hogy megértjük, helyesen használjuk és szabályozzuk használatát.},
  keywords    = {Mesterséges intelligencia; generatív; ChatGPT},
  year        = {2024},
  eissn       = {2560-2349},
  pages       = {25--40},
}

@article{MTMT:33199928,
  title         = {On the Role of Speed in Technological and Biological Information Transfer for Computations},
  url           = {https://m2.mtmt.hu/api/publication/33199928},
  author        = {Végh, János and Berki, Ádám József},
  doi           = {10.1007/s10441-022-09450-6},
  journal-iso   = {ACTA BIOTHEOR},
  journal       = {ACTA BIOTHEORETICA},
  volume        = {70},
  unique-id     = {33199928},
  issn          = {0001-5342},
  abstract      = {In all kinds of implementations of computing, whether technological or biological, some material carrier for the information exists, so in real-world implementations, the propagation speed of information cannot exceed the speed of its carrier. Because of this limitation, one must also consider the transfer time between computing units for any implementation. We need a different mathematical method to consider this limitation: classic mathematics can only describe infinitely fast and small computing system implementations. The difference between mathematical handling methods leads to different descriptions of the computing features of the systems. The proposed handling also explains why biological implementations can have lifelong learning and technological ones cannot. Our conclusion about learning matches published experimental evidence, both in biological and technological computing.},
  year          = {2022},
  eissn         = {1572-8358},
  orcid-numbers = {Végh, János/0000-0002-3247-7810; Berki, Ádám József/0000-0001-7099-167X},
}

@article{MTMT:33095696,
  title         = {Towards Generalizing the Information Theory for Neural Communication},
  url           = {https://m2.mtmt.hu/api/publication/33095696},
  author        = {Végh, János and Berki, Ádám József},
  doi           = {10.3390/e24081086},
  journal-iso   = {ENTROPY-SWITZ},
  journal       = {ENTROPY},
  volume        = {24},
  unique-id     = {33095696},
  abstract      = {Neuroscience extensively uses the information theory to describe neural communication, among others, to calculate the amount of information transferred in neural communication and to attempt the cracking of its coding. There are fierce debates on how information is represented in the brain and during transmission inside the brain. The neural information theory attempts to use the assumptions of electronic communication; despite the experimental evidence that the neural spikes carry information on non-discrete states, they have shallow communication speed, and the spikes' timing precision matters. Furthermore, in biology, the communication channel is active, which enforces an additional power bandwidth limitation to the neural information transfer. The paper revises the notions needed to describe information transfer in technical and biological communication systems. It argues that biology uses Shannon's idea outside of its range of validity and introduces an adequate interpretation of information. In addition, the presented time-aware approach to the information theory reveals pieces of evidence for the role of processes (as opposed to states) in neural operations. The generalized information theory describes both kinds of communication, and the classic theory is the particular case of the generalized theory.},
  keywords      = {INFORMATION THEORY; CAUSALITY; Neural computing; Information content; Neural information; neural communication; Neural Learning; time-aware computing; neural bandwidth; power bandwidth},
  year          = {2022},
  eissn         = {1099-4300},
  orcid-numbers = {Berki, Ádám József/0000-0001-7099-167X},
}

@article{MTMT:32649571,
  title         = {Why Learning and Machine Learning Are Different},
  url           = {https://m2.mtmt.hu/api/publication/32649571},
  author        = {Végh, János and Berki, Ádám József},
  doi           = {10.54364/AAIML.2021.1109},
  journal-iso   = {AAIML},
  journal       = {Advances in Artificial Intelligence and Machine Learning},
  volume        = {1},
  unique-id     = {32649571},
  year          = {2021},
  eissn         = {2582-9793},
  pages         = {136--154},
  orcid-numbers = {Berki, Ádám József/0000-0001-7099-167X},
}

@article{MTMT:32649565,
  title       = {Revising the Classic Computing Paradigm and Its Technological Implementations},
  url         = {https://m2.mtmt.hu/api/publication/32649565},
  author      = {Végh, János},
  doi         = {10.3390/informatics8040071},
  journal-iso = {INFORMATICS-BASEL},
  journal     = {INFORMATICS (BASEL)},
  volume      = {8},
  unique-id   = {32649565},
  year        = {2021},
  eissn       = {2227-9709},
}

@article{MTMT:32234513,
  title       = {Which scaling rule applies to large artificial neural networks. Technological limitations for biology-imitating computing},
  url         = {https://m2.mtmt.hu/api/publication/32234513},
  author      = {Végh, János},
  doi         = {10.1007/s00521-021-06456-y},
  journal-iso = {NEURAL COMPUT APPL},
  journal     = {NEURAL COMPUTING \& APPLICATIONS},
  volume      = {33},
  unique-id   = {32234513},
  issn        = {0941-0643},
  abstract    = {Experience shows that cooperating and communicating computing systems, comprising segregated single processors, have severe performance limitations, which cannot be explained using von Neumann’s classic computing paradigm. In his classic “First Draft,” he warned that using a “too fast processor” vitiates his simple “procedure” (but not his computing model!); furthermore, that using the classic computing paradigm for imitating neuronal operations is unsound. Amdahl added that large machines, comprising many processors, have an inherent disadvantage. Given that artificial neural network’s (ANN’s) components are heavily communicating with each other, they are built from a large number of components designed/fabricated for use in conventional computing, furthermore they attempt to mimic biological operation using improper technological solutions, and their achievable payload computing performance is conceptually modest. The type of workload that artificial intelligence-based systems generate leads to an exceptionally low payload computational performance, and their design/technology limits their size to just above the “toy” level systems: The scaling of processor-based ANN systems is strongly nonlinear. Given the proliferation and growing size of ANN systems, we suggest ideas to estimate in advance the efficiency of the device or application. The wealth of ANN implementations and the proprietary technical data do not enable more. Through analyzing published measurements, we provide evidence that the role of data transfer time drastically influences both ANNs performance and feasibility. It is discussed how some major theoretical limiting factors, ANN’s layer structure and their methods of technical implementation of communication affect their efficiency. The paper starts from von Neumann’s original model, without neglecting the transfer time apart from processing time, and derives an appropriate interpretation and handling for Amdahl’s law. It shows that, in that interpretation, Amdahl’s law correctly describes ANNs.},
  keywords    = {Artificial intelligence; Neural network; Temporal logic; Energy efficiency; Computing efficiency; Scaling rule},
  year        = {2021},
  eissn       = {1433-3058},
  pages       = {16847--16864},
}

@article{MTMT:31862730,
  title       = {Why do we need to Introduce Temporal Behavior in Both Modern Science and Modern Computing, with an Outlook to Researching Modern Effects/Materials and Technologies},
  url         = {https://m2.mtmt.hu/api/publication/31862730},
  author      = {Végh, János},
  doi         = {10.34257/GJCSTAVOL20IS1PG13},
  journal-iso = {GLOBAL J COMP SCI TECHN},
  journal     = {GLOBAL JOURNAL OF COMPUTER SCIENCE AND TECHNOLOGY},
  volume      = {20},
  unique-id   = {31862730},
  issn        = {0975-4350},
  year        = {2021},
  eissn       = {0975-4172},
  pages       = {13--29},
}

@inproceedings{MTMT:32677831,
  title         = {Do we know the operating principles of our computers better than those of our brain?},
  url           = {https://m2.mtmt.hu/api/publication/32677831},
  author        = {Végh, János and Berki, Ádám József},
  booktitle     = {2020 International Conference on Computational Science and Computational Intelligence (CSCI)},
  doi           = {10.1109/CSCI51800.2020.00120},
  unique-id     = {32677831},
  year          = {2020},
  pages         = {668--674},
  orcid-numbers = {Berki, Ádám József/0000-0001-7099-167X},
}

@inproceedings{MTMT:32649572,
  title     = {{von Neumann}'s missing ``Second Draft'': what it should contain},
  url       = {https://m2.mtmt.hu/api/publication/32649572},
  author    = {Végh, János},
  booktitle = {2020 International Conference on Computational Science and Computational Intelligence (CSCI)},
  doi       = {10.1109/CSCI51800.2020.00235},
  unique-id = {32649572},
  year      = {2020},
  pages     = {1260--1264},
}

@book{MTMT:31186668,
  title     = {How deep the machine learning can be},
  url       = {https://m2.mtmt.hu/api/publication/31186668},
  author    = {Végh, János},
  publisher = {Nova Science Publishers},
  unique-id = {31186668},
  year      = {2020},
}