An important result from psycholinguistics (Griffiths & Kalish, 2005) states that no language can be learned iteratively by rational agents in a self-sustaining manner. We show how to modify the learning process slightly in order to achieve self-sustainability. Our work is in two parts. First, we characterize iterated learnability in geometric terms and show how a slight, steady increase in the lengths of the training sessions ensures self-sustainability for any discrete language class. In the second part, we tackle the nondiscrete case and investigate self-sustainability for iterated linear regression. We discuss the implications of our findings for non-equilibrium dynamics in natural algorithms.
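The setting behind the Griffiths–Kalish result is a chain of Bayesian agents, each learning from examples produced by its predecessor and then teaching its successor. As a minimal sketch of the regression setting studied in the second part, the toy simulation below runs iterated Bayesian linear regression with a one-parameter slope model, a conjugate zero-mean Gaussian prior, and sampler agents. It illustrates the drift toward the prior under fixed-length training sessions, and how lengthening the sessions slows that drift. All parameter values, function names, and the particular growth schedule are illustrative assumptions, not taken from the paper.

import numpy as np

rng = np.random.default_rng(0)

# Toy model (illustrative, not the paper's construction): each
# generation t fits a slope from n_t noisy examples produced by its
# predecessor, then teaches the next generation.

sigma2 = 0.25      # observation noise variance (assumed)
tau2 = 1.0         # variance of the zero-mean Gaussian prior on the slope
w_true = 2.0       # slope of the founding generation's "language"

def train_and_teach(w_teacher, n):
    """One generation: observe n examples from the teacher, form the
    conjugate Gaussian posterior over the slope, and sample a new slope
    (a 'sampler' agent in the Griffiths-Kalish sense)."""
    x = rng.normal(size=n)
    y = w_teacher * x + rng.normal(scale=np.sqrt(sigma2), size=n)
    precision = 1.0 / tau2 + x @ x / sigma2   # posterior precision
    mean = (x @ y / sigma2) / precision       # posterior mean
    return rng.normal(mean, np.sqrt(1.0 / precision))

def iterate(n_of_t, generations=200):
    """Run the chain for a given training-session schedule n_of_t."""
    w = w_true
    for t in range(generations):
        w = train_and_teach(w, n_of_t(t))
    return w

# Fixed-length sessions: the chain forgets the founding slope and
# drifts toward the prior (mean slope near 0).
drift = [iterate(lambda t: 5) for _ in range(20)]
# Growing sessions: later generations learn more accurately, which
# slows the drift; the paper analyzes which schedules suffice for
# genuine self-sustainability.
sustained = [iterate(lambda t: 5 + t) for _ in range(20)]

print("fixed-length sessions, mean recovered slope:", np.mean(drift))
print("growing sessions,      mean recovered slope:", np.mean(sustained))

In this toy run, the fixed schedule collapses the recovered slope toward the zero-mean prior, while the growing schedule retains a substantial fraction of the founding slope over the same horizon; the paper makes precise which increases in session length guarantee self-sustainability.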
@InProceedings{chazelle_et_al:LIPIcs.ITCS.2017.17,
  author    = {Chazelle, Bernard and Wang, Chu},
  title     = {{Self-Sustaining Iterated Learning}},
  booktitle = {8th Innovations in Theoretical Computer Science Conference (ITCS 2017)},
  pages     = {17:1--17:17},
  series    = {Leibniz International Proceedings in Informatics (LIPIcs)},
  ISBN      = {978-3-95977-029-3},
  ISSN      = {1868-8969},
  year      = {2017},
  volume    = {67},
  editor    = {Papadimitriou, Christos H.},
  publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik},
  address   = {Dagstuhl, Germany},
  URL       = {https://drops.dagstuhl.de/entities/document/10.4230/LIPIcs.ITCS.2017.17},
  URN       = {urn:nbn:de:0030-drops-81711},
  doi       = {10.4230/LIPIcs.ITCS.2017.17},
  annote    = {Keywords: Iterated learning, language evolution, iterated Bayesian linear regression, non-equilibrium dynamics}
}