@STRING{i3etnn = "IEEE Trans. Neural Networks"} @STRING{i3etsp = "IEEE Trans. Signal Processing"} @STRING{ieee = "IEEE"} @STRING{nc = "Neural Computation"} @STRING{nips = "Advances in Neural Information Processing Systems"} @STRING{ieeepr = "IEEE Press"} @STRING{mitpr = "The MIT Press"} @STRING{phall = "Prentice-Hall"} @STRING{elsev = "Elsevier Science Publishers B.V. (North Holland)"} @STRING{pinj = "Piscataway, NJ"} @STRING{nyny = "New York, NY"} @STRING{camma = "Cambridge, MA"} @InCollection{back2004book, author = "A.D. Back", title = "Independent Component Analysis", booktitle = "Applied Intelligent Systems: New Directions", series = "Series: Studies in Fuzziness and Soft Computing", editor = "J. Fulcher and L. Jain", publisher = "Springer-Verlag", year = "2004" } @InCollection{back2002book, author = "A.D. Back", title = "Radial Basis Functions", booktitle = "Handbook of Neural Network Signal Processing", series = "The Electrical Engineering and Applied Signal Processing Series", editor = "Y.H. Hu and J-N. Hwang", publisher = "CRC Press", pages = "3-1--3-23", year = "2002" } @InCollection{back2001book, author = "A.C. Tsoi and A.D. Back and J. Principe and M. Mozer", title = "Memory Kernels", booktitle = "A Field Guide to Dynamical Recurrent Networks", editor = "J.F. Kolen and S.C. Kremer", publisher = "Wiley-IEEE Press ", year = "2001" } @article{backtrappenberg2001, author = "A.D Back and T. Trappenberg", title = "Selecting inputs for modelling using normalized higher order statistics and independent component analysis", journal = "IEEE Trans. on Neural Networks", VOLUME = {12}, NUMBER = {3}, PAGES = {612-617}, YEAR = {2001} } @article{nicoback2001, author = "N. Iannella and A.D. Back", title = "A spiking neural network architecture for nonlinear function approximation", journal = "Neural Networks", VOLUME = {14}, NUMBER = {6/7}, YEAR = {2001} } @misc{ back-input, author = "A. Back and T. Trappenberg", title = "Input variable selection using independent component analysis", text = "A.D. Back and T.P. Trappenberg, Input variable selection using independent component analysis, IJCNN'99.", url = "citeseer.nj.nec.com/326179.html" } @InProceedings{trappenbergback2000, AUTHOR = {T. Trappenberg and A.D. Back}, TITLE = {A Classification Scheme for Applications with Ambiguous Data }, BOOKTITLE = {IEEE International Joint Conference on Neural Networks IJCNN2000}, YEAR = {2000} } @InProceedings{back_trappenberg1999, AUTHOR = {A.D. Back and T. Trappenberg}, TITLE = {Input Variable Selection Using Independent Component Analysis}, BOOKTITLE = {IEEE International Joint Conference on Neural Networks IJCNN99}, VOLUME = {2}, PAGES = {989-992}, YEAR = {1999} } @article{back_etal99a, AUTHOR = {A.D. Back and B.G. Horne and A.C. Tsoi and C. Lee Giles}, TITLE = {Alternative Discrete-Time Operators: An Algorithm for Optimal Selection of Parameters}, JOURNAL = {IEEE Trans. Signal Processing}, VOLUME = {47}, NUMBER = {9}, PAGES = {2612--2615}, MONTH = {Sep}, YEAR = {1999}, ABSTRACT = {In this note, we consider the issue of parameter sensitivity in models based on alternative discrete time operators (ADTOs). A generic first order ADTO is proposed which encompasses all the known first order ADTOs introduced so far in the literature. New bounds on the operator parameters are derived, and a new algorithm is given for optimally selecting the parameters to give minimum parameter senstivity.} } @InProceedings{back_cichocki_ica99, AUTHOR = {A.D. Back and A. 
@InProceedings{iannella98,
  author    = {N. Iannella and A.D. Back},
  title     = {Function Approximation Using Spiking Neural Networks},
  booktitle = {Presented at the RIKEN Brain Science Institute Retreat},
  month     = oct,
  year      = {1998}
}

@InProceedings{back_bsi98,
  author    = {A.D. Back and T. Trappenberg and A. Cichocki},
  title     = {Input Variable Selection Using Independent Component Analysis and Higher Order Statistics},
  booktitle = {Presented at the RIKEN Brain Science Institute Retreat},
  month     = oct,
  year      = {1999}
}

@InProceedings{back_weigend_iafe98,
  author    = {A.D. Back and A.S. Weigend},
  title     = {What drives stock returns? -- An independent component analysis},
  booktitle = {Proceedings of the {IEEE}/{IAFE} Conference on Computational Intelligence for Financial Engineering ({CIFE}r)},
  pages     = {141--156},
  publisher = ieee,
  address   = pinj,
  year      = {1998}
}

@Article{back_tsoi_nc98,
  author   = {A.D. Back and A.C. Tsoi},
  title    = {A low sensitivity recurrent neural network},
  journal  = nc,
  volume   = {10},
  number   = {1},
  pages    = {165--188},
  year     = {1998},
  abstract = {The problem of high sensitivity in modelling is well known. Small perturbations in the model parameters may result in large, undesired changes in the model behaviour. A number of authors have considered the issue of sensitivity in feedforward neural networks from a probabilistic perspective. Less attention has been given to such issues in recurrent neural networks. In this paper, we present a new recurrent neural network architecture which is capable of significantly improved parameter sensitivity properties as compared with existing recurrent neural networks. The new recurrent neural network generalizes previous architectures by employing alternative discrete-time operators in place of the usual shift operator normally used in the feedback connections. An analysis of the model is given which demonstrates the existence of parameter sensitivity in recurrent neural networks and supports the proposed architecture. The new architecture performs significantly better than previous recurrent neural networks, as shown by a series of simple numerical experiments.}
}

@InCollection{lawrence98neural,
  author    = {S. Lawrence and I. Burns and A.D. Back and A.C. Tsoi and C. Lee Giles},
  title     = {Neural Network Classification and Unequal Prior Class Probabilities},
  booktitle = {Neural Networks: Tricks of the Trade},
  series    = {Lecture Notes in Computer Science: State-of-the-Art Surveys},
  editor    = {G. Orr and K.-R. {M\"uller} and R. Caruana},
  publisher = {Springer-Verlag},
  pages     = {299--314},
  year      = {1998}
}

@Article{back_weigend_ijns97,
  author   = {A.D. Back and A.S. Weigend},
  title    = {A first application of independent component analysis to extracting structure from stock returns},
  journal  = {International Journal of Neural Systems},
  month    = aug,
  year     = {1997},
  volume   = {8},
  number   = {4},
  pages    = {473--484},
  abstract = {In this paper we consider the application of a signal processing technique known as independent component analysis (ICA), or blind source separation, to multivariate financial time series such as a portfolio of stocks. The key idea of ICA is to linearly map the observed multivariate time series into a new space of statistically independent components (ICs). We apply ICA to three years of daily returns of the 28 largest Japanese stocks and compare the results with those obtained using principal component analysis (PCA). The results indicate that the estimated ICs fall into two categories: (i) infrequent but large shocks (responsible for the major changes in the stock prices), and (ii) frequent smaller fluctuations (contributing little to the overall level of the stocks). We show that the overall stock price can be reconstructed surprisingly well by using a small number of thresholded weighted ICs. In contrast, when using shocks derived from principal components instead of independent components, the reconstructed price is less similar to the original one. Independent component analysis is shown to be a potentially powerful method of analysing and understanding driving mechanisms in financial time series.}
}
@Article{tsoi_back_neuro97,
  author  = {A.C. Tsoi and A.D. Back},
  title   = {Discrete-time recurrent neural network architectures: a unifying review},
  journal = {Neurocomputing},
  year    = {1997},
  volume  = {15},
  number  = {3--4},
  pages   = {183--223}
}

@InProceedings{back_weigend_cf97,
  author    = {A.D. Back and A.S. Weigend},
  title     = {Discovering structure in finance using independent component analysis},
  booktitle = {Computational Finance 1997 -- The Fifth International Conference on Neural Networks in the Capital Markets},
  editor    = {A-P.N. Refenes and A.N. Burgess and J.E. Moody},
  publisher = {Kluwer Academic},
  year      = {1998},
  abstract  = {Independent component analysis is a new signal processing technique. In this paper we apply it to a portfolio of Japanese stock price returns over three years of daily data and compare the results with those obtained using principal component analysis. The results indicate that the independent components fall into two categories: (i) infrequent but large shocks (responsible for the major changes in the stock prices), and (ii) frequent but rather small fluctuations (contributing little to the overall level of the stocks). The small number of major shocks indicates turning points in the time series and, when used to reconstruct the stock prices, gives good results in terms of morphology. In contrast, when using shocks derived from principal components instead of independent components, the reconstructed price does not show the same results at all. Independent component analysis is shown to be a potentially powerful method of analysing and understanding driving mechanisms in financial time series.}
}

@InProceedings{back_nnsp97,
  author       = {A.D. Back},
  title        = {Multiple and time-varying dynamic modelling capabilities of recurrent neural networks},
  booktitle    = {Proc. of the 1997 IEEE Workshop on Neural Networks for Signal Processing 7 ({NNSP97})},
  pages        = {121--130},
  organization = ieeepr,
  address      = nyny,
  year         = {1997},
  editor       = {J. Principe and C.L. Giles and N. Morgan and E. Wilson}
}

@InProceedings{back_horne_tsoi_giles_nnsp97,
  author       = {A.D. Back and B.G. Horne and A.C. Tsoi and C.L. Giles},
  title        = {Low sensitivity time delay neural networks with cascade form structure},
  booktitle    = {Proc. of the 1997 IEEE Workshop on Neural Networks for Signal Processing 7 ({NNSP97})},
  year         = {1997},
  organization = ieeepr,
  address      = nyny,
  pages        = {44--53},
  editor       = {J. Principe and C.L. Giles and N. Morgan and E. Wilson}
}

@InProceedings{back_cichocki_iconip97,
  author       = {A.D. Back and A. Cichocki},
  title        = {Blind source separation and deconvolution of fast sampled signals},
  booktitle    = {Proc. of the 1997 Int. Conf. on Neural Information Processing ({ICONIP}'97)},
  year         = {1997},
  organization = {Springer-Verlag},
  address      = {Singapore},
  editor       = {N.~Kasabov and R.~Kozma and K.~Ko and R.~O'Shea and G.~Coghill and T.~Gedeon},
  abstract     = {In real-world implementations of blind source separation and deconvolution, the mixing takes place in continuous time. In the models normally considered, discrete-time sampling is implicitly assumed to provide a mixing filter matrix from which a suitable demixing filter matrix can be learned given an appropriate algorithm. In this paper, we consider the implications of trying to separate and deconvolve signals which may include some signals that are low frequency compared to the sample rate. It is shown that if a fast sampling rate is used to obtain the discrete-time observed data, learning to solve blind source separation and deconvolution tasks can be very difficult. This is due to the data covariance matrix becoming almost singular. We propose a discrete-time model based on alternative discrete-time operators which is capable of overcoming these problems and giving significantly improved performance under the conditions described.}
}
@InProceedings{back_chen_iconip97,
  author       = {A.D. Back and T.P. Chen},
  title        = {Approximation of hybrid systems by neural networks},
  booktitle    = {Proc. of the 1997 Int. Conf. on Neural Information Processing ({ICONIP}'97)},
  volume       = {1},
  year         = {1997},
  organization = {Springer-Verlag},
  address      = {Singapore},
  pages        = {326--329},
  editor       = {N.~Kasabov and R.~Kozma and K.~Ko and R.~O'Shea and G.~Coghill and T.~Gedeon},
  abstract     = {In this paper it is shown that hybrid systems can be approximated arbitrarily well by recurrent neural networks. The results indicate that the newly emerging field of hybrid systems can be considered in terms of the architectures and learning algorithms developed for neural network models. Examples are given of the types of architectures that can be developed.}
}

@TechReport{back_tsoi_horne_giles_tr97,
  author      = {A.D. Back and A.C. Tsoi and B.G. Horne and C.L. Giles},
  title       = {Alternative discrete-time operators and their application to nonlinear models},
  year        = {1997},
  number      = {CS-TR-3738 and UMIACS-TR-97-03},
  type        = {Technical Report},
  institution = {Institute for Advanced Computer Studies, University of Maryland, College Park, MD 20742},
  abstract    = {The shift operator, defined as $qx(t) = x(t+1)$, is the basis for almost all discrete-time models. It has been shown, however, that linear models based on the shift operator suffer problems when used to model lightly-damped-low-frequency (LDLF) systems, with poles near $(1,0)$ on the unit circle in the complex plane. This problem occurs under fast sampling conditions. As the sampling rate increases, coefficient sensitivity and round-off noise become a problem as the difference between successive sampled inputs becomes smaller and smaller. The resulting coefficients of the model approach the coefficients obtained in a binomial expansion, regardless of the underlying continuous-time system. This implies that for a given finite wordlength, severe inaccuracies may result. Wordlengths for the coefficients may also need to be made longer to accommodate models which have low frequency characteristics, corresponding to poles in the neighbourhood of $(1,0)$. These problems also arise in neural network models which comprise linear parts and nonlinear neural activation functions. Various alternative discrete-time operators can be introduced which offer numerical computational advantages over the conventional shift operator. The alternative discrete-time operators have been proposed independently of each other in the fields of digital filtering, adaptive control and neural networks. These include the delta, rho, gamma and bilinear operators. In this paper we first review these operators and examine some of their properties. An analysis of the TDNN and FIR MLP network structures is given which shows their susceptibility to parameter sensitivity problems. Subsequently, it is shown that models may be formulated using alternative discrete-time operators which have low sensitivity properties. Consideration is given to the problem of finding parameters for stable alternative discrete-time operators. A learning algorithm which adapts the alternative discrete-time operator parameters on-line is presented for MLP neural network models based on alternative discrete-time operators. It is shown that neural network models which use these alternative discrete-time operators perform better than those using the shift operator alone.}
}
@Article{lawrence97face,
  author   = {S. Lawrence and C. Lee Giles and A.C. Tsoi and A.D. Back},
  title    = {Face Recognition: A Convolutional Neural Network Approach},
  journal  = i3etnn,
  volume   = {8},
  number   = {1},
  pages    = {98--113},
  year     = {1997},
  keywords = {convolutional networks, hybrid systems, face recognition, self-organizing map},
  email    = {lawrence@research.nj.nec.com},
  url      = {http://www.neci.nj.nec.com/homepages/lawrence/papers/face-tnn97/}
}

@Article{lawrence97distribution,
  author  = {S. Lawrence and A.D. Back and A.C. Tsoi and C. Lee Giles},
  title   = {On the Distribution of Performance from Multiple Neural Network Trials},
  journal = i3etnn,
  email   = {lawrence@research.nj.nec.com},
  volume  = {8},
  number  = {6},
  pages   = {1507--1517},
  year    = {1997}
}

@InProceedings{back_tsoi_iconip96a,
  author       = {A.D. Back and A.C. Tsoi},
  title        = {A cascade neural network model with nonlinear poles and zeros},
  booktitle    = {Proc. of the 1996 Int. Conf. on Neural Information Processing ({ICONIP}'96)},
  volume       = {1},
  year         = {1996},
  organization = ieee,
  address      = pinj,
  pages        = {486--491},
  abstract     = {In this paper we propose a novel nonlinear model which is an extension of the usual cascade model structure to one which permits nonlinear mappings of the poles and zeros.}
}

@InProceedings{back_tsoi_iconip96b,
  author       = {A.D. Back and A.C. Tsoi},
  title        = {Aspects of adaptive learning algorithms for {FIR} feedforward networks},
  booktitle    = {Proc. of the 1996 Int. Conf. on Neural Information Processing ({ICONIP}'96)},
  volume       = {2},
  year         = {1996},
  organization = ieee,
  address      = pinj,
  pages        = {1311--1316}
}

@Article{back_tsoi_jse96,
  author  = {A.D. Back and A.C. Tsoi},
  title   = {Nonlinear system identification using discrete Laguerre functions},
  journal = {Journal of Systems Engineering},
  year    = {1996},
  volume  = {6},
  number  = {3},
  pages   = {194--207}
}

@InProceedings{lawrence_tsoi_back_nips96,
  author       = {S. Lawrence and A.C. Tsoi and A.D. Back},
  title        = {The Gamma {MLP} for speech phoneme recognition},
  booktitle    = nips,
  volume       = {8},
  year         = {1996},
  organization = mitpr,
  address      = camma,
  editor       = {D.S. Touretzky and M. Mozer and M. Hasselmo}
}

@InProceedings{back_tsoi_acnn96,
  author    = {A.D. Back and A.C. Tsoi},
  title     = {A new robust recurrent neural network structure},
  booktitle = {Proc. Seventh Australian Conf. on Neural Networks ({ACNN}'96)},
  year      = {1996},
  address   = {Canberra},
  pages     = {138--143}
}
@InProceedings{back_tsoi_eann95,
  author       = {A.D. Back and A.C. Tsoi},
  title        = {Identification of nonlinear processes using Laguerre functions},
  booktitle    = {Proc. Int. Conf. on Engineering Applications of Neural Networks},
  year         = {1995},
  address      = {Helsinki},
  pages        = {255--258},
  organization = {Finnish Artificial Intelligence Society},
  editor       = {A.B. Bulsari and S. Kallio}
}

@InProceedings{back_tsoi_nnsp95,
  author       = {A.D. Back and A.C. Tsoi},
  title        = {Constrained pole-zero filters as discrete-time operators for system approximation},
  booktitle    = {Proc. of the 1995 IEEE Workshop on Neural Networks for Signal Processing 5 ({NNSP95})},
  year         = {1995},
  organization = ieeepr,
  address      = nyny,
  pages        = {191--200},
  editor       = {F. Girosi and J. Makhoul and E. Manolakos and E. Wilson},
  abstract     = {Discrete-time models, whether linear or nonlinear, often implicitly use the shift operator to obtain input regression vectors. It has been shown recently that significantly better performance can be obtained, in terms of coefficient sensitivity and output error, by using alternative operators to the usual shift operator. These include the delta and gamma operators. In this paper we introduce second-order pole-zero operators which have more general modelling properties than those previously considered. We provide some observations on the behaviour of the operators, considering representational issues and convergence characteristics in particular.}
}

@InProceedings{back_tsoi_nnsp94,
  author       = {A.D. Back and E. Wan and S. Lawrence and A.C. Tsoi},
  title        = {A unifying view of some training algorithms for multilayer perceptrons with {FIR} filter synapses},
  booktitle    = {Proc. of the 1994 IEEE Workshop on Neural Networks for Signal Processing 4 ({NNSP94})},
  year         = {1994},
  organization = ieeepr,
  address      = nyny,
  pages        = {146--154},
  editor       = {J. Vlontzos and J. Hwang and E. Wilson}
}

@InProceedings{back_tsoi_nips94,
  author       = {A.D. Back and A.C. Tsoi},
  title        = {A comparison of discrete-time operator models for nonlinear system identification},
  booktitle    = nips,
  volume       = {7},
  year         = {1995},
  organization = mitpr,
  address      = camma,
  pages        = {883--890},
  editor       = {G. Tesauro and D.S. Touretzky and T.K. Leen},
  abstract     = {We present a unifying view of discrete-time operator models used in the context of finite word length linear signal processing. Comparisons are made between the recently presented gamma operator model and the delta and rho operator models for performing nonlinear system identification and prediction using neural networks. A new model based on an adaptive bilinear transformation which generalizes all of the above models is presented.}
}

@Article{tsoi_back_ieeenn94,
  author  = {A.C. Tsoi and A.D. Back},
  title   = {Locally recurrent globally feedforward networks: a critical review of architectures},
  journal = i3etnn,
  year    = {1994},
  volume  = {5},
  pages   = {229--239}
}

@InProceedings{back_tsoi_acnn94,
  author    = {A.D. Back and A.C. Tsoi},
  title     = {On the backpropagation algorithm: paralysis in multilayer perceptrons},
  booktitle = {Proc. Fifth Australian Conf. on Neural Networks ({ACNN}'94)},
  year      = {1994},
  address   = {Brisbane},
  pages     = {102--104}
}

@InProceedings{back_tsoi_icann92,
  author       = {A.D. Back and A.C. Tsoi},
  title        = {Stabilisation properties of multilayer feedforward networks with time-delay synapses},
  booktitle    = {Artificial Neural Networks 2},
  volume       = {2},
  year         = {1992},
  organization = elsev,
  address      = {Helsinki},
  pages        = {1113--1116},
  editor       = {I. Aleksander and J. Taylor}
}
@Article{back_tsoi_nc92,
  author  = {A.D. Back and A.C. Tsoi},
  title   = {An adaptive lattice architecture for dynamic multilayer perceptrons},
  journal = nc,
  year    = {1992},
  volume  = {4},
  number  = {6},
  pages   = {922--931}
}

@InProceedings{back_tsoi_acc92,
  author    = {A.D. Back and A.C. Tsoi},
  title     = {Representational capabilities of multilayer feedforward networks with time-delay synapses},
  year      = {1992},
  booktitle = {Proc. of the American Control Conference},
  address   = {Chicago},
  pages     = {3064--3065}
}

@InProceedings{back_tsoi_nnsp92,
  author       = {A.D. Back and A.C. Tsoi},
  title        = {Nonlinear system identification using multilayer perceptrons with locally recurrent synaptic structure},
  booktitle    = {Proc. of the 1992 {IEEE} Workshop on Neural Networks for Signal Processing 2 ({NNSP92})},
  year         = {1992},
  organization = ieeepr,
  address      = pinj,
  pages        = {444--453},
  editor       = {S.Y. Kung and F. Fallside and J. Aa. Sorenson and C.A. Kamm}
}

@Article{back_tsoi_nc91,
  author  = {A.D. Back and A.C. Tsoi},
  title   = {{FIR} and {IIR} synapses, a new neural network architecture for time series modelling},
  journal = nc,
  year    = {1991},
  volume  = {3},
  number  = {3},
  pages   = {375--385}
}

@InProceedings{back_tsoi_dublin90,
  author       = {A.D. Back and A.C. Tsoi},
  title        = {A time series modelling methodology using {FIR} and {IIR} synapses},
  booktitle    = {Proc. Workshop on Neural Networks for Statistical and Economic Data, Dublin},
  year         = {1990},
  organization = {DOSES, Statistical Office of the European Communities},
  pages        = {187--194},
  editor       = {F. Murtagh}
}