/* An adaptive FIR filter with a lattice structure.

 Copyright (c) 2003-2014 The Regents of the University of California and
 Research in Motion Limited.
 All rights reserved.
 Permission is hereby granted, without written agreement and without
 license or royalty fees, to use, copy, modify, and distribute this
 software and its documentation for any purpose, provided that the above
 copyright notice and the following two paragraphs appear in all copies
 of this software.

 IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA OR RESEARCH IN MOTION
 LIMITED BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
 INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF THIS
 SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA
 OR RESEARCH IN MOTION LIMITED HAVE BEEN ADVISED OF THE POSSIBILITY OF
 SUCH DAMAGE.

 THE UNIVERSITY OF CALIFORNIA AND RESEARCH IN MOTION LIMITED
 SPECIFICALLY DISCLAIM ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS"
 BASIS, AND THE UNIVERSITY OF CALIFORNIA AND RESEARCH IN MOTION
 LIMITED HAVE NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES,
 ENHANCEMENTS, OR MODIFICATIONS.
026 PT_COPYRIGHT_VERSION_2 027 COPYRIGHTENDKEY 028 029 */ 030package ptolemy.actor.lib; 031 032import ptolemy.actor.TypedIOPort; 033import ptolemy.data.ArrayToken; 034import ptolemy.data.DoubleToken; 035import ptolemy.data.expr.Parameter; 036import ptolemy.data.type.ArrayType; 037import ptolemy.data.type.BaseType; 038import ptolemy.kernel.CompositeEntity; 039import ptolemy.kernel.util.Attribute; 040import ptolemy.kernel.util.IllegalActionException; 041import ptolemy.kernel.util.NameDuplicationException; 042import ptolemy.kernel.util.Workspace; 043 044/////////////////////////////////////////////////////////////////// 045//// GradientAdaptiveLattice 046 047/** 048 An adaptive FIR filter with a lattice structure. This class extends 049 the base class to dynamically adapt the reflection coefficients to 050 minimize the power of the output sequence. The output reflection 051 coefficients are guaranteed to lie between -1.0 and 1.0, ensuring that the 052 resulting filter is a minimum phase linear predictor. The 053 reflectionCoefficients parameter is interpreted as the initial 054 coefficients. 055 056 @author Steve Neuendorffer 057 @version $Id$ 058 @since Ptolemy II 4.0 059 @Pt.ProposedRating Red (vogel) 060 @Pt.AcceptedRating Red (cxh) 061 */ 062public class GradientAdaptiveLattice extends Lattice { 063 /** Construct an actor with the given container and name. 064 * @param container The container. 065 * @param name The name of this actor. 066 * @exception IllegalActionException If the actor cannot be contained 067 * by the proposed container. 068 * @exception NameDuplicationException If the container already has an 069 * actor with this name. 
070 */ 071 public GradientAdaptiveLattice(CompositeEntity container, String name) 072 throws NameDuplicationException, IllegalActionException { 073 super(container, name); 074 075 // Parameters 076 timeConstant = new Parameter(this, "timeConstant"); 077 timeConstant.setExpression("1.0"); 078 timeConstant.setTypeEquals(BaseType.DOUBLE); 079 timeConstant.validate(); 080 081 // The currently adapted reflection coefficients 082 adaptedReflectionCoefficients = new TypedIOPort(this, 083 "adaptedReflectionCoefficients", false, true); 084 adaptedReflectionCoefficients 085 .setTypeEquals(new ArrayType(BaseType.DOUBLE)); 086 087 output.setTypeAtLeast(input); 088 } 089 090 /////////////////////////////////////////////////////////////////// 091 //// ports and parameters //// 092 093 /** The output port that produces the current reflection 094 * coefficients. The port is of type array of double. 095 */ 096 public TypedIOPort adaptedReflectionCoefficients; 097 098 /** The time constant of the filter, which determines how fast the 099 * filter adapts. 100 * The default value of this parameter is 1.0. 101 */ 102 public Parameter timeConstant; 103 104 /////////////////////////////////////////////////////////////////// 105 //// public methods //// 106 107 /** Handle parameter change events on the 108 * <i>order</i> and <i>timeConstant</i> parameters. The 109 * filter state vector is reinitialized to zero state. 110 * @param attribute The attribute that changed. 111 * @exception IllegalActionException If this method is invoked 112 * with an unrecognized parameter. 113 */ 114 @Override 115 public void attributeChanged(Attribute attribute) 116 throws IllegalActionException { 117 if (attribute == timeConstant) { 118 double timeConstantValue = ((DoubleToken) timeConstant.getToken()) 119 .doubleValue(); 120 121 // FIXME: there is a bug in either the variable naming or the 122 // two lines below. 
123 _oneMinusAlpha = (timeConstantValue - 1.0) 124 / (timeConstantValue + 1.0); 125 _alpha = 1.0 - _oneMinusAlpha; 126 } 127 128 super.attributeChanged(attribute); 129 } 130 131 /** Clone the actor into the specified workspace. This calls the 132 * base class and then sets the type constraints. 133 * @param workspace The workspace for the new object. 134 * @return A new actor. 135 * @exception CloneNotSupportedException If a derived class has 136 * an attribute that cannot be cloned. 137 */ 138 @Override 139 public Object clone(Workspace workspace) throws CloneNotSupportedException { 140 GradientAdaptiveLattice newObject = (GradientAdaptiveLattice) super.clone( 141 workspace); 142 newObject.output.setTypeAtLeast(newObject.input); 143 144 newObject._estimatedErrorPower = new double[newObject._order + 1]; 145 System.arraycopy(newObject._estimatedErrorPower, 0, 146 _estimatedErrorPower, 0, newObject._order + 1); 147 148 newObject._estimatedErrorPowerCache = new double[newObject._order + 1]; 149 System.arraycopy(newObject._estimatedErrorPowerCache, 0, 150 _estimatedErrorPowerCache, 0, newObject._order + 1); 151 152 newObject._reflectionCoefficientsCache = new double[newObject._order]; 153 System.arraycopy(newObject._reflectionCoefficientsCache, 0, 154 _reflectionCoefficientsCache, 0, newObject._order); 155 return newObject; 156 } 157 158 /** Initialize the state of the filter. 159 */ 160 @Override 161 public void initialize() throws IllegalActionException { 162 super.initialize(); 163 164 for (int i = 0; i <= _order; i++) { 165 _estimatedErrorPower[i] = 0.0; 166 _estimatedErrorPowerCache[i] = 0.0; 167 if (i < _order) { 168 _reflectionCoefficientsCache[i] = 0.0; 169 } 170 } 171 172 // Reinitialize the reflection coefficients from the parameter value. 
173 ArrayToken value = (ArrayToken) reflectionCoefficients.getToken(); 174 for (int i = 0; i < _order; i++) { 175 _reflectionCoefficients[i] = ((DoubleToken) value.getElement(i)) 176 .doubleValue(); 177 } 178 } 179 180 /** Update the filter state. 181 * 182 * @exception IllegalActionException If the base class throws it. 183 */ 184 @Override 185 public boolean postfire() throws IllegalActionException { 186 System.arraycopy(_estimatedErrorPowerCache, 0, _estimatedErrorPower, 0, 187 _order + 1); 188 System.arraycopy(_reflectionCoefficientsCache, 0, 189 _reflectionCoefficients, 0, _order); 190 return super.postfire(); 191 } 192 193 /////////////////////////////////////////////////////////////////// 194 //// protected methods //// 195 // Compute the filter, updating the caches, based on the current 196 // values. Extend the base class to adapt the reflection coefficients 197 @Override 198 protected void _doFilter() throws IllegalActionException { 199 double k; 200 201 // NOTE: The following code is ported from Ptolemy Classic. 202 // Update forward errors. 203 for (int i = 0; i < _order; i++) { 204 k = _reflectionCoefficients[i]; 205 _forwardCache[i + 1] = -k * _backwardCache[i] + _forwardCache[i]; 206 } 207 208 DoubleToken[] outputArray = new DoubleToken[_order]; 209 210 // Backward: Compute the weights for the next round Note: 211 // strictly speaking, _backwardCache[_order] is not necessary 212 // for computing the output. It is computed for the use of 213 // subclasses which adapt the reflection coefficients. 
214 for (int i = _order; i > 0; i--) { 215 k = _reflectionCoefficients[i - 1]; 216 _backwardCache[i] = -k * _forwardCache[i - 1] 217 + _backwardCache[i - 1]; 218 219 double fe_i = _forwardCache[i]; 220 double be_i = _backwardCache[i]; 221 double fe_ip = _forwardCache[i - 1]; 222 double be_ip = _backwardCache[i - 1]; 223 224 double newError = _estimatedErrorPower[i] * _oneMinusAlpha 225 + _alpha * (fe_ip * fe_ip + be_ip * be_ip); 226 double newCoefficient = _reflectionCoefficients[i - 1]; 227 228 if (newError != 0.0) { 229 newCoefficient += _alpha * (fe_i * be_ip + be_i * fe_ip) 230 / newError; 231 232 if (newCoefficient > 1.0) { 233 newCoefficient = 1.0; 234 } else if (newCoefficient < -1.0) { 235 newCoefficient = -1.0; 236 } 237 } 238 239 outputArray[i - 1] = new DoubleToken(newCoefficient); 240 _reflectionCoefficientsCache[i - 1] = newCoefficient; 241 _estimatedErrorPowerCache[i] = newError; 242 } 243 244 adaptedReflectionCoefficients.send(0, 245 new ArrayToken(BaseType.DOUBLE, outputArray)); 246 } 247 248 // Reallocate the internal arrays. Extend the base class to 249 // reallocate the power estimation array. 250 @Override 251 protected void _reallocate() { 252 super._reallocate(); 253 _estimatedErrorPower = new double[_order + 1]; 254 _estimatedErrorPowerCache = new double[_order + 1]; 255 _reflectionCoefficientsCache = new double[_order]; 256 } 257 258 /////////////////////////////////////////////////////////////////// 259 //// private variables //// 260 private double _alpha = 0.0; 261 262 private double _oneMinusAlpha = 1.0; 263 264 // The error power in the output signal. The length is _order. 265 private double[] _estimatedErrorPower; 266 267 // Cache of the error power. The length is _order. 268 private double[] _estimatedErrorPowerCache; 269 270 // Cache of the reflection coefficients. The length is _order; 271 private double[] _reflectionCoefficientsCache; 272}