//============================================================================
//
//   SSSS    tt          lll  lll
//  SS  SS   tt           ll   ll
//  SS     tttttt  eeee   ll   ll   aaaa
//   SSSS    tt   ee  ee  ll   ll      aa
//      SS   tt   eeeeee  ll   ll   aaaaa  --  "An Atari 2600 VCS Emulator"
//  SS  SS   tt   ee      ll   ll  aa  aa
//   SSSS     ttt  eeeee llll llll  aaaaa
//
// Copyright (c) 1995-2019 by Bradford W. Mott, Stephen Anthony
// and the Stella Team
//
// See the file "License.txt" for information on usage and redistribution of
// this file, and for a DISCLAIMER OF ALL WARRANTIES.
//============================================================================
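
// The LanczosResampler converts the incoming audio stream to the playback
// sample rate by convolving the input with a windowed-sinc (Lanczos) kernel.
// Since the ratio between the two rates is rational, only a finite number of
// fractional sample offsets can ever occur, so all required kernels are
// precomputed once in the constructor.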

#include <algorithm>
#include <cmath>

#include "LanczosResampler.hxx"

namespace {

  // Attenuate all samples in order to leave headroom against clipping (the
  // sinc kernel can overshoot).
  constexpr float CLIPPING_FACTOR = 0.75F;

  // Cutoff frequency (in Hz) of the high pass that removes any DC offset
  // from the incoming samples.
  constexpr float HIGH_PASS_CUT_OFF = 10.F;
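
  // Fully reduce the fraction n / d and return the reduced denominator.
  // For example, reducedDenominator(31400, 48000) == 240: the GCD is 200,
  // so the fraction reduces to 157 / 240.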
  uInt32 reducedDenominator(uInt32 n, uInt32 d)
  {
    for (uInt32 i = std::min(n, d); i > 1; --i) {
      if ((n % i == 0) && (d % i == 0)) {
        n /= i;
        d /= i;
        i = std::min(n, d);
      }
    }

    return d;
  }

  float sinc(float x)
  {
    // sinc(x) = sin(pi * x) / (pi * x). We calculate the sinc in double
    // precision in order to compensate for precision loss around zero.
    return x == 0.f ? 1.f : static_cast<float>(
      sin(BSPF::PI_d * static_cast<double>(x)) / BSPF::PI_d / static_cast<double>(x)
    );
  }

  // The Lanczos kernel: sinc(x) * sinc(x / a) inside the window |x| <= a and
  // zero outside. The zero branch is omitted here, since the kernels are only
  // ever evaluated for arguments inside the window.
  float lanczosKernel(float x, uInt32 a) {
    return sinc(x) * sinc(x / static_cast<float>(a));
  }

}  // namespace

// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
LanczosResampler::LanczosResampler(
  Resampler::Format formatFrom,
  Resampler::Format formatTo,
  Resampler::NextFragmentCallback nextFragmentCallback,
  uInt32 kernelParameter)
  :
    Resampler(formatFrom, formatTo, nextFragmentCallback),
    // In order to find the number of kernels we need to precompute, we have
    // to find the minimal N such that
    //
    //   N / formatTo.sampleRate = M / formatFrom.sampleRate
    //
    // holds for integral N and M. Equivalently, we have
    //
    //   formatFrom.sampleRate / formatTo.sampleRate = M / N
    //
    // -> we find N by fully reducing the fraction.
    myPrecomputedKernelCount(reducedDenominator(formatFrom.sampleRate, formatTo.sampleRate)),
    myKernelSize(2 * kernelParameter),
    myCurrentKernelIndex(0),
    myKernelParameter(kernelParameter),
    myCurrentFragment(nullptr),
    myFragmentIndex(0),
    myIsUnderrun(true),
    myHighPassL(HIGH_PASS_CUT_OFF, float(formatFrom.sampleRate)),
    myHighPassR(HIGH_PASS_CUT_OFF, float(formatFrom.sampleRate)),
    myHighPass(HIGH_PASS_CUT_OFF, float(formatFrom.sampleRate)),
    myTimeIndex(0)
{
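  // E.g. (hypothetical rates): resampling 31400 Hz to 48000 Hz with
  // kernelParameter = 2 precomputes 240 kernels of 4 taps each, i.e. 960
  // floats in total.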
  myPrecomputedKernels = make_unique<float[]>(myPrecomputedKernelCount * myKernelSize);

  if (myFormatFrom.stereo)
  {
    myBufferL = make_unique<ConvolutionBuffer>(myKernelSize);
    myBufferR = make_unique<ConvolutionBuffer>(myKernelSize);
  }
  else
    myBuffer = make_unique<ConvolutionBuffer>(myKernelSize);

  precomputeKernels();
}

// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
void LanczosResampler::precomputeKernels()
{
  // timeIndex = time * formatFrom.sampleRate * formatTo.sampleRate
  uInt32 timeIndex = 0;

  for (uInt32 i = 0; i < myPrecomputedKernelCount; ++i) {
    float* kernel = myPrecomputedKernels.get() + myKernelSize * i;
    // The kernel is normalized such that it is evaluated at
    // time * formatFrom.sampleRate.
    float center =
      static_cast<float>(timeIndex) / static_cast<float>(myFormatTo.sampleRate);

    for (uInt32 j = 0; j < 2 * myKernelParameter; ++j) {
      kernel[j] = lanczosKernel(
        center - static_cast<float>(j) + static_cast<float>(myKernelParameter) - 1.f, myKernelParameter
      ) * CLIPPING_FACTOR;
    }

    // Next step: time += 1 / formatTo.sampleRate
    //
    // By construction, we limit the argument during kernel evaluation to
    // 0 .. 1, which corresponds to 0 .. 1 / formatFrom.sampleRate for time.
    // To implement this, we decompose time as follows:
    //
    //   time = N / formatFrom.sampleRate + delta
    //   timeIndex = N * formatTo.sampleRate + delta * formatTo.sampleRate * formatFrom.sampleRate
    //
    // with N integral and 0 <= delta < 1 / formatFrom.sampleRate. From this
    // it follows that we can replace time with delta, i.e. take timeIndex
    // modulo formatTo.sampleRate.
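    //
    // E.g. (hypothetical rates) for 31400 Hz -> 48000 Hz, the sequence of
    // time indices is 0, 31400, 14800, 46200, 29600, ... Since
    // gcd(31400, 48000) == 200, exactly 48000 / 200 == 240 distinct values
    // occur before the sequence repeats -- one per precomputed kernel.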
    timeIndex = (timeIndex + myFormatFrom.sampleRate) % myFormatTo.sampleRate;
  }
}

// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
void LanczosResampler::fillFragment(float* fragment, uInt32 length)
{
  // Fetch the initial fragment, or try to recover from an earlier underrun.
  if (myIsUnderrun) {
    Int16* nextFragment = myNextFragmentCallback();

    if (nextFragment) {
      myCurrentFragment = nextFragment;
      myFragmentIndex = 0;
      myIsUnderrun = false;
    }
  }

  // Still no input available -> emit silence.
  if (!myCurrentFragment) {
    std::fill_n(fragment, length, 0.f);
    return;
  }

  // length counts individual values; a stereo sample occupies two of them.
  const uInt32 outputSamples = myFormatTo.stereo ? (length >> 1) : length;

  for (uInt32 i = 0; i < outputSamples; ++i) {
    float* kernel = myPrecomputedKernels.get() + (myCurrentKernelIndex * myKernelSize);
    myCurrentKernelIndex = (myCurrentKernelIndex + 1) % myPrecomputedKernelCount;

    if (myFormatFrom.stereo) {
      float sampleL = myBufferL->convoluteWith(kernel);
      float sampleR = myBufferR->convoluteWith(kernel);

      if (myFormatTo.stereo) {
        fragment[2*i] = sampleL;
        fragment[2*i + 1] = sampleR;
      }
      else
        fragment[i] = (sampleL + sampleR) / 2.f;
    } else {
      float sample = myBuffer->convoluteWith(kernel);

      if (myFormatTo.stereo)
        fragment[2*i] = fragment[2*i + 1] = sample;
      else
        fragment[i] = sample;
    }
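
    // Advance the clock: each output sample advances time by
    // 1 / formatTo.sampleRate, i.e. myTimeIndex by formatFrom.sampleRate.
    // E.g. for 31400 Hz -> 48000 Hz (hypothetical rates), samplesToShift is
    // either 0 or 1 here, and on average 31400 / 48000 ~= 0.65 input samples
    // are consumed per output sample.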
    myTimeIndex += myFormatFrom.sampleRate;

    uInt32 samplesToShift = myTimeIndex / myFormatTo.sampleRate;
    if (samplesToShift == 0) continue;

    myTimeIndex %= myFormatTo.sampleRate;
    shiftSamples(samplesToShift);
  }
}

// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
inline void LanczosResampler::shiftSamples(uInt32 samplesToShift)
{
  while (samplesToShift-- > 0) {
    // Normalize each Int16 sample to -1 .. 1, remove any DC offset and shift
    // the result into the convolution buffer(s).
    if (myFormatFrom.stereo) {
      myBufferL->shift(myHighPassL.apply(myCurrentFragment[2*myFragmentIndex] / static_cast<float>(0x7fff)));
      myBufferR->shift(myHighPassR.apply(myCurrentFragment[2*myFragmentIndex + 1] / static_cast<float>(0x7fff)));
    }
    else
      myBuffer->shift(myHighPass.apply(myCurrentFragment[myFragmentIndex] / static_cast<float>(0x7fff)));

    ++myFragmentIndex;

    // The current fragment is exhausted; request the next one.
    if (myFragmentIndex >= myFormatFrom.fragmentSize) {
      myFragmentIndex %= myFormatFrom.fragmentSize;

      Int16* nextFragment = myNextFragmentCallback();
      if (nextFragment) {
        myCurrentFragment = nextFragment;
        myIsUnderrun = false;
      } else {
        myUnderrunLogger.log();
        myIsUnderrun = true;
      }
    }
  }
}