## Sunday, July 24, 2016

### Google interview: Compute the Fibonacci sequence

// Write code that computes the Fibonacci sequence up to n

#include <cstdint>
#include <vector>
#include <iostream>
#include <functional>
#include <chrono>

// Recursive implementations of Fibonacci are all the rage in a teaching
// environment as it demonstrates recursion nicely. The problem is that a recursive
// implementation is a completely inefficient way to code the task in practice.
// Each call of function calls 2 more and those 2 call 2 more each resulting in 4.
// And those 4 call 4 more each resulting in 8 so at this point your space and time
// complexity is 1 + 2 + 4 + 8 + ... which is exponential growth and just awful

// Textbook doubly-recursive Fibonacci.  Each call fans out into two more
// calls, so the work grows exponentially with n -- this is the slow
// baseline the memoized versions below are measured against.
uint32_t fib_naive(uint32_t n)
{
    switch (n)
    {
        case 0:  return 0;                                  // terminator fib(0)
        case 1:  return 1;                                  // terminator fib(1)
        default: return fib_naive(n - 2) + fib_naive(n - 1);
    }
}

// So what google is really testing here is the "dynamic programming" paradigm.
// https://en.wikipedia.org/wiki/Dynamic_programming.  Unfortunately the term
// "dynamic programming" has had its meaning diluted and corrupted a bit.  In
// this context what it means is the breaking of a larger problem into sub
// parts and solving each sub part to solve the whole.  This sounds exactly
// like recursion and it is, the distinction is that recursive implementations
// tend to stop at raw mathematical conversion whereas DP implementations look
// at lazy caching results to cut off repeated computations

// There are two textbook approaches to this; top-down and bottom-up

// in the top-down approach you start working from the answer point and compute
// the values of the breakdowns, you deploy memory as needed to cache the
// results of prior computations so that if they are used again you use the
// cached value rather than recomputing it from scratch again

// Recursive worker for the top-down (memoized) approach.  The cache maps
// index -> fib(index); a zero entry at any index above 0 means "not yet
// computed" (the caller pre-seeds cache[1] = 1 so the sentinel is safe).
uint32_t fib_top_core(uint32_t n, std::vector<uint32_t>& cache)
{
    const bool needs_computing = (n > 0) and (cache[n] == 0);

    if (needs_computing)
    {
        // Fill the slot exactly once; later visits are plain lookups.
        cache[n] = fib_top_core(n - 1, cache) + fib_top_core(n - 2, cache);
    }

    return cache[n];
}

// Top-down entry point: builds the memo table, plants the two known
// terminators (fib(0) = 0, fib(1) = 1), and hands off to the worker.
uint32_t fib_top(uint32_t n)
{
    if (n == 0)
    {
        return 0;
    }

    // Slot 0 stays 0 (its true value); slot 1 is seeded with 1 so the
    // worker's zero-means-empty sentinel never misfires.
    std::vector<uint32_t> cache(n + 1, 0);
    cache[1] = 1;

    return fib_top_core(n, cache);
}

// in the bottom-up approach you start working from the known terminators
// and work your way up to the answer point.  You do this by computing
// the values of all the breakdowns you could possibly need and memoize
// the results for use in the next computations.  Often you can optimize
// the memory to forget values that we know we will not revisit

// Bottom-up Fibonacci: walk from the terminators up to n, keeping only
// the last two values -- O(n) time, O(1) space.
uint32_t fib_bot(uint32_t n)
{
    if (n == 0)
        return 0;

    uint32_t prev = 0;  // fib(i-1)
    uint32_t curr = 1;  // fib(i), starting at i == 1

    for (uint32_t i = 1; i < n; ++i)
    {
        const uint32_t next = prev + curr;
        prev = curr;
        curr = next;
    }

    return curr;
}

// Times a single invocation of func(n) and prints the result followed by
// the elapsed wall time in nanoseconds.
//
// Fixes vs. the original:
//  - steady_clock replaces system_clock: system_clock tracks the wall
//    calendar and can jump (NTP adjustment, DST) mid-measurement, while
//    steady_clock is monotonic and is the correct clock for intervals.
//  - a space is printed before "nSec", matching the sample output shown
//    at the bottom of this file.
void test(std::function<uint32_t(uint32_t)> func, uint32_t n)
{
    const auto start = std::chrono::steady_clock::now();
    std::cout << "result:" << func(n);
    const auto end = std::chrono::steady_clock::now();

    std::cout << " --  time:"
        << std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count()
        << " nSec\n";
}

// Run the same computation three ways; the results must match while the
// run times differ by orders of magnitude.
int main()
{
    const uint32_t n = 40;

    test(fib_naive, n);
    test(fib_top, n);
    test(fib_bot, n);
}


and the output looks like:
result:102334155 --  time:1282786361 nSec
result:102334155 --  time:51012 nSec
result:102334155 --  time:690 nSec