Saturday, August 13, 2016

google interview: efficiently find a number in a partially sorted list

// http://www.impactinterview.com/2009/10/140-google-interview-questions/#software_engineer
// You are given a list of numbers.  When you reach the end of the list you
// will come back to the beginning of the list (a circular list).  Write the
// most efficient algorithm to find the minimum # in this list.  Find any given
// # in the list.  The numbers in the list are always increasing but you
// don’t know where the circular list begins, ie: 38, 40, 55, 89, 6, 13, 20,
// 23, 36.

// hmm..  essentially the find-min-number algorithm is a component of the
// find-any-number algorithm..  Since the question isn't written as "find the
// min, then fix the list start, and then find any #" I'll assume that it was
// meant as a helper step to get to the find algorithm..  so in the interest of
// getting it done let's just cut to the find-any-# algorithm and code it in
// the most efficient way I can dream up..

#include <iostream>
#include <vector>
#include <chrono>
#include <functional>
#include <string>
#include <cstdlib>   // rand/srand

// ****************************************
// *************** CYCLIC LIST ************
// ****************************************

class List
{
    std::vector<int> data_;
    int offset_;

public:
    class Iterator
    {
        List* list_;
        int index_;

    public:
        Iterator() :
            list_(NULL),
            index_(-1)
        {}

        Iterator(List* list) :
            list_(list),
            index_(list->offset_)
        {}

        // WARNING DO NOT USE in find.. that was not spec'ed
        // this is for testing only
        Iterator(List* list, int offset) :
            list_(list),
            index_(offset)
        {}

        Iterator& operator++()
        {
            index_ = (index_ + 1) % list_->data_.size();
            return *this;
        }

        Iterator operator+(int delta)
        {
            // cast the size before the modulo: data_.size() is unsigned, and a
            // negative index_ + delta would otherwise be converted to a huge
            // unsigned value instead of a negative remainder
            int size = static_cast<int>(list_->data_.size());
            int offset = (index_ + delta) % size;
            if (offset < 0) offset += size;

            return Iterator(list_, offset);
        }

        // WARNING DO NOT USE in find.. that was not spec'ed
        // this is for testing only
        Iterator& operator--()
        {
            index_ = (list_->data_.size() + index_ - 1) % list_->data_.size();
            return *this;
        }

        int operator*()
        {
            return list_->data_[index_];
        }

        bool operator==(const Iterator& other) const
        {
            return other.list_ == list_ and
                other.index_ == index_;
        }

        bool operator!=(const Iterator& other) const
        {
            return not (other == *this);
        }

        bool valid() const
        {
            return list_ != NULL;
        }
    };

    List(std::initializer_list<int> data,
         int offset = 0) :
        data_(data),
        offset_(offset)
    {}

    List(int size) :
        data_(size),
        offset_(rand() % size)
    {
        // random fill
        int value = 0;
        for (int i = 0; i < size; ++i)
        {
            value += 2 + rand() % 5;
            data_[i] = value;
        }
    }

    bool empty() const
    {
        return data_.empty();
    }

    Iterator begin()
    {
        return Iterator(this);
    }

    Iterator random()
    {
        return Iterator(this, rand() % data_.size());
    }
};
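
// A quick usage sketch (added for illustration) using the question's list with
// an offset of 3, so iteration starts at 89 and wraps around:
//
//     List list({38, 40, 55, 89, 6, 13, 20, 23, 36}, 3);
//     List::Iterator it = list.begin();        // *it == 89
//     for (int n = 0; n < 9; ++n, ++it)
//         std::cout << *it << " ";             // 89 6 13 20 23 36 38 40 55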

// ****************************************
// *************** FIND NAIVE *************
// ****************************************

List::Iterator find_naive(List& data, int target)
{
    if (data.empty()) return List::Iterator();

    List::Iterator start = data.begin();
    List::Iterator i = start;

    do
    {
        if (*i == target) return i;
        ++i;
    }
    while (i != start);

    return List::Iterator();
}

// ****************************************
// ************* FIND NAIVE V2 ************
// ****************************************

// Attempt to simplify the code a bit so that we handle the cyclic iteration and
// the partially sorted issue more cleanly for later scaling

List::Iterator find_naive2(List& data, int target)
{
    if (data.empty()) return  List::Iterator();

    //termination is an issue because start *is* the end ..
    // simplify this by checking the start..
    List::Iterator end = data.begin();
    if (target == *end) return end;

    // then start at the first item after and now "start" is really the end
    List::Iterator i = end + 1;

    while (i != end and *i != target) ++i;
    if (*i == target) return i;

    return  List::Iterator();
}

// Of course you could try a divide and conquer idea..  but you're dealing with a
// list that you want to divide in half.  Dividing it in half means you need to
// step the iterator to the mid point..  So assuming you know the size of the list,
// the total search on a fully sorted list will *always* cost you n/2 + n/4 + n/8
// ...  moves, which totals to n-1 (ie if n=8 it's 4+2+1=7)..  So the performance
// is actually worse on average than the naive version ... of course this might
// not be true because the cost of iterators might be very low compared to the
// compare.. but I don't think it is... so I'm skipping it..
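
// A throwaway sketch (added for illustration) to sanity check that sum of
// iterator moves for the halving search:
//
//     int n = 8;
//     int moves = 0;
//     for (int step = n / 2; step > 0; step /= 2)
//         moves += step;                       // n/2 + n/4 + ... + 1
//     std::cout << moves << "\n";              // prints 7, ie n - 1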

// ****************************************
// ******** SKIP TO SORTED SECTION ********
// ****************************************

// What we can really do is try to take advantage of the sorted order of data
// and terminate early..  In order to do that what you have to do is skip to
// the correct sorted part for your target and then search... (hence the find
// min number stuff before)

// Keep in mind this gets much less robust to problems with the data..  ie
// if the list isn't sorted (and partitioned) beforehand..

List::Iterator find_skip_start(List& data, int target)
{
    // handle the empty dataset case..
    if (data.empty()) return  List::Iterator();

    //termination is an issue because start *is* the end ..
    // simplify this by checking the start..
    List::Iterator end = data.begin();
    if (target == *end) return end;

    // then start at the first item after and now "start" is really the end
    List::Iterator i = end + 1;

    // if the target is in the second part
    if (target < *end)
    {
        // skip to the second half
        int split_edge = *end;
        while (*i >= split_edge and i != end) ++i;
    }

    // then search for the target
    while (*i < target and i != end) ++i;

    if (*i == target) return i;
    return  List::Iterator();
}

// ****************************************
// ******* DOUBLE SKIP IN SECTION *********
// ****************************************

// Now because it's sorted you don't have to check items every time you move
// forward.  You can step and then roll back if you went too far..  this makes
// some big assumptions about the cost of iterator movement vs integer
// compares.. my "List" class isn't great but it works out.. and in
// theory should cut out every second compare..

List::Iterator find_double_skip(List& data, int target)
{
    // handle the empty dataset case..
    if (data.empty()) return  List::Iterator();

    //termination is an issue because start *is* the end ..
    // simplify this by checking the start..
    List::Iterator end = data.begin();
    if (target == *end) return end;

    // then start at the first item after and now "start" is really the end
    List::Iterator i  = end + 1;

    if (target < *end)
    {
        int split_edge = *end;

        // double skip to second half
        List::Iterator iprev;
        while (*i >= split_edge and i != end)
        {
            ++i;
            iprev = i;
            if (i != end) ++i;
        }

        // check if we need to back up the double step.
        if (iprev.valid() and *iprev < split_edge) i = iprev;
    }

    {
        // double skip till target found or exceeded
        List::Iterator iprev;
        while (*i < target and i != end)
        {
            ++i;
            iprev = i;
            if (i != end) ++i;
        }

        // check if found the target at either of the pointers..
        if (iprev.valid() and *iprev == target) return iprev;
        if (*i == target) return i;
    }

    return  List::Iterator();
}

// ****************************************
// ********** N SKIP IN SECTION ***********
// ****************************************

// Optimization..  of course if you can do better with steps of 2 then you can
// generalize it to any number of steps..  for example using steps of 20..  now
// we are really playing balance games..  trading the speeds of various
// comparisons, assignments etc..  Then you have to tune the size of the skip for
// your machine(s)

List::Iterator find_n_skip(List& data, int target)
{
    const int skip = 20;

    // handle the empty dataset case..
    if (data.empty()) return  List::Iterator();

    //termination is an issue because start *is* the end ..
    // simplify this by checking the start..
    List::Iterator end = data.begin();
    if (target == *end) return end;

    // then start at the first item after and now "start" is really the end
    List::Iterator i  = end + 1;

    if (target < *end)
    {
        List::Iterator iprev = i;
        int split_edge = *end;
        // skip to second half
        while (*i >= split_edge and i != end)
        {
            ++i;
            iprev = i;
            int steps = skip;
            while (--steps > 0 and i != end) ++i;
        }

        // when found return to the last known ok point and search forward 1 by 1
        i = iprev;
        while (*i >= split_edge and i != end) ++i;
    }

    {
        List::Iterator iprev = i;
        while (*i < target and i != end)
        {
            ++i;
            iprev = i;
            int steps = skip;
            while (--steps > 0 and i != end) ++i;
        }

        // when found return to the last known ok point and search forward 1 by 1
        i = iprev;
        while (*i < target and i != end) ++i;

        if (*i == target) return i;
    }

    return  List::Iterator();
}

void test(std::string dataTag,
          std::string algoTag,
          std::function<List::Iterator (List& , int)> func,
          List& data,
          int target,
          bool hasTarget)
{
    auto start = std::chrono::system_clock::now();
    List::Iterator result = func(data, target);
    auto end   = std::chrono::system_clock::now();

    std::cout << algoTag << " for " << dataTag << ": ";
    if (not result.valid())
        std::cout << (hasTarget ? "FAIL" : "pass");
    else
        std::cout << ((hasTarget and (target == *result)) ? "pass" : "FAIL");

    std::cout << " -- time:"
              << std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count()
              << " nSec" << std::endl;
}

void testSet(std::string tag,
             List& data,
             int target,
             bool hasTarget)
{
    test(tag, "naive ",  find_naive,       data, target, hasTarget);
    test(tag, "naive2",  find_naive2,      data, target, hasTarget);
    test(tag, "skip  ",  find_skip_start,  data, target, hasTarget);
    test(tag, "2xskip",  find_double_skip, data, target, hasTarget);
    test(tag, "nskip ",  find_n_skip,      data, target, hasTarget);
    std::cout << "\n";
}

int main()
{
    srand(std::chrono::system_clock::now().time_since_epoch().count());

    {
        List list({10,11,12,1,2,3,4,5,6,7,8});
        testSet("small     ", list, 4, true);
    }

    {
        List list(1000000);
        testSet("big rand  ", list, *list.random(), true);
        testSet("big nrand ", list, *list.random()-1, false);  // note that  random always is +2 ~ +7 so -1 means i will not be in the data..

        testSet("big early ", list, *(list.begin() + 10), true);    // i hope.. its random so it might not be..
        testSet("big nearly", list, *(list.begin() + 10)-1, false); // check early exit in first half on fail

        testSet("big last  ", list, *(--list.begin()), true);
        testSet("big nlast ", list, -1, false);
    }
}



naive  for small     : pass -- time:3207 nSec
naive2 for small     : pass -- time:2175 nSec
skip   for small     : pass -- time:1996 nSec
2xskip for small     : pass -- time:2151 nSec
nskip  for small     : pass -- time:4232 nSec

naive  for big rand  : pass -- time:14229998 nSec
naive2 for big rand  : pass -- time:13228995 nSec
skip   for big rand  : pass -- time:12786059 nSec
2xskip for big rand  : pass -- time:10874261 nSec
nskip  for big rand  : pass -- time:9004453 nSec

naive  for big nrand : pass -- time:43750054 nSec
naive2 for big nrand : pass -- time:37382249 nSec
skip   for big nrand : pass -- time:17939681 nSec
2xskip for big nrand : pass -- time:15509062 nSec
nskip  for big nrand : pass -- time:12927864 nSec

naive  for big early : pass -- time:738 nSec
naive2 for big early : pass -- time:611 nSec
skip   for big early : pass -- time:632 nSec
2xskip for big early : pass -- time:708 nSec
nskip  for big early : pass -- time:1064 nSec

naive  for big nearly: pass -- time:28980196 nSec
naive2 for big nearly: pass -- time:27551947 nSec
skip   for big nearly: pass -- time:1034 nSec
2xskip for big nearly: pass -- time:719 nSec
nskip  for big nearly: pass -- time:1217 nSec

naive  for big last  : pass -- time:27341796 nSec
naive2 for big last  : pass -- time:27780499 nSec
skip   for big last  : pass -- time:28165415 nSec
2xskip for big last  : pass -- time:24593599 nSec
nskip  for big last  : pass -- time:22455148 nSec

naive  for big nlast : pass -- time:27195820 nSec
naive2 for big nlast : pass -- time:26584976 nSec
skip   for big nlast : pass -- time:8640245 nSec
2xskip for big nlast : pass -- time:7304119 nSec
nskip  for big nlast : pass -- time:6601818 nSec

Tuesday, August 9, 2016

google interview: measure the speed of a context switch

// http://www.impactinterview.com/2009/10/140-google-interview-questions/#software_engineer
// Write a C program which measures the speed of a context switch on a
// UNIX/Linux system.

// wow.. just wow... the questioner hates you.. so moving on...

// A context switch occurs when the kernel or hardware thinks it's time to do
// something else...  you have no control over it..  *period*...  the only true
// way to measure it is to get into the kernel, remove *everything* and check it
// directly..  without running any other programs and device drivers etc..  i
// mean hell, painting the screen, that blinking cursor you're looking at while
// typing, that's *all* context switches.

// so write code to measure this.. well.. I don't know the kernel so that's out..
// so I don't have the skills to do the *real* job.. the best I can do is guess..

// basically I can measure the whole process: program executing, ctx switch, do
// something else, ctx switch, complete execution

// so we are going to try to measure something that should have very little
// variation and should scale well..  ie multiply and accumulate a register.
// we will do the ops a few times and profile them. Then change the total number
// of times we do it and reprofile..  of course smaller context switches may
// be getting buried in the testing method etc..  not to mention the timer's
// effects etc etc

// what we are looking for is a statistical blip...  the mult-accum should just
// shift right when we scale it..  anything that stays still isn't part of our
// test..  it's likely to be some part of the kernel's context switching or device
// drivers

// Anyway the results looked as expected..  the test execution time moved to the
// right when the number of cycles was increased..  there is a set of large
// recurring spikes in the execution times..  but I doubt these are the cost
// of a context switch, more likely some device driver, service, or interrupt
// handler bumping into my execution etc


#include <iostream>
#include <chrono>
#include <map>

int main()
{
    std::map<int,int> counts;
    for (int k = 0; k < 100; ++k)
    {
        std::cout << ".";
        for (int j = 0; j < 1000000; ++j)
        {
            auto start = std::chrono::system_clock::now();
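            // note: i += i*i also grows the loop counter itself, so this body
            // only executes a handful of times before i blows past the bound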
            for (int i = 0; i < 100000; ++i)
            {
                i += i*i;
            }
            auto end = std::chrono::system_clock::now();

            int delta = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();

            // std::map::operator[] value-initializes missing entries to 0,
            // so a plain increment covers both the new and existing cases
            ++counts[delta];
        }
    }
    std::cout << "\n";

    for (std::map<int,int>::iterator cit = counts.begin();
         cit != counts.end();
         ++cit)
    {
        std::cout << " " << cit->first << ":" << cit->second << "\n";
    }
}

The results were copied out into OpenOffice and graphed etc. Note that the "fuzz" in the red section may be significant also.. but I'm not really a maths guy so I can't be certain, and my use of stats here is really flimsy, so grain of salt people... Update: I just noticed that I didn't align the graphs very well (this wasn't intentional); anyway the mistake is a bit disadvantageous to the results so I'll leave it be.

Saturday, August 6, 2016

google interview: detect a cycle in a graph

// detect a cycle in a directed graph

#include <iostream>
#include <vector>
#include <unordered_set>
#include <unordered_map>
#include <memory>
#include <algorithm>
#include <functional>
#include <chrono>
#include <string>

struct Node
{
    static int gID;
    int id_;

    std::vector<Node*> kids_;

    Node() : id_(++gID) {}
    void link(Node& kid) { kids_.push_back(&kid); }
};

int Node::gID = 0;

// *************************************************
// *************** PRE BOARD WORK  *****************
// *************************************************
//
//  Basic loop with Node "R" going into unknown "Graph"
//                  +---+    << check for self link node in path
//    ~~~~~~~~~     |   |
//   (         ) <--R<--+
//    ) Graph (     ^
//   (         ) -?-+
//    ~~~~~~~~~
//
// Recursive case for "A" a child Node of "R"
//                  +---+---+    << check for prior node in path
//    ~~~~~~~~~     |   v   |
//   (         ) <--A<--R<--+
//    ) Graph (     ^   ^
//   (         ) -?-+-?-+
//    ~~~~~~~~~
//
// Recursive case for "B" with child Node "A"
//                  +---+---+--+    << check for *ANY* prior node in path
//    ~~~~~~~~~     |   v   v  |
//   (         ) <--B<--A<--R<-+
//    ) Graph (     ^   ^   ^
//   (         ) -?-+-?-+-?-+
//    ~~~~~~~~~
//
// etc
//
// Therefore the first algo is to DFS visit all nodes and check if the current
// node's kids loop back into the path of nodes before the current one.

// *************************************************
// ***************** BASIC RECURSE *****************
// *************************************************

bool hasCycle_recurseImpl(Node* v,
                          std::vector<Node*>& path)
{
    // Mark the current node as part of pathway
    path.push_back(v);

    // then for all kids
    for(Node* i : v->kids_)
    {
        // if the kid is part of the path you have a loop.. done
        std::vector<Node*>::iterator pit =
            std::find(path.begin(), path.end(), i);

        if (pit != path.end())
            return true;

        // recurse into kid
        if (hasCycle_recurseImpl(i, path) )
            return true;
    }

    // remove the node from path
    path.pop_back();
    return false;
}

bool hasCycle_recurse(Node* root)
{
    std::vector<Node*> path;

    if (root == NULL) return false;

    return hasCycle_recurseImpl(root, path);
}


// *************************************************
// ***************** UNORDERED PATH ****************
// *************************************************

// optimization of path - the ordering of the path is *not* important.  As a
// result we can choose a data structure with O(1) search, insert and delete,
// ie a hash

bool hasCycle_unordPathImpl(Node* v,
                            std::unordered_set<Node*>& path)
{
    // Mark the current node as part of pathway
    path.insert(v);

    // then for all kids
    for(Node* i : v->kids_)
    {
        // if the kid is part of the path you have a loop.. done
        if (path.find(i) != path.end())
            return true;

        // recurse into kid
        if (hasCycle_unordPathImpl(i, path) )
            return true;
    }

    // remove the node from path
    path.erase(v);
    return false;
}

bool hasCycle_unordPath(Node* root)
{
    std::unordered_set<Node*> path;

    if (root == NULL) return false;

    return hasCycle_unordPathImpl(root, path);
}

// *************************************************
// ****************** NO REVISIT  ******************
// *************************************************

// optimization of visiting - never revisit a node.  The logic behind this is
// easy but it took me a bit to actually see it.
//
// If you visit a node then the DFS will terminate on 2 possible conditions:
// 1) you found a cycle,
// 2) you exhausted all possible nodes below this one and didn't end up in a
// cycle.
//
// Now here is what took me a bit to get..  If you ever enter a path that forms
// a cycle you have no possible way to exit it.  That means you can never see a
// previously visited node unless it is about to start a cycle, or it has been
// exhausted and will *never* form a cycle even if we add it to the current
// path.  IE if it could have formed a cycle with the current node then it
// would have traversed into the current node already, before it was exhausted.
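
// (the "triple node double end link" test cases below exercise exactly this
// situation: a2 is reached both directly from a0 and again via a1; the second
// visit can be skipped because the first visit already exhausted a2 without
// finding a cycle)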

bool hasCycle_1visitImpl(Node* v,
                         std::unordered_set<Node*>& visited,
                         std::unordered_set<Node*>& path)
{
    if(visited.find(v) == visited.end())
    {
        // Mark the current node as visited and part of pathway
        visited.insert(v);
        path.insert(v);

        // then for all kids
        for(Node* i : v->kids_)
        {
            // if the kid is part of the path you have a loop.. done
            if (path.find(i) != path.end())
                return true;

            // recurse into kid
            if (hasCycle_1visitImpl(i, visited, path) )
                return true;
        }

        // remove the node from path
        path.erase(v);
    }
    return false;
}

bool hasCycle_1visit(Node* root)
{
    std::unordered_set<Node*> visited;
    std::unordered_set<Node*> path;

    if (root == NULL) return false;

    return hasCycle_1visitImpl(root, visited, path);
}

// *************************************************
// *** CALL STACK, PATH and VISIT DUPLICATION  *****
// *************************************************

// call stack, path hash and visit hash optimization -
// the recursive call stack is basically the same as the path stack, and the
// path stack is the same as the visit stack; all 3 data structures can be made
// into one..  using an iterative approach that stores the parent node to remove
// the call stack, and a stateful hash entry that doubles as the visit marker and
// as the child idx marking which kid is currently in use when in the path

bool hasCycle_iterative(Node* root)
{
    typedef std::pair<Node*,int>  ParentKidIdx;
    std::unordered_map<Node*, ParentKidIdx> path;

    if (root == NULL) return false;

    // note the oddity with a new node's current-kid index: we start with the end
    // kid; this way it naturally ends in a negative idx when the kids are exhausted
    path[root] = ParentKidIdx(NULL, root->kids_.size());
    Node* current  = root;

    while (current != NULL)
    {
        std::unordered_map<Node*, ParentKidIdx>::iterator pit
            = path.find(current);

        if (pit == path.end())
            std::cout << "IMPOSSIBLE! we have a node without a state\n";

        // just for ease of understanding: std::pair's member names suck..
        // and as you can see this is a bit confusing
        Node*  parent = pit->second.first;
        int&   kidIdx = pit->second.second;  // note its *writable*

        // find the next kid that the current node should work on ..
        Node* newKid = NULL;
        while (kidIdx >= 0 and newKid == NULL)
        {
            // try to get the next kid
            --kidIdx;
            if (kidIdx >= 0)
            {
                // extract the kid
                newKid = current->kids_[kidIdx];

                // check if the kid has a state already
                std::unordered_map<Node*, ParentKidIdx>::iterator kit
                    = path.find(newKid);


                if (kit != path.end())
                {
                    // child has already been visited.. but what is its
                    // current state?
                    if (kit->second.second >= 0)
                        // it is active and in the current path.. so this is
                        // a cycle
                        return true;

                    // it was visited and exhausted..  waste of time, move to
                    // the next one
                    newKid = NULL;
                }
                else
                {
                    // kid has never been visited before: make its init state
                    // entry and then visit it
                    path[newKid] = ParentKidIdx(current,
                                                newKid->kids_.size());
                }
            }
        }

        if (newKid != NULL)
        {
            // we found a new kid to explore.. jump into it
            current = newKid;
        }
        else
        {
            // we have exhausted all the current node's kids..  move up to the
            // parent
            current = parent;
        }
    }
    return false;
}


// *************************************************
// ******************* TEST ************************
// *************************************************

void testCase(std::string tag,
              std::function<bool(Node*)> hasCycle,
              Node* root,
              bool expected)
{
    auto start = std::chrono::system_clock::now();
    bool result = hasCycle(root);
    auto end   = std::chrono::system_clock::now();

    std::cout << tag << ": " << (expected == result ? "pass" : "FAIL")
              << " -- time:"
              << std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count()
              << " nSec" << std::endl;
}

void testCaseSet(std::string set,
                 Node* root,
                 bool expected)
{
    std::cout << "\n *** " << set << " ***\n";
    testCase("hasCycle_recurse", hasCycle_recurse,       root, expected);
    testCase("hasCycle_unord  ", hasCycle_unordPath,     root, expected);
    testCase("hasCycle_1visit ", hasCycle_1visit,        root, expected);
    testCase("hasCycle_iter   ", hasCycle_iterative,     root, expected);
}

void test()
{
    {
        testCaseSet("null",
                    NULL, false);
    }

    {
        Node a0;
        testCaseSet("single node",
                    &a0, false);
    }

    {
        Node a0;
        a0.link(a0); // cycle
        testCaseSet("single node cycle",
                    &a0, true);
    }

    {
        Node a0;
        Node a1;
        a0.link(a1);
        testCaseSet("two node",
                    &a0, false);
    }

    {
        Node a0;
        Node a1;

        a0.link(a1);
        a0.link(a1);
        testCaseSet("two node - double link",
                    &a0, false);
    }

    {
        Node a0;
        Node a1;

        a0.link(a1);
        a1.link(a0); // cycle
        testCaseSet("two node cycle",
                    &a0, true);
    }

    {
        // visit breaker
        Node a0;
        Node a1;
        Node a2;

        a0.link(a2);
        a0.link(a1);
        a1.link(a2);

        testCaseSet("triple node double end link - reversed",
                    &a0, false);
    }

    {
        // visit breaker
        Node a0;
        Node a1;
        Node a2;

        a0.link(a1);
        a0.link(a2);
        a1.link(a2);

        testCaseSet("triple node double end link",
                    &a0, false);
    }

    {
        Node a0;
        Node a1;
        Node a2;
        Node a3;

        a0.link(a2);
        a0.link(a1);
        a1.link(a2);
        a2.link(a0); // cycle..
        a2.link(a3);
        a3.link(a3); // cycle

        testCaseSet("some random tree",
                    &a0, true);
    }

    {
        // graph speed breaker: a wide first node with a repeated long tail..
        Node head;
        Node chest;
        std::vector<Node> shoulders(100);
        for (int i = 0; i < shoulders.size(); ++i)
        {
            head.link(shoulders[i]);
            shoulders[i].link(chest);
        }

        std::vector<Node> tail(100);
        chest.link(tail[0]);
        for (int i = 1; i < tail.size(); ++i)
        {
            tail[i-1].link(tail[i]);
        }

        testCaseSet("head shoulder tail",
                    &head, false);

        Node looper;
        tail[tail.size()-1].link(looper);
        looper.link(head);

        testCaseSet("head shoulder tail - looped",
                    &head, true);
    }
    std::cout << std::endl;
}

int main()
{
    test();
}

*** null ***
hasCycle_recurse: pass -- time:2574 nSec
hasCycle_unord  : pass -- time:13314 nSec
hasCycle_1visit : pass -- time:4531 nSec
hasCycle_iter   : pass -- time:4097 nSec

 *** single node ***
hasCycle_recurse: pass -- time:4975 nSec
hasCycle_unord  : pass -- time:12330 nSec
hasCycle_1visit : pass -- time:9873 nSec
hasCycle_iter   : pass -- time:9279 nSec

 *** single node cycle ***
hasCycle_recurse: pass -- time:4614 nSec
hasCycle_unord  : pass -- time:8321 nSec
hasCycle_1visit : pass -- time:9863 nSec
hasCycle_iter   : pass -- time:8335 nSec

 *** two node ***
hasCycle_recurse: pass -- time:19714 nSec
hasCycle_unord  : pass -- time:9582 nSec
hasCycle_1visit : pass -- time:13318 nSec
hasCycle_iter   : pass -- time:11426 nSec

 *** two node - double link ***
hasCycle_recurse: pass -- time:5507 nSec
hasCycle_unord  : pass -- time:11714 nSec
hasCycle_1visit : pass -- time:13890 nSec
hasCycle_iter   : pass -- time:11678 nSec

 *** two node cycle ***
hasCycle_recurse: pass -- time:5299 nSec
hasCycle_unord  : pass -- time:7683 nSec
hasCycle_1visit : pass -- time:12348 nSec
hasCycle_iter   : pass -- time:9779 nSec

 *** triple node double end link - reversed ***
hasCycle_recurse: pass -- time:7694 nSec
hasCycle_unord  : pass -- time:13821 nSec
hasCycle_1visit : pass -- time:18194 nSec
hasCycle_iter   : pass -- time:12800 nSec

 *** triple node double end link ***
hasCycle_recurse: pass -- time:7322 nSec
hasCycle_unord  : pass -- time:13999 nSec
hasCycle_1visit : pass -- time:16998 nSec
hasCycle_iter   : pass -- time:12793 nSec

 *** some random tree ***
hasCycle_recurse: pass -- time:4840 nSec
hasCycle_unord  : pass -- time:7792 nSec
hasCycle_1visit : pass -- time:11853 nSec
hasCycle_iter   : pass -- time:15752 nSec

 *** head shoulder tail ***
hasCycle_recurse: pass -- time:18295450 nSec
hasCycle_unord  : pass -- time:14848133 nSec
hasCycle_1visit : pass -- time:615861 nSec
hasCycle_iter   : pass -- time:512595 nSec

 *** head shoulder tail - looped ***
hasCycle_recurse: pass -- time:131233 nSec
hasCycle_unord  : pass -- time:136192 nSec
hasCycle_1visit : pass -- time:242850 nSec
hasCycle_iter   : pass -- time:176559 nSec

Wednesday, August 3, 2016

Google interview: Implement an AVL tree ...

Seems to me that google is most likely to ask about tree inserts over erases. Also they might ask for traversal, infix printing etc etc... basically the cheapest way to prepare is to just code the whole AVL tree ecosystem, period...

AVL trees look complex but they are basically a BST with an additional pair of rebalancing rotations that you apply after an insert or erase.. this keeps them somewhat close to the theoretical log(n) depth of a well balanced BST...

There are 2 rotate operations, depicted as follows. They are mirrored depending on the side you are viewing them from (some people call them 4 operations, which I think just over-complicates things). Personally I know them as inside and outside rotates; you can tell which is which by looking at the position of the node's grandchild.. if it's under the grandparent it's inside, otherwise it's outside.

a) grandchild-outside-child (left side)
   T1, T2, T3 and T4 are subtrees. 
          z                            y 
         / \                         /   \
        y   T4  Right Rotate (z)    x      z
       / \      --------------->  /  \    /  \ 
      x   T3                     T1  T2  T3  T4
     / \
   T1   T2

   b) grandchild-inside-child (left-side)    
        z                            z                            x
       / \                         /   \                        /   \ 
      y   T4  Left Rotate (y)     x    T4   Right Rotate(z)   y      z
     / \      -------------->    /  \       -------------->  / \    / \
   T1   x                       y    T3                     T1  T2 T3  T4
       / \                     / \ 
     T2   T3                 T1   T2

The trick to knowing when to perform the rebalance ops is to check the height balance between the left and right sides of the current node. If the height balance is off by more than +/- 1 you need to rotate, and the sign tells you the direction. Then to determine if it's the inner or outer case you check the balance of the child node.. as you can see from the diagram above, the outer case should already have an imbalance in the *same* direction (sign) as the parent's imbalance; if it does not then you have the inner case and need an extra rotate to turn it into the outer case before doing the rotate that corrects it..
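
A quick worked example (added for illustration): insert 10, 20, 30 in that order and node 10 ends up with balance -2 (right heavy) while its child 20 has balance -1; same sign, so it is the outside case and a single left rotate around 10 makes 20 the top with 10 and 30 as children. Insert 10, 30, 20 instead and the child 30 has balance +1; opposite sign, so it is the inside case and 30 must first be rotated right (bringing 20 up) before the left rotate around 10.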

#include <stdint.h>
#include <vector>
#include <algorithm>
#include <iostream>
#include <functional>
#include <cstdlib>   // std::rand

template <typename T>
class AvlTree
{
public:
    // late binding via constructor - dont pollute type
    typedef std::function<bool(const T&, const T&)> Comparer;

private:
    struct Node
    {
        uint32_t height_;
        Node*    left_;
        Node*    right_;
        T        item_;

        Node(const T& item) :
            height_(1),
            left_(NULL),
            right_(NULL),
            item_(item)
        {}

        ~Node()
        {
            if (left_  != NULL) delete left_;
            if (right_ != NULL) delete right_;
        }

        int height() const
        {
            int leftHeight  = (left_  != NULL) ? left_->height_  : 0;
            int rightHeight = (right_ != NULL) ? right_->height_ : 0;

            return std::max(leftHeight, rightHeight) + 1;
        }

        int balance() const
        {
            int leftHeight  = (left_  != NULL) ? left_->height_  : 0;
            int rightHeight = (right_ != NULL) ? right_->height_ : 0;

            return leftHeight - rightHeight;
        }

    };
public:
    class Iterator
    {
        // infix DFS reshaped into stack for an iterator.. fun but crazy...
        //
        // Ok..  So I got creative here..  rather than use parent pointers in the
        // tree I implemented the iterator as a DFS parent stack..  now you do
        // *not* want to do this in production because it's actually a lot more
        // fragile than the parent pointer solution..
        //
        // consider the following trade offs
        // - the find operation needs to create the stack, which is costly, and the
        //  iterator returned from the find will often *not* be moved, making the
        //  stack needless
        // - the iterator is less robust to tree updates.  with a parent
        //  pointer the iterator just points at one node so changing anything that
        //  is not the current node is generally ok..  However with the stack
        //  implementation any update that affects the parent chain breaks the
        //  iterator's stack and causes a big problem

        // TODO can a bi-directional version be made??...

        typedef std::vector<Node*> Stack;

        Stack stack_;

        void enstackLeft(Node* node);
        void next();

    public:
        Iterator(Node* root);   // not publicly usable because Node is not public!
        Iterator();
        Iterator& operator++();
        void      operator++(int);
        const T&  operator*() const;
        const T*  operator->() const;

        T&  operator*() ;
        T*  operator->();

        int height() const; // yep mostly for testing

        bool  operator==(const Iterator& other) const;
        bool  operator!=(const Iterator& other) const;
    };

    class Handle
    {
        // a lightweight *immovable* iterator .. so *not* an iterator
        // TODO it *is* possible to lazily convert it to a real iterator if ++'ed
        //  but whatever.. the stack-based iterator is crazy enough already
        Node* node_;
    public:
        Handle() :
            node_(NULL)
        {}

        Handle(Node* node) :
            node_(node)
        {}

        const T& operator*()  const { return node_->item_; }
              T& operator*()        { return node_->item_; }

        const T* operator->() const { return &(node_->item_); }
              T* operator->()       { return &(node_->item_); }

        operator bool () const      { return node_ != NULL; }
    };

private:
    Node*   root_;
    Comparer cmp_;

    // TODO compilation firewalling issues...
    //  ie can this be converted to a base Node without T and a factory to
    //  create and handle Nodes with T.

    void rotateLeft (Node*& node);
    void rotateRight(Node*& node);
    void rebalance  (Node*& node);

    void insertImpl (const T& item,
                     Node*& node);

    void replacementEraseRight(Node*& node,
                               Node*& other);

    void eraseImpl(const T& item,
                   Node*& node);

public:
    AvlTree() :
        root_(NULL),
        cmp_(std::less<T>())
    {}

    AvlTree(Comparer cmp) :
        root_(NULL),
        cmp_(cmp)
    {}

    ~AvlTree()
    {
        if (root_ != NULL) delete root_;
    }

    void insert(const T& item);
    void erase(const T& item);
    Handle find(const T& item);
    void print(std::ostream& os);

    Iterator begin();
    Iterator end();
};

// *************************************************
// ******************** ITERATOR *******************
// *************************************************

template <typename T>
AvlTree<T>::Iterator::Iterator() :
    stack_()
{}

template <typename T>
AvlTree<T>::Iterator::Iterator(AvlTree<T>::Node* root) :
    stack_()
{
    //when we add new nodes we go down the left side and enstack them
    enstackLeft(root);
}

template <typename T>
void AvlTree<T>::Iterator::enstackLeft(typename AvlTree<T>::Node* node)
{
    while (node != NULL)
    {
        stack_.push_back(node);
        node = node->left_;
    }
}

template <typename T>
void AvlTree<T>::Iterator::next()
{
    // node has just completed left side and self visit.. remove me from the stack
    Node* current = stack_.back();
    stack_.pop_back();

    // and enstack its right branch
    enstackLeft(current->right_);
}

template <typename T>
const T& AvlTree<T>::Iterator::operator*() const
{
    return stack_.back()->item_;
}

template <typename T>
T& AvlTree<T>::Iterator::operator*()
{
    return stack_.back()->item_;
}

template <typename T>
const T* AvlTree<T>::Iterator::operator->() const
{
    return &(stack_.back()->item_);
}

template <typename T>
T* AvlTree<T>::Iterator::operator->()
{
    return &(stack_.back()->item_);
}

template <typename T>
int AvlTree<T>::Iterator::height() const
{
    return stack_.back()->height_;
}


template <typename T>
typename AvlTree<T>::Iterator& AvlTree<T>::Iterator::operator++()
{
    next();
    return *this;
}

// NOTE void on x++ to terminate the post-increment iterator junk
template <typename T>
void AvlTree<T>::Iterator::operator++(int)
{
    // Node* prev = stack_.back();
    next();
    // return ...;
}

template <typename T>
bool AvlTree<T>::Iterator::operator==(const AvlTree<T>::Iterator& other) const
{
    // TODO short cut the last item?? would be quicker
    return other.stack_ == stack_;
}

template <typename T>
bool AvlTree<T>::Iterator::operator!=(const AvlTree<T>::Iterator& other) const
{
    // TODO short cut the last item?? would be quicker
    return not (other == *this);
}

template <typename T>
typename AvlTree<T>::Iterator AvlTree<T>::begin()
{
    return Iterator(root_);
}

template <typename T>
typename AvlTree<T>::Iterator AvlTree<T>::end()
{
    return Iterator();
}

// *************************************************
// ********************** FIND *********************
// *************************************************

// this is a bit of a problem: using stack based iterators makes find more
// weighty, and handing out nodes exposes the internals..
// a lightweight iterator?? one that can't move but gives you access.. a "handle"
template <typename T>
typename AvlTree<T>::Handle AvlTree<T>::find(const T& item)
{
    Node* node = root_;
    while (node != NULL)
    {
        if      (cmp_(item, node->item_))   // less *than*
        {
            node = node->left_;
        }
        else if (cmp_(node->item_, item))   // greater *than*
        {
            node = node->right_;
        }
        else
        {
            return typename AvlTree<T>::Handle(node);
        }
    }
    return typename AvlTree<T>::Handle();
}

// *************************************************
// ********************** PRINT ********************
// *************************************************

template <typename T>
void AvlTree<T>::print(std::ostream& os)
{
    for (typename AvlTree<T>::Iterator it = begin();
         it != end();
         ++it)
    {
        os << *it << "(" << it.height()  << ")" << " ";
    }
}

template <typename T>
std::ostream& operator<<(std::ostream& os, AvlTree<T>& tree)
{
    tree.print(os);
    return os;
}

// *************************************************
// ******************** REBALANCE ******************
// *************************************************

template <typename T>
void AvlTree<T>::rotateLeft(typename AvlTree<T>::Node*& node)
{
    // given the current node make its right child the top
    Node* right  = node->right_;
    if (right == NULL) return;  // this will explode.. stop!

    node->right_ = right->left_;
    right->left_  = node;

    // correct height.. *before* adjusting the parent's link (node)
    node->height_  = node->height();
    right->height_ = right->height();

    // keep in mind that this is using Node*& so the following updates the
    // nodes parent left/right or the root!
    node = right;
}

template <typename T>
void AvlTree<T>::rotateRight(typename AvlTree<T>::Node*& node)
{
    // given the current node make its left the top
    Node* left  = node->left_;
    if (left == NULL) return; // this will explode.. stop!

    node->left_ = left->right_;
    left->right_ = node;

    // correct height.. *before* adjusting the parent's link (node)
    node->height_ = node->height();
    left->height_ = left->height();

    // keep in mind that this is using Node*& so the following updates the
    // nodes parent left/right or the root!
    node = left;
}

// attempt to rebalance in a general way for both insert delete..
template <typename T>
void AvlTree<T>::rebalance(typename AvlTree<T>::Node*& node)
{
    if (node == NULL) return;

    // correct node height..
    node->height_ = node->height();

    // and compute AVL rotations
    int balance = node->balance();

    // done if balance is good...
    if (balance > -2 and balance < 2)  return;

    Node* child = NULL;
    if (balance < -1)
    {
        // right side is heavy
        child = node->right_;
    }
    else if (balance > 1)
    {
        // left side is heavy
        child = node->left_;
    }

    int childBalance = child->balance();

    // std::cout << "   ++ childBalance: " << childBalance << "\n";

    // now rotate to fix the imbalance
    if (balance > 0)
    {
        // planning to rotate right..
        if (childBalance <= 0)
            // but the child is right-heavy or balanced (the inner case) so correct it first
            rotateLeft(node->left_);

        // left side so rotate right
        rotateRight(node);
    }
    else
    {
        // planning to rotate left..
        if (childBalance >= 0)
            // but the child is left-heavy or balanced (the inner case) so correct it first
            rotateRight(node->right_);

        rotateLeft(node);
    }
}

// *************************************************
// ********************* INSERT ********************
// *************************************************

template <typename T>
void AvlTree<T>::insertImpl(const T& item,
                            typename AvlTree<T>::Node*& node)
{
    // BST tree insert
    if (node == NULL)
    {
        node = new Node(item);
        return;
    }

    if (cmp_(item, node->item_))
    {
        insertImpl(item, node->left_);
    }
    else
    {
        insertImpl(item, node->right_);
    }

    // now AVL tree rebalance..
    rebalance(node);
}

template <typename T>
void AvlTree<T>::insert(const T& item)
{
    insertImpl(item, root_);
}

// *************************************************
// ********************* ERASE *********************
// *************************************************

template <typename T>
void AvlTree<T>::replacementEraseRight(typename AvlTree<T>::Node*& node,
                                       typename AvlTree<T>::Node*& other)
{
    // we are no longer searching for the erase target..
    // we are now searching for the next object after it
    // the right side min

    if (other->left_ != NULL)
    {
        // go down to the end of the left branching and do the replacement erase
        replacementEraseRight(node,other->left_);

        // then rebalance the tree from here up
        rebalance(other);
    }
    else
    {
        // swap replacement with target node so nothing odd happens
        std::swap(node->item_, other->item_);

        // now unlink and erase other from tree
        Node* right = other->right_;
        other->right_ = NULL;
        delete other;

        // relink other's child in its place
        other = right;
    }
}

template <typename T>
void AvlTree<T>::eraseImpl(const T& item,
                           typename AvlTree<T>::Node*& node)
{
    // BST tree erase
    // didn't exist in the tree??
    if (node == NULL) return;

    //
    if (cmp_(item, node->item_))          // a < b
    {
        eraseImpl(item, node->left_);
    }
    else if (cmp_(node->item_, item))     // b < a
    {
        eraseImpl(item, node->right_);
    }
    else                                  // b == a
    {
        // match!
        if ((node->left_ == NULL) or (node->right_ == NULL))
        {
            // non special case.. zero/single link can link with parent
            Node* child = (node->right_ == NULL) ? node->left_ : node->right_;

            // unlink node.. and kill it
            node->right_ = NULL;
            node->left_ = NULL;
            delete node;

            // replace with child
            node = child;
        }
        else
        {
            // special case: both right and left have stuff.. but we have 1 parent..
            // means we have to take one of the closest (in value) nodes and replace this one
            // we have 2 choices for the replacement the max of left or min of right...
            replacementEraseRight(node, node->right_);

            // TODO.. shouldn't this do a balance check and then go left or right based on the heavy branch??
        }
    }

    // and then AVL tree rebalance..
    rebalance(node);
}

template <typename T>
void AvlTree<T>::erase(const T& item)
{
    eraseImpl(item, root_);
}

// *************************************************
// ********************* AVL MAP *******************
// *************************************************

// TODO cleanup the AvlTree to AvlMap conversion class
template <typename Key,
          typename Value>
class AvlMap
{
    typedef std::pair<Key,Value> Pair;
    typedef std::function<bool(const Pair&, const Pair&)> Comparer;
    typedef AvlTree<Pair> Tree;

    AvlTree<Pair> tree_;
public:
    typedef typename Tree::Iterator Iterator;

    AvlMap() :
        tree_([] (const Pair& v1, const Pair& v2) { return v1.first < v2.first; })
    {}

    Value& operator[](const Key& key)
    {
        // hmm not such a great implementation... it runs find twice on an insert
        Pair target(key,Value());
        typename AvlTree<Pair>::Handle it = tree_.find(target);
        if (it) return it->second;

        tree_.insert(target);
        return tree_.find(target)->second;
    }

    Iterator begin() { return tree_.begin(); }
    Iterator end()   { return tree_.end();   }
};

// *************************************************
// *********************** TEST ********************
// *************************************************

void test()
{
    // TODO honestly this isn't real testing.. need to make it check the results..
    {
        AvlTree<uint32_t> tree;

        tree.begin();
        tree.end();

        tree.find(1);
    }

    {
        AvlTree<uint32_t> tree;

        tree.insert(50); std::cout << tree << "\n";
        tree.insert(25); std::cout << tree << "\n";
        tree.insert(10); std::cout << tree << "\n";
        tree.insert(40); std::cout << tree << "\n";
        tree.insert(30); std::cout << tree << "\n";
        tree.insert(45); std::cout << tree << "\n";
        tree.insert(75); std::cout << tree << "\n";

        AvlTree<uint32_t>::Handle handle = tree.find(75);
        std::cout << "find:" << *handle << "\n";

        tree.erase(50); std::cout << tree << "\n";
        tree.erase(25); std::cout << tree << "\n";
        tree.erase(10); std::cout << tree << "\n";
        tree.erase(40); std::cout << tree << "\n";
        tree.erase(30); std::cout << tree << "\n";
        tree.erase(45); std::cout << tree << "\n";
        tree.erase(75); std::cout << tree << "\n";
    }

    // outer rotate check
    {
        AvlTree<uint32_t> tree;

        tree.insert(15); std::cout << tree << "\n";
        tree.insert(77); std::cout << tree << "\n";
        tree.insert(35); std::cout << tree << "\n";

        // pair erase check
        tree.erase( 1); std::cout << tree << "\n";
        tree.erase(35); std::cout << tree << "\n";
        tree.erase(15); std::cout << tree << "\n";
        tree.erase(77); std::cout << tree << "\n";
        tree.erase( 1); std::cout << tree << "\n";
    }

    {
        AvlTree<uint32_t> tree;

        tree.insert(93); std::cout << tree << "\n";
        tree.insert(86); std::cout << tree << "\n";
        tree.insert(87); std::cout << tree << "\n";

        // pair erase check
        tree.erase(99); std::cout << tree << "\n";
        tree.erase(86); std::cout << tree << "\n";
        tree.erase(87); std::cout << tree << "\n";
        tree.erase(93); std::cout << tree << "\n";
        tree.erase(99); std::cout << tree << "\n";
    }
}

void randomTreeTest()
{
    AvlTree<uint32_t> tree;

    for (int i = 0; i < (4096-1); ++i)
    {
        uint32_t value = std::rand() % 100;
        // std::cout << value << " ";
        tree.insert(value);
    }
    std::cout << "\n";

    // frequency count the nodes at each layer..
    // thought it was ironic to use the AvlTree as the map also.. just to prove that it's flexible
    AvlMap<int,int> freq;
    for (AvlTree<uint32_t>::Iterator it = tree.begin();
         it != tree.end();
         ++it)
    {
        ++(freq[it.height()]);
    }
    std::cout << "\n";

    // dump it
    for (AvlMap<int,int>::Iterator it = freq.begin();
         it != freq.end();
         ++it)
    {
        std::cout << it->first << ":" << it->second  << " ";
    }
    std::cout << "\n";
}


int main()
{
    test();

    randomTreeTest();
}



And the output looks like:
50(1) 
25(1) 50(2) 
10(1) 25(2) 50(1) 
10(1) 25(3) 40(1) 50(2) 
10(1) 25(3) 30(1) 40(2) 50(1) 
10(1) 25(2) 30(1) 40(3) 45(1) 50(2) 
10(1) 25(2) 30(1) 40(3) 45(1) 50(2) 75(1) 
find:75
10(1) 25(2) 30(1) 40(3) 45(1) 75(2) 
10(1) 30(2) 40(3) 45(1) 75(2) 
30(1) 40(3) 45(1) 75(2) 
30(1) 45(2) 75(1) 
45(2) 75(1) 
75(1) 

15(1) 
15(2) 77(1) 
15(1) 35(2) 77(1) 
15(1) 35(2) 77(1) 
15(1) 77(2) 
77(1) 


93(1) 
86(1) 93(2) 
86(1) 87(2) 93(1) 
86(1) 87(2) 93(1) 
87(2) 93(1) 
93(1) 




1:1992 2:995 3:518 4:266 5:129 6:90 7:45 8:26 9:16 10:8 11:5 12:2 13:2 14:1 

Monday, July 25, 2016

google interview: compute the minimal coins - asymptotically constant version

I have cracked the asymptotically constant version of the min coin count algorithm ..

// http://www.impactinterview.com/2009/10/140-google-interview-questions/#software_engineer
//
// Given a set of coin denominators, find the minimum number of coins to give a
// certain amount of change.

#include <cstdint>
#include <climits>
#include <vector>
#include <iostream>
#include <functional>
#include <chrono>
#include <memory>
#include <list>
#include <queue>

// I knew there was an asymptotically constant version of the algorithm; I just
// cracked it using top down dynamic programming combined with the squeeze
// theorem (the same way that Archimedes found pi)
//
// https://en.wikipedia.org/wiki/Squeeze_theorem
// https://betterexplained.com/articles/prehistoric-calculus-discovering-pi/

// how it works:

// First off we are going to search with a dynamic programming version of the
// depth first search.  We are searching by choosing a count for a coin
// denomination and then moving to the next denomination searching all the way
// down this branch until we exhaust it.  Since the algorithm is a dynamic
// version of DFS it means we are using a stack, not recursion.

// Also note that we are using the squeeze theorem; that means we need to track
// an upperBound (the known best) and a lowerBound, the theoretical floor that
// an unknown branch might achieve

// So by using DFS we can get down to the last coin denomination and get a
// solid result quickly, and that potentially tightens the upperBound more.  As
// the upperBound tightens we are able to trim the search tree more, and as a
// result speed up the overall search more.

// 1) first we set the best and the initial upperBound to the max number of
// coins possible + 1.  The upperBound is the same as our best known min coin
// count, which at the start isn't anything.  Remember, when the upperBound on
// coin count is exceeded the search will stop looking at that branch as a
// possible answer

// 2) Next initialize the stack.  We only stack partial computation steps.
// The first item to add is the max coins we could have for the largest coin,
// later we will decrement from this until we reach zero
//
// 3) Now start the iteration loop.  Pop off the current working item (note the
// actual algorithm is optimized and only pops from the stack if it really has
// finished that denomination).  There are 2 updates to make to the stack
//
// 3a) make a copy of the current item and reduce the coin count in it for the
// current denomination, then compute the theoretical lowerBound for the
// remaining coins (ie compute the coin count when maxing out the highest
// remaining denomination, excluding the current denomination and better).  if
// the theoretical lowerBound of coins that have to be used plus the currently
// used coins is greater than the upperBound, prune the entire branch..  it can
// not possibly beat the current known min.  But if it is better than or equal
// we should push it back onto the stack as it may beat the current best
//
// 3b) Then make another copy of the current item as the next item, moving to
// the next highest coin denomination (excluding the ones which we have already
// chosen coins for); compute the max coins that it can use and add it to the
// next item.  Then check if it's a terminal case (all coins used): if it isn't
// terminal push it to the stack (it will pop off the stack as the next
// current); if it is terminal then compare it to the upperBound, and if it
// beats that, update the best min and the upperBound
//
// repeat until the stack runs out.. then return the best min
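
// To make the squeeze idea concrete before the stack based versions below,
// here is a minimal recursive sketch of the same prune logic (added purely as
// an illustration; squeezeMin and its parameters are my own names, and the
// real implementation further down replaces the recursion with an explicit
// stack).  Call it as squeezeMin(total, denom, 0, 0, total + 1); a result of
// total + 1 means the amount cannot be made exactly.
uint32_t squeezeMin(uint32_t total, const std::vector<uint32_t>& denom,
                    uint32_t idx, uint32_t used, uint32_t upperBound)
{
    if (total == 0) return used;                 // exact change: a candidate answer
    if (idx >= denom.size()) return upperBound;  // dead end: no denominations left

    // theoretical lowerBound (step 3a): even using only the largest remaining
    // denomination we still need at least ceil(total / denom[idx]) more coins
    uint32_t floorCoins = (total + denom[idx] - 1) / denom[idx];
    if (used + floorCoins >= upperBound) return upperBound;  // prune the branch

    // choose a count for this denomination, largest count first (steps 2 and 3b)
    for (uint32_t count = total / denom[idx] + 1; count-- > 0; )
    {
        uint32_t result = squeezeMin(total - count * denom[idx], denom,
                                     idx + 1, used + count, upperBound);
        if (result < upperBound) upperBound = result;        // tighten the squeeze
    }
    return upperBound;
}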

// So let's get started..  note I have the greedy and the bottom-up-with-reach-
// pruning versions for comparison
// A greedy implementation - a quick greedy way to the answer is to maximize
// the largest coin
//
// Note this can give the *wrong* answer
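// e.g. (an illustrative case of my own, not from the question): with denom
// {4, 3, 1} and total 6, greedy picks 4+1+1 = 3 coins while 3+3 = 2 coins is optimal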
// Note expects reverse sorted coins (largest to smallest in denom)
std::vector<uint32_t> minCoins_greedy(uint32_t total,
                                      const std::vector<uint32_t>& denom)
{
    std::vector<uint32_t> res(denom.size()+1,0);

    if (total == 0) return res;

    for(int i = 0; i < denom.size(); ++i)
    {
        uint32_t count = total / denom[i];
        total -= (count * denom[i]);

        res[i] = count;
        res[denom.size()] += count;
    }

    if (total != 0) res[denom.size()] = INT_MAX;

    return res;
}

// ok so we can improve the bottom up DP version and shave a bit more time
// by visiting items in order of current coin count; as a result we get a
// BFS style pruned search.
//
// Note expects reverse sorted coins (largest to smallest in denom)
std::vector<uint32_t> minCoins_botUpReach(uint32_t total,
                                          const std::vector<uint32_t>& denom)
{
    // walk the reachable totals outward from 0 in BFS order, one coin at a time
    if (total == 0) return std::vector<uint32_t>(denom.size()+1, 0);

    // setup memory
    std::vector< std::vector<uint32_t> > cache(total + 1, std::vector<uint32_t>(denom.size()+1, 0));
    std::list<uint32_t> queue;

    queue.push_back(0);
    while(not queue.empty())
    {
        uint32_t curTotal = queue.front();
        queue.pop_front();

        for(int i = 0; i < denom.size(); ++i)
        {
            uint32_t newTotal = curTotal + denom[i];

            if (newTotal <= total and
                cache[newTotal][denom.size()] == 0)
            {
                // ok its empty
                cache[newTotal] = cache[curTotal];
                ++(cache[newTotal][i]);
                ++(cache[newTotal][denom.size()]);

                if (newTotal == total)
                    return cache[total];

                queue.push_back(newTotal);
            }
        }
    }

    // impossible case
    cache[0][denom.size()] = INT_MAX;
    return cache[0];
}
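// Note (my own observation): the cache and the queue both grow with the
// target total, and each reachable total is expanded once against every
// denomination, so the work here is roughly O(total * denom.size())..
// which is why testSet() below skips this version for large totals.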

// ok another idea: we can reformulate this as a top down upperBound and
// lowerBound squeeze with tree pruning, initializing the upperBound to the
// worst case (total + 1).
//
// Note expects reverse sorted coins (largest to smallest in denom)
std::vector<uint32_t> minCoins_topSqueeze(uint32_t total,
                                          const std::vector<uint32_t>& denom)
{
    const int LastIdx       = denom.size() - 1;
    const int TotalCoinsIdx = denom.size();
    const int RemTotalIdx   = denom.size() + 1;
    const int WorkingIdx    = denom.size() + 2;
    const int Size          = denom.size() + 3;

    // prune the total of 0 corner case
    if (total == 0) return std::vector<uint32_t>(denom.size()+1, 0);

    std::vector<uint32_t> best(Size);
    best[TotalCoinsIdx] = INT_MAX;

    // remove 1 coin corner case
    if (denom.size() < 2)
    {
        if (denom.size() == 1)
        {
            best[0] = total / denom[0];
            best[TotalCoinsIdx] = (best[0] * denom[0]) == total ? best[0] : INT_MAX;  // * not +: the coins must rebuild the total exactly
        }
        return best;
    }

    // whats my best guess min (upperBound)?
    // dont use INT_MAX since we do maths on it (total + 1 acts as an
    // unbeatable, already-overflowed max)
    uint32_t upperBounds = total + 1;

    // since we move through the denom vector its size is also the upper limit to 
    // the stack size 
    std::vector< std::vector<uint32_t> > stack(denom.size(), std::vector<uint32_t>(Size));
    int stackTopIdx = 0;

    // compute the max coins for the first layer; that's the starting point
    stack[0][0]             = total / denom[0];
    stack[0][TotalCoinsIdx] = stack[0][0];                     // total coin count
    stack[0][RemTotalIdx]   = total - (stack[0][0]*denom[0]);  // remaining total
    stack[0][WorkingIdx]    = 0;                               // denom working offset

    while (stackTopIdx >= 0)
    {
        if (stackTopIdx >= stack.size()) std::cout << "Stack size assumption failed!\n";

        std::vector<uint32_t>& current = stack[stackTopIdx];

        uint32_t workingIdx = current[WorkingIdx];

        // first generate the current coins reduction case for this level
        if (current[workingIdx] > 0)    // we have coins left in this layer
        {
            // compute the absolute lower bound for the next coin level..
            uint32_t nextCoinsLowerBounds
                = current[RemTotalIdx] / denom[workingIdx+1];

            // can this new count plus the lower bound of the next level of
            // coins possibly beat the current upper bound?
            if (current[TotalCoinsIdx]-1 + nextCoinsLowerBounds <= upperBounds)
            {
                // update the current top but first push a copy ahead of it
                // for the next level of coins computation.
                stack[stackTopIdx+1] = current;

                // ok so remove one of the current level's coins
                current[workingIdx]    -= 1;
                current[TotalCoinsIdx] -= 1;
                current[RemTotalIdx]   += denom[workingIdx];

                // move stack forward
                ++stackTopIdx;
            }
        }

        // now generate the max case for the next level
        ++workingIdx;

        std::vector<uint32_t>& next = stack[stackTopIdx];

        // compute the max coins we can use for it and queue that
        next[workingIdx]     = next[RemTotalIdx] / denom[workingIdx];
        next[TotalCoinsIdx] += next[workingIdx];
        next[RemTotalIdx]   -= denom[workingIdx] * next[workingIdx];
        next[WorkingIdx]     = workingIdx;

        // check if this result is a terminal of the search
        if (next[RemTotalIdx] == 0)
        {
            // it was the end and solves the problem
            // remove it from the stack
            --stackTopIdx;

            // check if it beats the best
            if (upperBounds > next[TotalCoinsIdx])
            {
                best = next;
                upperBounds = best[TotalCoinsIdx];
            }
        }
        else if(workingIdx == LastIdx)  // because it can fail!
        {
            // it's on the last coin and didn't exactly match the total.  So
            // it's a dead end. Remove it.
            --stackTopIdx;
        }
    }

    return best;
}
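// One thing to keep in mind (my note, not from the original): the vectors
// used by topSqueeze carry 2 extra bookkeeping slots beyond the usual
// counts-plus-total layout (the remaining total and the working denom
// index), so callers should only read the first denom.size()+1 entries --
// which is all the test() harness below does.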

void test(const char* label,
          std::function<std::vector<uint32_t> (uint32_t, const std::vector<uint32_t>&)> func,
          uint32_t n,
          const std::vector<uint32_t>& denom)
{
    auto start = std::chrono::system_clock::now();
    std::vector<uint32_t> res = func(n, denom);
    auto end = std::chrono::system_clock::now();

    std::cout << label << ": result:";
    for (int i = 0; i < denom.size(); ++i)
        std::cout << denom[i] << "x" << res[i] << " ";
    std::cout << " count:" << res[denom.size()] << "\n";

    std::cout << " --  time:"
              << std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count()
              << " nSec\n";
}

void testSet(uint32_t n,
             const std::initializer_list<uint32_t>& dlist)
{
    std::vector<uint32_t> denom(dlist);

    std::cout << "\n*** test set for " << n << " *** \n";
    if (n < 2001)  test("botUpReach", minCoins_botUpReach ,n, denom);
    else           std::cout << "skipped botUpReach\n";

    test("topSqueeze", minCoins_topSqueeze, n, denom);
}

int main()
{
    // breakers
    testSet(0,   {20,15,5,4,1});
    testSet(1,   {2});

    testSet(23,  {20,15,5,4,1}); // attack greedy
    testSet(31,  {20,15,5,4,1}); // attack greedy
    testSet(43, {20,15,5,4,1}); // attack greedy
    testSet(46,  {20,15,5,4,1}); // attack greedy

    testSet(100, {20,15,5,4,1}); // attack bottom up
    testSet(1000, {20,15,5,4,1});

    testSet(1000, {13,7,1});
    testSet(2000, {20,15,5,4,1});

    testSet(10000, {20,15,5,4,1});

    testSet(100000000, {20,15,5,4,1});
    testSet(100000023, {20,15,5,4,1});
    testSet(100000031, {20,15,5,4,1});
    testSet(100000043, {20,15,5,4,1});
    testSet(100000046, {20,15,5,4,1});
    testSet(100000100, {20,15,5,4,1});
}


The output looks like:

*** test set for 0 *** 
botUpReach: result:20x0 15x0 5x0 4x0 1x0  count:0
 --  time:4100 nSec
topSqueeze: result:20x0 15x0 5x0 4x0 1x0  count:0
 --  time:1614 nSec

*** test set for 1 *** 
botUpReach: result:2x0  count:2147483647
 --  time:14201 nSec
topSqueeze: result:2x0  count:2147483647
 --  time:2656 nSec

*** test set for 23 *** 
botUpReach: result:20x0 15x1 5x0 4x2 1x0  count:3
 --  time:49285 nSec
topSqueeze: result:20x0 15x1 5x0 4x2 1x0  count:3
 --  time:17680 nSec

*** test set for 31 *** 
botUpReach: result:20x0 15x2 5x0 4x0 1x1  count:3
 --  time:60896 nSec
topSqueeze: result:20x0 15x2 5x0 4x0 1x1  count:3
 --  time:21468 nSec

*** test set for 43 *** 
botUpReach: result:20x1 15x1 5x0 4x2 1x0  count:4
 --  time:96826 nSec
topSqueeze: result:20x1 15x1 5x0 4x2 1x0  count:4
 --  time:21117 nSec

*** test set for 46 *** 
botUpReach: result:20x2 15x0 5x1 4x0 1x1  count:4
 --  time:99908 nSec
topSqueeze: result:20x2 15x0 5x1 4x0 1x1  count:4
 --  time:26414 nSec

*** test set for 100 *** 
botUpReach: result:20x5 15x0 5x0 4x0 1x0  count:5
 --  time:187759 nSec
topSqueeze: result:20x5 15x0 5x0 4x0 1x0  count:5
 --  time:15795 nSec

*** test set for 1000 *** 
botUpReach: result:20x50 15x0 5x0 4x0 1x0  count:50
 --  time:2350739 nSec
topSqueeze: result:20x50 15x0 5x0 4x0 1x0  count:50
 --  time:18175 nSec

*** test set for 1000 *** 
botUpReach: result:13x76 7x1 1x5  count:82
 --  time:2124334 nSec
topSqueeze: result:13x76 7x1 1x5  count:82
 --  time:19523 nSec

*** test set for 2000 *** 
botUpReach: result:20x100 15x0 5x0 4x0 1x0  count:100
 --  time:4793008 nSec
topSqueeze: result:20x100 15x0 5x0 4x0 1x0  count:100
 --  time:21261 nSec

*** test set for 10000 *** 
skipped botUpReach
topSqueeze: result:20x500 15x0 5x0 4x0 1x0  count:500
 --  time:22749 nSec

*** test set for 100000000 *** 
skipped botUpReach
topSqueeze: result:20x5000000 15x0 5x0 4x0 1x0  count:5000000
 --  time:23337 nSec

*** test set for 100000023 *** 
skipped botUpReach
topSqueeze: result:20x5000000 15x1 5x0 4x2 1x0  count:5000003
 --  time:65017 nSec

*** test set for 100000031 *** 
skipped botUpReach
topSqueeze: result:20x5000000 15x2 5x0 4x0 1x1  count:5000003
 --  time:50749 nSec

*** test set for 100000043 *** 
skipped botUpReach
topSqueeze: result:20x5000001 15x1 5x0 4x2 1x0  count:5000004
 --  time:42286 nSec

*** test set for 100000046 *** 
skipped botUpReach
topSqueeze: result:20x5000002 15x0 5x1 4x0 1x1  count:5000004
 --  time:44130 nSec

*** test set for 100000100 *** 
skipped botUpReach
topSqueeze: result:20x5000005 15x0 5x0 4x0 1x0  count:5000005
 --  time:14530 nSec

Sunday, July 24, 2016

google interview: compute the minimal coins

Overall the result of this one is workable, but I expect there should be a way to equal the greedy's performance in the asymptote, because the fine details of the least significant coins stop changing once you get far enough out of range of the largest coin.. hence it seems very plausible there is a better algorithm for larger numbers... I just don't have the time to work it out, maybe something that greedily guesses the largest coin's count up to some limit and then does the real search for the rest..

Update: I have just found some more time and added one more improvement that cuts off more time (bottom up restricted to reachable totals only).. I had another improvement in mind (reachable + A* distance to the end via the max usable coin).. BUT this had 2 issues: the A* heuristic is greedy so not perfect, and the use of a priority queue added significant overhead, destroying all benefit of it anyway..

Update2: I cracked the asymptotically constant version.. it kicks these out of the room.. check out http://code-slim-jim.blogspot.com/2016/07/google-interview-compute-mininual-coins.html (next post after this one)


// http://www.impactinterview.com/2009/10/140-google-interview-questions/#software_engineer
//
// Given a set of coin denominators, find the minimum number of coins to give a
// certain amount of change.

#include <cstdint>
#include <climits>
#include <vector>
#include <iostream>
#include <functional>
#include <chrono>
#include <memory>
#include <list>

// This question is quite interesting; even though it is simple to ask, the
// range of potential solutions is wide.  As far as I see it you have 2 main
// choices:
// 1) you can treat it as a graph and perform BFS, astar, etc to search on it
// 2) you can treat it as a recursion and dynamic programming question

// Given that this can form a tree you can also use a greedy search to find
// a solution.  BUT since you are searching for the *minimal* coin count the
// result from greedy is likely to be sub optimal.

// Also note that we can have multiple solutions to the problem depending on
// the value and denominations: 6 => 1+5 = 2+4 = ...  etc
// It's also possible to have no solution (for example exclude 1 from the
// denominations)

// So lets get started
// A greedy implementation - a quick greedy way to the answer is to maximize
// the largest coin
//
// Note this can give the *wrong* answer
// Note expects reverse sorted coins (largest to smallest)
std::vector<uint32_t> minCoins_greedy(uint32_t total,
                                      const std::vector<uint32_t>& denom)
{
    std::vector<uint32_t> res(denom.size()+1,0);

    if (total == 0) return res;

    for(int i = 0; i < denom.size(); ++i)
    {
        uint32_t count = total / denom[i];
        total -= (count * denom[i]);

        res[i] = count;
        res[denom.size()] += count;
    }

    if (total != 0) res[denom.size()] = INT_MAX;

    return res;
}
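// To see the greedy go wrong, take the total=23, denom={20,15,5,4,1} case
// from the tests below: greedy grabs the 20 first and is forced into
// 20x1 1x3 = 4 coins, while the true minimum is 15x1 4x2 = 3 coins.  A
// minimal check of that (the helper name is mine, not part of the
// original):
static void greedyCanBeWrongExample()
{
    std::vector<uint32_t> r = minCoins_greedy(23, {20,15,5,4,1});

    // r now holds {1,0,0,0,3, 4}: the counts per denom plus the total of 4
    std::cout << "greedy(23) uses " << r[5] << " coins, the optimum is 3\n";
}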

// The recursive implementation - simply walk the entire tree of
// possibilities.  First we need to formulate the induction.  We will add
// one coin each time and then recurse.  Hence the induction for v[n], the
// number of coins for total n, is:
//   v[n] = 1 + v[n - c]
//
// Note denoms need to be in reverse order.. max to min!
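//
// For a concrete instance of the recurrence (my numbers): with
// denom={20,15,5,4,1} the naive walk computes
//   v[23] = 1 + min(v[3], v[8], v[18], v[19], v[22])
// and each of those recurses the same way, revisiting the same subtotals
// over and over.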
std::vector<uint32_t> minCoins_naive(uint32_t total,
                                     const std::vector<uint32_t>& denom)
{
    std::vector<uint32_t> res(denom.size()+1,0);

    if (total == 0) return res;

    res[denom.size()] = INT_MAX;

    for(int i = 0; i < denom.size(); ++i)
    {
        if (total >= denom[i])
        {
            std::vector<uint32_t> pass = minCoins_naive(total - denom[i],
                                                        denom);

            if ((pass[denom.size()]  + 1) < res[denom.size()])
            {
                res = pass;
                ++(res[i]);
                ++(res[denom.size()]);
            }
        }
    }

    return res;
}

// Now the naive recursion is also computing items that we know are already
// larger than our current known best min, hence we can prune the tree here -
// simply walk the entire tree until we reach a depth greater than the
// current best min, then stop because the answer can't be down there
// anymore
//
// Note denoms need to be in reverse order.. max to min!
std::vector<uint32_t> minCoins_prunedImpl(uint32_t total,
                                          const std::vector<uint32_t>& denom,
                                          uint32_t depth,
                                          uint32_t maxDepth)
{
    // found trim point..
    if (depth > maxDepth)
    {
        // terminate this search path
        std::vector<uint32_t> res(denom.size()+1,0);
        res[denom.size()] = INT_MAX;
        return res;
    }

    // compute the greedy answer..
    std::vector<uint32_t> res = minCoins_greedy(total,denom);

    // a 1 coin greedy answer can't be beaten.. terminate early
    if (res[denom.size()] == 1)
        return res;

    for(int i = 0; i < denom.size(); ++i)
    {
        if (total >= denom[i])
        {
            uint32_t newMaxDepth = (maxDepth < depth + res[denom.size()] ) ? maxDepth : (depth + res[denom.size()] );

            std::vector<uint32_t> pass = minCoins_prunedImpl(total - denom[i],
                                                             denom,
                                                             depth+1,
                                                             newMaxDepth );

            if ((pass[denom.size()]  + 1) < res[denom.size()])
            {
                res = pass;
                ++(res[i]);
                ++(res[denom.size()]);
            }
        }
    }

    return res;
}

std::vector<uint32_t> minCoins_pruned(uint32_t total,
                                      const std::vector<uint32_t>& denom)
{
    return minCoins_prunedImpl(total,
                               denom,
                               0,
                               INT_MAX);
}

// Ok so we know now that the naive isn't smart, and pruning improves this
// but the approach is still wasting a lot of time...  what if we approach
// it as a divide and conquer problem?
//
// To achieve this we first reformulate the induction slightly
//   v[n] = 1 + v[n - c]
// into
//   v[n] = v[c] + v[n - c]
//
// now we have moved from 1 coin at a time on the left and many on the right to
// a full divide and conquer with many coins on both left and right. Since we are
// now choosing multiple coins at one time we may as well pick all the coins from
// each denomination and then remove it from the list.
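//
// Again a concrete instance (my numbers): with denom={20,15,5,4,1} the top
// level only has to decide how many 20s to use (23/20 = 1, so 1 or 0);
// each choice fixes the left side completely and recurses on what is left
// with the remaining denominations:
//   v[23] = min( 1 + v'[3],  0 + v'[23] )   where v' excludes the 20s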
//
// Note denoms need to be in reverse order.. max to min!
std::vector<uint32_t> minCoins_divAndConqImpl(uint32_t total,
                                              const uint32_t* denomStart,
                                              const uint32_t* denomEnd)
{
    std::size_t denomSize = denomEnd - denomStart;
    uint32_t maxCount = total / *denomStart;

    std::vector<uint32_t> res(denomSize+1,0);
    res[denomSize] = INT_MAX;

    if (denomSize == 1)
    {
        // last case can't search any deeper... so just give the only result possible
        uint32_t leftTotal = total - (maxCount * *denomStart);

        if (leftTotal == 0)
        {
            res[0] = maxCount;
            res[1] = maxCount;
        }

        return res;
    }


    uint32_t count = maxCount+1;
    {
        // first case might be special... no need to check it every time
        if (total == (maxCount * *denomStart))
        {
            res[0] = maxCount;
            res[denomSize] = maxCount;
            count--;
        }
    }

    while (count > 0)
    {
        --count;
        uint32_t leftTotal = total - (count * *denomStart);

        std::vector<uint32_t> sub_res = minCoins_divAndConqImpl(leftTotal,
                                                                denomStart + 1,
                                                                denomEnd);

        if (sub_res[denomSize-1] != INT_MAX and
            (sub_res[denomSize - 1] + count) < res[denomSize])
        {
            res[0] = count;
            for (int i = 0; i < denomSize; ++i)  // NOTE the oddness.. because its copying total size as well
                res[i+1] = sub_res[i];

            res[denomSize] += count;
        }
    }

    return res;
}


std::vector<uint32_t> minCoins_divAndConq(uint32_t total,
                                          const std::vector<uint32_t>& denom)
{
    // we are dividing by making the full decision for each coin and then removing it from the list
    const uint32_t* denomStart = &(denom[0]);
    const uint32_t* denomEnd   = &(denom[denom.size()-1]);
    denomEnd += 1;

    return minCoins_divAndConqImpl(total,
                                   denomStart,
                                   denomEnd);
}

// ok so the divide and conquer makes a big difference..  but what if we
// stop looking at this as a recursive problem and look at it from a
// dynamic programming perspective; we will start with the top down
// approach and simply cache the results from the prior recursive calls
//
// Note denoms need to be in reverse order.. max to min!
std::vector<uint32_t>& minCoins_topImpl(uint32_t total,
                                        const std::vector<uint32_t>& denom,
                                        std::vector<std::vector<uint32_t> >& cache)
{
    std::vector<uint32_t>& res = cache[total];

    if (res[denom.size()] > 1 or total == 0)
    {
        return res;
    }

    res[denom.size()] = INT_MAX; // max the count.. (the +1 in the compare overflows past it)

    for(int i = 0; i < denom.size(); ++i)
    {
        if (total >= denom[i])
        {
            std::vector<uint32_t>& pass = minCoins_topImpl(total - denom[i],
                                                           denom,
                                                           cache);

            if ((pass[denom.size()] + 1) < res[denom.size()])
            {
                res = pass;
                ++(res[i]);
                ++(res[denom.size()]);
            }
        }
    }

    return res;
}

std::vector<uint32_t> minCoins_top(uint32_t total,
                                   const std::vector<uint32_t>& denom)
{
    // setup memory
    std::vector<std::vector<uint32_t> > cache;
    cache.resize(total + 1);
    for (int i = 0; i < total+1; ++i)
        cache[i].resize(denom.size()+1);

    return minCoins_topImpl(total, denom, cache);
}

// ok but as always we can reverse the approach and produce a bottom-up
// dynamic programming implementation and save ourselves all the call
// overhead, since in this problem the tree search is not sparse and every
// case will be visited
//
// Note denoms need to be in reverse order.. max to min!
std::vector<uint32_t> minCoins_botImpl(uint32_t total,
                                       const std::vector<uint32_t>& denom,
                                       std::vector<std::vector<uint32_t> >& cache)
{
    // compute
    for (int target = 1; target <= total; ++target)
    {
        cache[target][denom.size()] = INT_MAX; // set count to max and overflowed

        for(int i = 0; i < denom.size(); ++i)
        {
            if (target >= denom[i])
            {
                uint32_t delta = target - denom[i];

                if (cache[delta][denom.size()] + 1 < cache[target][denom.size()])
                {
                    cache[target] = cache[delta];
                    ++(cache[target][i]);
                    ++(cache[target][denom.size()]);
                }
            }
        }
    }

    return cache[total];
}

std::vector<uint32_t> minCoins_bot(uint32_t total,
                                   const std::vector<uint32_t>& denom)
{
    // setup memory
    std::vector<std::vector<uint32_t> > cache;
    cache.resize(total + 1);
    for (int i = 0; i < total+1; ++i)
        cache[i].resize(denom.size()+1);

    return minCoins_botImpl(total,
                            denom,
                            cache);
}

// ok so we can improve the bottom up DP version and shave a bit more time
// by visiting items in order of current coin count; as a result we will
// create only the counts that reach towards the total
//
std::vector<uint32_t> minCoins_botUpReach(uint32_t total,
                                          const std::vector<uint32_t>& denom)
{
    // walk the reachable totals outward from 0 in BFS order, one coin at a time
    if (total == 0) return std::vector<uint32_t>(denom.size()+1, 0);

    // setup memory
    std::vector< std::vector<uint32_t> > cache(total + 1, std::vector<uint32_t>(denom.size()+1, 0));
    std::list<uint32_t> queue;

    queue.push_back(0);
    while(not queue.empty())
    {
        uint32_t curTotal = queue.front();
        queue.pop_front();

        for(int i = 0; i < denom.size(); ++i)
        {
            uint32_t newTotal = curTotal + denom[i];

            if (newTotal <= total and
                cache[newTotal][denom.size()] == 0)
            {
                // ok its empty
                cache[newTotal] = cache[curTotal];
                ++(cache[newTotal][i]);
                ++(cache[newTotal][denom.size()]);

                if (newTotal == total)
                    return cache[total];

                queue.push_back(newTotal);
            }
        }
    }

    // impossible case
    cache[0][denom.size()] = INT_MAX;
    return cache[0];
}

// over all the results are:
// * the greedy continues to kick butt but can be wrong..  this was expected
//   as it guesses and looks at only 1 path to the solution
// * the recursive and depth pruned implementations work but are awful
//   because they repeat the same work over and over
// * the divide and conquer performs well and beats the dynamic programming
//   implementations when the target n is small...  this was a surprising
//   result to me as the top down implementation effectively tries to
//   exhaust the largest coins first, and does this by calling into the
//   cached value mid point..  hence i expected it to beat the divide and
//   conquer, because it does effectively the same with caching
// * the dynamic programming versions work best in the long term, they:
//  + are pruning because they use the cached results to effect early
//    termination
//  + effectively implement the divide and conquer version by taking the
//    largest coin first, and should be faster than the div/conq because
//    they can use the cached result mid point and can trim the computations
//    of multiple coin draws with the same value (hence it's pointless to
//    DP-convert the div/conq version)
//  + the bottom up DP also optimizes further by removing the function call
//    overhead

void test(const char* label,
          std::function<std::vector<uint32_t> (uint32_t, const std::vector<uint32_t>&)> func,
          uint32_t n,
          const std::vector<uint32_t>& denom)
{
    auto start = std::chrono::system_clock::now();
    std::vector<uint32_t> res = func(n, denom);
    auto end = std::chrono::system_clock::now();

    std::cout << label << ": result:";
    for (int i = 0; i < denom.size(); ++i)
        std::cout << denom[i] << "x" << res[i] << " ";
    std::cout << " count:" << res[denom.size()] << "\n";

    std::cout << " --  time:"
              << std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count()
              << " nSec\n";
}

void testSet(uint32_t n,
             const std::initializer_list<uint32_t>& dlist)
{
    std::vector<uint32_t> denom(dlist);

    std::cout << "\n*** test set for " << n << " *** \n";

    test("greedy    ", minCoins_greedy,n, denom);
    if (n < 30) test("naive     ", minCoins_naive ,n, denom);
    else        std::cout << "skipping naive\n";
    if (n < 200) test("pruned    ", minCoins_pruned,n, denom);
    else         std::cout << "skipping pruned\n";
    if (n < 2000) test("divAndConq", minCoins_divAndConq,n, denom);
    else         std::cout << "skipping div and conquer\n";
    test("top       ", minCoins_top   ,n, denom);
    test("bot       ", minCoins_bot   ,n, denom);
    test("botUpReach", minCoins_botUpReach ,n, denom);
}

int main()
{
    // breakers
    testSet(0,   {20,15,5,4,1});
    testSet(1,   {2});

    testSet(23,  {20,15,5,4,1}); // attack greedy
    testSet(46,  {20,15,5,4,1}); // attack greedy
    testSet(31,  {20,15,5,4,1}); // attack greedy
    testSet(43, {20,15,5,4,1}); // attack greedy
    testSet(100, {20,15,5,4,1}); // attack bottom up
    testSet(1000, {20,15,5,4,1});

    testSet(1000, {13,7,1});

    testSet(10000, {20,15,5,4,1});

}
The output looks like:
*** test set for 0 *** 
greedy    : result:20x0 15x0 5x0 4x0 1x0  count:0
 --  time:1852 nSec
naive     : result:20x0 15x0 5x0 4x0 1x0  count:0
 --  time:572 nSec
pruned    : result:20x0 15x0 5x0 4x0 1x0  count:0
 --  time:987 nSec
divAndConq: result:20x0 15x0 5x0 4x0 1x0  count:0
 --  time:443 nSec
top       : result:20x0 15x0 5x0 4x0 1x0  count:0
 --  time:3814 nSec
bot       : result:20x0 15x0 5x0 4x0 1x0  count:0
 --  time:1658 nSec
botUpReach: result:20x0 15x0 5x0 4x0 1x0  count:0
 --  time:329 nSec

*** test set for 1 *** 
greedy    : result:2x0  count:2147483647
 --  time:548 nSec
naive     : result:2x0  count:2147483647
 --  time:388 nSec
pruned    : result:2x0  count:2147483647
 --  time:563 nSec
divAndConq: result:2x0  count:2147483647
 --  time:420 nSec
top       : result:2x0  count:2147483647
 --  time:2380 nSec
bot       : result:2x0  count:2147483647
 --  time:1881 nSec
botUpReach: result:2x0  count:2147483647
 --  time:3839 nSec

*** test set for 23 *** 
greedy    : result:20x1 15x0 5x0 4x0 1x3  count:4
 --  time:529 nSec
naive     : result:20x0 15x1 5x0 4x2 1x0  count:3
 --  time:5941762 nSec
pruned    : result:20x0 15x1 5x0 4x2 1x0  count:3
 --  time:69194 nSec
divAndConq: result:20x0 15x1 5x0 4x2 1x0  count:3
 --  time:9601 nSec
top       : result:20x0 15x1 5x0 4x2 1x0  count:3
 --  time:26959 nSec
bot       : result:20x0 15x1 5x0 4x2 1x0  count:3
 --  time:19781 nSec
botUpReach: result:20x0 15x1 5x0 4x2 1x0  count:3
 --  time:14906 nSec

*** test set for 46 *** 
greedy    : result:20x2 15x0 5x1 4x0 1x1  count:4
 --  time:534 nSec
skipping naive
pruned    : result:20x2 15x0 5x1 4x0 1x1  count:4
 --  time:798560 nSec
divAndConq: result:20x2 15x0 5x1 4x0 1x1  count:4
 --  time:46203 nSec
top       : result:20x2 15x0 5x1 4x0 1x1  count:4
 --  time:56899 nSec
bot       : result:20x2 15x0 5x1 4x0 1x1  count:4
 --  time:37923 nSec
botUpReach: result:20x2 15x0 5x1 4x0 1x1  count:4
 --  time:32182 nSec

*** test set for 31 *** 
greedy    : result:20x1 15x0 5x2 4x0 1x1  count:4
 --  time:561 nSec
skipping naive
pruned    : result:20x0 15x2 5x0 4x0 1x1  count:3
 --  time:134656 nSec
divAndConq: result:20x0 15x2 5x0 4x0 1x1  count:3
 --  time:16872 nSec
top       : result:20x0 15x2 5x0 4x0 1x1  count:3
 --  time:38468 nSec
bot       : result:20x0 15x2 5x0 4x0 1x1  count:3
 --  time:25553 nSec
botUpReach: result:20x0 15x2 5x0 4x0 1x1  count:3
 --  time:19032 nSec

*** test set for 43 *** 
greedy    : result:20x2 15x0 5x0 4x0 1x3  count:5
 --  time:480 nSec
skipping naive
pruned    : result:20x1 15x1 5x0 4x2 1x0  count:4
 --  time:731592 nSec
divAndConq: result:20x1 15x1 5x0 4x2 1x0  count:4
 --  time:34070 nSec
top       : result:20x1 15x1 5x0 4x2 1x0  count:4
 --  time:54569 nSec
bot       : result:20x1 15x1 5x0 4x2 1x0  count:4
 --  time:35912 nSec
botUpReach: result:20x1 15x1 5x0 4x2 1x0  count:4
 --  time:30253 nSec

*** test set for 100 *** 
greedy    : result:20x5 15x0 5x0 4x0 1x0  count:5
 --  time:567 nSec
skipping naive
pruned    : result:20x5 15x0 5x0 4x0 1x0  count:5
 --  time:7856557 nSec
divAndConq: result:20x5 15x0 5x0 4x0 1x0  count:5
 --  time:371883 nSec
top       : result:20x5 15x0 5x0 4x0 1x0  count:5
 --  time:107881 nSec
bot       : result:20x5 15x0 5x0 4x0 1x0  count:5
 --  time:79875 nSec
botUpReach: result:20x5 15x0 5x0 4x0 1x0  count:5
 --  time:60365 nSec

*** test set for 1000 *** 
greedy    : result:20x50 15x0 5x0 4x0 1x0  count:50
 --  time:728 nSec
skipping naive
skipping pruned
divAndConq: result:20x50 15x0 5x0 4x0 1x0  count:50
 --  time:1654224850 nSec
top       : result:20x50 15x0 5x0 4x0 1x0  count:50
 --  time:838802 nSec
bot       : result:20x50 15x0 5x0 4x0 1x0  count:50
 --  time:766986 nSec
botUpReach: result:20x50 15x0 5x0 4x0 1x0  count:50
 --  time:736739 nSec

*** test set for 1000 *** 
greedy    : result:13x76 7x1 1x5  count:82
 --  time:651 nSec
skipping naive
skipping pruned
divAndConq: result:13x76 7x1 1x5  count:82
 --  time:1220761 nSec
top       : result:13x76 7x1 1x5  count:82
 --  time:704132 nSec
bot       : result:13x76 7x1 1x5  count:82
 --  time:685573 nSec
botUpReach: result:13x76 7x1 1x5  count:82
 --  time:686982 nSec

*** test set for 10000 *** 
greedy    : result:20x500 15x0 5x0 4x0 1x0  count:500
 --  time:559 nSec
skipping naive
skipping pruned
skipping div and conquer
top       : result:20x500 15x0 5x0 4x0 1x0  count:500
 --  time:8379256 nSec
bot       : result:20x500 15x0 5x0 4x0 1x0  count:500
 --  time:7830593 nSec
botUpReach: result:20x500 15x0 5x0 4x0 1x0  count:500
 --  time:7763225 nSec

Google interview: Compute the Fibonacci sequence

// Write code that computes the Fibonacci sequence up to n

#include <cstdint>
#include <vector>
#include <iostream>
#include <functional>
#include <chrono>

// Recursive implementations of Fibonacci are all the rage in a teaching
// environment as they demonstrate recursion nicely. The problem is that a
// recursive implementation is a completely inefficient way to code the
// task in practice.  Each call of the function calls 2 more, and those 2
// call 2 more each resulting in 4, and those 4 call 4 more each resulting
// in 8; so the time complexity is 1 + 2 + 4 + 8 + ... which is exponential
// growth and just awful (the call depth, and hence the space, stays linear)

uint32_t fib_naive(uint32_t n)
{
    if (n == 0) return 0;
    if (n == 1) return 1;

    return fib_naive(n-1) + fib_naive(n-2);
}
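// To make the blow-up concrete, here is a quick instrumented variant (my
// own sketch, not part of the original) that counts calls: fib of 20
// already triggers 21891 calls, and each step up in n multiplies the call
// count by roughly the golden ratio ~1.618.

uint32_t fib_counted(uint32_t n, uint64_t& calls)
{
    ++calls;  // one bump per invocation, terminal or not

    if (n == 0) return 0;
    if (n == 1) return 1;

    return fib_counted(n-1, calls) + fib_counted(n-2, calls);
}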

// So what google is really testing here is the "dynamic programming"
// paradigm: https://en.wikipedia.org/wiki/Dynamic_programming.
// Unfortunately the term "dynamic programming" has had its meaning diluted
// and corrupted a bit.  In this context what it means is breaking a larger
// problem into sub parts and solving each sub part to solve the whole.
// This sounds exactly like recursion, and it is; the distinction is that
// recursive implementations tend to stop at the raw mathematical
// conversion whereas DP implementations lazily cache results to cut off
// repeated computation

// There are two textbook approaches to this; top-down and bottom-up

// in the top-down approach you start working from the answer point and
// compute the values of the breakdowns; you deploy memory as needed to
// cache the results of prior computations so that if they are needed again
// you use the cached value rather than recomputing from scratch

uint32_t fib_top_core(uint32_t n, std::vector<uint32_t>& cache)
{
    if (n > 0 and cache[n] == 0)
    {
        cache[n] = fib_top_core(n-2,cache) + fib_top_core(n-1,cache);
    }

    return cache[n];
}

uint32_t fib_top(uint32_t n)
{
    if (n == 0) return 0;

    std::vector<uint32_t> cache(n+1, 0);
    cache[1] = 1;

    return fib_top_core(n, cache);
}

// in the bottom-up approach you start working from the known terminators
// and work your way up to the answer point.  You do this by computing the
// values of all the breakdowns you could possibly need and memoizing the
// results for use in the next computations.  Often you can optimize the
// memory to forget values that we know we will not revisit

uint32_t fib_bot(uint32_t n)
{
    if (n == 0) return 0;

    uint32_t fib_minus_2 = 0;
    uint32_t fib_minus_1 = 0;
    uint32_t fib_current = 1;

    while (n > 1)
    {
        fib_minus_2 = fib_minus_1;
        fib_minus_1 = fib_current;
        fib_current = fib_minus_2 + fib_minus_1;
        --n;
    }

    return fib_current;
}

void test(std::function<uint32_t(uint32_t)> func, uint32_t n)
{
    auto start = std::chrono::system_clock::now();
    std::cout << "result:" << func(n);
    auto end = std::chrono::system_clock::now();
    std::cout << " --  time:"
              << std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count()
              << "nSec\n";
}

int main()
{
    int n = 40;

    test(fib_naive,n);
    test(fib_top  ,n);
    test(fib_bot  ,n);
}

and the output looks like:
result:102334155 --  time:1282786361 nSec
result:102334155 --  time:51012 nSec
result:102334155 --  time:690 nSec