weijjcg
I also prefer using a hash table to solve the problem; the expected time complexity is O(n). Here is my code in C++:
#include <iostream>
#include <iterator>
#include <list>
#include <utility>
#include <vector>
// One hash-table entry: a run [beg, end] of consecutive integers, stored
// under the key of the element that created it.
struct node
{
    int key;        // the element this entry was created for
    int beg, end;   // inclusive bounds of the consecutive run
    node( int key ) : key(key), beg(key), end(key) {}
    // Number of integers covered by the run.
    int size() const { return end - beg + 1; }
};
// Chained hash table of `node` runs, used to merge runs of consecutive
// integers.  Collisions are resolved with per-bucket linked lists, so
// insert/search are O(1) expected for a reasonable divisor.
struct hash_table
{
    const int divisor;                          // number of buckets
    std::vector< std::list<node> > buckets;
    hash_table( int divisor ) : divisor(divisor), buckets( divisor, std::list<node>() ) {}
    typedef std::list<node>::iterator nd_iter;
    // Bucket index for a key, normalized so negative keys do not produce a
    // negative index (in C++, % with a negative operand can be negative).
    int bucket_of( int key ) const
    {
        int i = key % divisor;
        return i < 0 ? i + divisor : i;
    }
    // Inserts nd unless a node with the same key already exists.
    // Returns {true, iterator-to-new-node} on insertion,
    // {false, iterator-to-existing-node} otherwise.
    std::pair< bool, nd_iter > insert( const node& nd )
    {
        int i = bucket_of( nd.key );
        for( nd_iter it = buckets[i].begin(); it != buckets[i].end(); ++it )
            if( it->key == nd.key ) return std::make_pair( false, it );
        buckets[i].push_back(nd);
        return std::make_pair( true, std::prev( buckets[i].end() ) );
    }
    // Looks the key up; on failure the iterator is the bucket's end().
    std::pair< bool, nd_iter > search( int key )
    {
        int i = bucket_of( key );
        for( nd_iter it = buckets[i].begin(); it != buckets[i].end(); ++it )
            if( it->key == key ) return std::make_pair( true, it );
        return std::make_pair( false, buckets[i].end() );
    }
    // Prints the longest run recorded in the table, one line of the
    // consecutive values.  Prints nothing for an empty table.
    void find_longest()
    {
        int max_size = 0;
        std::list<node>::iterator kt;
        for( std::vector< std::list<node> >::iterator it = buckets.begin(); it != buckets.end(); ++it )
        {
            for( std::list<node>::iterator jt = it->begin(); jt != it->end(); ++jt )
            {
                if( jt->size() > max_size )
                {
                    max_size = jt->size();
                    kt = jt;
                }
            }
        }
        // Original dereferenced kt unconditionally: with an empty table kt
        // was never assigned, which is undefined behavior.
        if( max_size == 0 ) return;
        for( int i = kt->beg; i <= kt->end; ++i ) std::cout << i << " ";
        std::cout << std::endl;
    }
};
void longest_increasing_sequence( int a[], int n )
{
hash_table ht(11);
std::pair< bool, hash_table::nd_iter > ait, lait, rait;
for( int i = 0; i < n; ++i )
{
ait = ht.insert( node(a[i]) );
if( ait.first )
{
lait = ht.search(a[i]-1);
rait = ht.search(a[i]+1);
if( lait.first ) ait.second->beg = lait.second->beg;
if( rait.first )
{
ait.second->end = rait.second->end;
rait.second->beg = ait.second->beg;
}
if( lait.first ) lait.second->end = ait.second->end;
}
}
ht.find_longest();
}
// Returns the element count of a built-in int array, deduced at compile
// time from the type of the array reference.
template< std::size_t n >
std::size_t size( int (&)[n] )
{
    return n;
}
// Demo driver: print the longest run of consecutive values in the sample.
int main( int argc, char* argv[] )
{
    int a[] = { 5, 1, 9, 3, 8, 20, 4, 10, 2, 11, 3 };
    longest_increasing_sequence( a, static_cast<int>( sizeof(a) / sizeof(a[0]) ) );
    return 0;
}
I think the question is not very difficult; here is the code, which is self-explanatory:
#include <iostream>
#include <vector>
#include <string>
// Maps a lowercase letter to its (row, column) on the 5-column keypad
// that main() prints ('a' is at (0,0), 'z' at (5,0)).
std::pair<int,int> pos( char c )
{
    const int offset = c - 'a';
    return std::make_pair( offset / 5, offset % 5 );
}
// Prints the key presses moving the cursor from cp to p: horizontal moves
// first ('r'/'l'), then vertical ('d'/'u'), terminated by '!' (select).
void move( const std::pair<int,int>& cp, const std::pair<int,int>& p )
{
    int dy = p.first - cp.first;
    int dx = p.second - cp.second;
    for( ; dx > 0; --dx ) std::cout << 'r';
    for( ; dx < 0; ++dx ) std::cout << 'l';
    for( ; dy > 0; --dy ) std::cout << 'd';
    for( ; dy < 0; ++dy ) std::cout << 'u';
    std::cout << '!' << std::endl;
}
// Emits the key-press sequence that types `word` on the keypad, starting
// from 'a' at (0,0).  Fixes the signed/unsigned mismatch of
// `int i < word.size()` by iterating over the characters directly.
void key_sequence( const std::string& word )
{
    std::pair<int,int> cp = std::make_pair( 0, 0 );   // cursor position
    for( std::string::const_iterator it = word.begin(); it != word.end(); ++it )
    {
        std::pair<int,int> p = pos(*it);
        move( cp, p );
        cp = p;    // the cursor rests on the letter just selected
    }
}
// Prints the 5x5 keypad layout ('a'..'y') plus the lone 'z' row, then the
// key sequence that types "word".
int main( int argc, char* argv[] )
{
    int k = 0;
    for( int row = 0; row < 5; ++row )
    {
        for( int col = 0; col < 5; ++col, ++k )
            std::cout << static_cast<char>( 'a' + k ) << " ";
        std::cout << std::endl;
    }
    std::cout << static_cast<char>( 'a' + k ) << std::endl;
    std::cout << std::endl;
    key_sequence("word");
    return 0;
}
My idea is the same as Miguel Oliveira's, and the time complexity is O(n log n); here is my code:
#include <iostream>
#include <vector>
#include <queue>
#include <cmath>
// An exponent tuple (i,j,k,l) for 2^i * 3^j * 5^k * 7^l together with the
// resulting value v.  v is now computed with exact integer arithmetic: the
// original used float std::pow(), which can round just below an integer and
// then truncate to the wrong value when cast to int.
struct quadruple
{
    int i, j, k, l;
    int v;
    quadruple( int i, int j, int k, int l )
        : i(i), j(j), k(k), l(l),
          v( ipow( 2, i ) * ipow( 3, j ) * ipow( 5, k ) * ipow( 7, l ) ) {}
    // const-qualified (the original was not), compares by value only.
    bool operator!=( const quadruple& q ) const
    {
        return v != q.v;
    }
private:
    // b^e by repeated multiplication; exponents here stay small.
    static int ipow( int b, int e )
    {
        int r = 1;
        while( e-- > 0 ) r *= b;
        return r;
    }
};
struct quadruple_greater
{
bool operator()( const quadruple& p, const quadruple& q ) const
{
return p.v > q.v;
}
};
// Prints the first n values of the form 2^i * 3^j * 5^k * 7^l in increasing
// order, each with its exponent tuple.  A min-heap yields candidates in
// sorted order; every popped value spawns its four successors.  O(n log n).
void number( int n )
{
    if( n <= 0 ) return;    // nothing to produce (original printed 1 anyway)
    std::vector<quadruple> result;
    result.reserve(n);
    std::priority_queue< quadruple, std::vector<quadruple>, quadruple_greater > min_heap;
    quadruple q( 0, 0, 0, 0 );
    min_heap.push( q );
    result.push_back(q);
    while( !min_heap.empty() )
    {
        q = min_heap.top();
        min_heap.pop();
        // Duplicates leave the heap consecutively, so comparing against the
        // last accepted value is enough to skip them.
        if( result.back() != q ) result.push_back(q);
        // cast fixes the signed/unsigned comparison of the original
        if( result.size() == static_cast<std::size_t>(n) ) break;
        min_heap.push( quadruple( q.i + 1, q.j, q.k, q.l ) );
        min_heap.push( quadruple( q.i, q.j + 1, q.k, q.l ) );
        min_heap.push( quadruple( q.i, q.j, q.k + 1, q.l ) );
        min_heap.push( quadruple( q.i, q.j, q.k, q.l + 1 ) );
    }
    for( std::vector<quadruple>::iterator it = result.begin(); it != result.end(); ++it )
        std::cout << it->v << " (" << it->i <<"," << it->j << "," << it->k << "," << it->l << ")" << std::endl;
}
// Demo driver: the first nine numbers whose only prime factors are
// 2, 3, 5 and 7.
int main( int argc, char* argv[] )
{
    number(9);
}
I find an algorithm could achieve the goal in O(n) time, and here is the procedure:
1) scan the tgt array to find the target position of each element i of src, and store it in an array tgt_index, so tgt_index[2] means where 2 should go in the tgt array;
2) scan the src array to find out the position of zero, denoted as zero_pos;
3) for each element src[i], let its target position in tgt be ti = tgt_index[src[i]]
3.1) if src[i] == 0, just swap src[i] and src[ti], and change zero_pos to ti;
3.2) if src[i] != 0, first swap src[ti] with src[zero_pos] (src[ti] has zero now), then swap src[i] with src[ti] (src[i] has zero now), and at last swap src[zero_pos] with src[i] (src[zero_pos] has zero now).
Here is my code in C++:
#include <iostream>
#include <vector>
// Writes the first n elements of a, space-separated, then ends the line.
void print( int a[], int n )
{
    for( int* p = a; p != a + n; ++p ) std::cout << *p << " ";
    std::cout << std::endl;
}
// Rearranges src in place until it matches tgt, using only swaps that
// involve the single 0 element (the "empty slot").  Both arrays are assumed
// to be permutations of 0..n-1.  Every swap is traced to stdout.
//
// Fix: the original three-swap scheme broke when the target slot of src[i]
// was the slot currently holding 0 (e.g. src={1,0,2}, tgt={2,1,0} ended as
// {1,2,0}).  This version uses the classic cycle-chasing method: while the
// empty slot's final content is a real value, pull that value in; when the
// empty slot is "home" (tgt wants 0 there), park a misplaced value in it to
// open the next cycle.  Each element is placed at most once and the
// unplaced-scan pointer is monotonic, so the total work is O(n).
void rearrange( int src[], int tgt[], int n )
{
    if( n <= 0 ) return;                       // nothing to do
    std::vector<int> where( n, -1 );           // where[v] = current index of value v in src
    for( int i = 0; i < n; ++i ) where[ src[i] ] = i;
    int zero_pos = where[0];
    std::cout << "src : ";
    for( int i = 0; i < n; ++i ) std::cout << src[i] << " ";
    std::cout << std::endl;
    int scan = 0;                              // first possibly-misplaced index
    while( true )
    {
        int from;
        if( tgt[zero_pos] != 0 )
        {
            // the empty slot's final value is known: fetch it
            from = where[ tgt[zero_pos] ];
        }
        else
        {
            // the empty slot is home; displace any misplaced value into it
            while( scan < n && src[scan] == tgt[scan] ) ++scan;
            if( scan == n ) break;             // everything placed
            from = scan;
        }
        std::swap( src[zero_pos], src[from] );
        where[ src[zero_pos] ] = zero_pos;
        where[0] = from;
        zero_pos = from;
        std::cout << "src : ";
        for( int i = 0; i < n; ++i ) std::cout << src[i] << " ";
        std::cout << std::endl;
    }
    std::cout << "tgt : ";
    for( int i = 0; i < n; ++i ) std::cout << tgt[i] << " ";
    std::cout << std::endl;
}
// Demo driver for the zero-swap rearrangement.
int main( int argc, char* argv[] )
{
    int src[] = { 1, 0, 2, 3 };
    int tgt[] = { 0, 2, 3, 1 };
    rearrange( src, tgt, 4 );
}
Brilliant idea of sonesh, and here is my implementation code in C++:
#include <iostream>
#include <stack>
#include <random>
#include <vector>
// A binary-search-tree node.
struct node
{
    int data;
    node *left, *right;
    node( int data, node* left = nullptr, node* right = nullptr ) : data(data), left(left), right(right){}
};
typedef node* BST_Iter;
// Minimal BST supporting insertion; duplicates are kept in the left subtree.
struct BST
{
private:
    // Post-order deletion of the subtree rooted at it.
    void delete_subtree( BST_Iter it )
    {
        if( it != nullptr )
        {
            delete_subtree(it->left);
            delete_subtree(it->right);
            delete it;
        }
    }
public:
    BST_Iter root;
    BST() : root(nullptr) {}
    ~BST(){ delete_subtree(root); }
    // Non-copyable: the tree owns raw node pointers, so a default copy
    // would double-delete them.
    BST( const BST& ) = delete;
    BST& operator=( const BST& ) = delete;
    void push( int v )
    {
        if( root == nullptr )
        {
            root = new node(v);
            return;
        }
        BST_Iter prev_it, it = root;
        while( it != nullptr )
        {
            prev_it = it;
            if( v <= it->data ) it = it->left;
            else it = it->right;
        }
        // Attach on the same side the descent took for equal keys.  The
        // original tested `v < prev_it->data` here while descending on
        // `<=`, so inserting a duplicate could overwrite (and leak) an
        // existing right child.
        if( v <= prev_it->data ) prev_it->left = new node(v);
        else prev_it->right = new node(v);
    }
};
// Searches the BST for two DISTINCT nodes whose values sum to x, using a
// forward (ascending) and a backward (descending) in-order traversal driven
// by two explicit stacks -- the BST analogue of the two-pointer scan over a
// sorted array.  Each node is pushed and popped at most once per stack, so
// the running time is O(n).
//
// Fix: after the loop the two cursors may rest on the SAME node (it == jt);
// the original still reported success when 2 * it->data == x, i.e. it used
// one element twice (tree {1,4,6} with x = 8 printed "4+4=8").  The final
// test now requires it != jt.
void sum_of_two( const BST& tr, int x )
{
    if( tr.root == nullptr ) return;
    std::stack<BST_Iter> min_stack, max_stack;
    // descend to the minimum, stacking the path
    BST_Iter it = tr.root;
    while( it != nullptr )
    {
        min_stack.push(it);
        it = it->left;
    }
    // descend to the maximum, stacking the path
    BST_Iter jt = tr.root;
    while( jt != nullptr )
    {
        max_stack.push(jt);
        jt = jt->right;
    }
    it = min_stack.top(), jt = max_stack.top();
    min_stack.pop(), max_stack.pop();
    BST_Iter kt;
    while( it != jt && it->data <= jt->data )
    {
        if( it->data + jt->data == x ) break;
        if( it->data + jt->data < x )
        {
            // advance the ascending cursor to its in-order successor
            kt = it->right;
            while( kt != nullptr )
            {
                min_stack.push(kt);
                kt = kt->left;
            }
            if( min_stack.empty() ) break;
            it = min_stack.top();
            min_stack.pop();
        }
        else
        {
            // retreat the descending cursor to its in-order predecessor
            kt = jt->left;
            while( kt != nullptr )
            {
                max_stack.push(kt);
                kt = kt->right;
            }
            if( max_stack.empty() ) break;
            jt = max_stack.top();
            max_stack.pop();
        }
    }
    if( it != jt && it->data + jt->data == x )
        std::cout << "Elements : " << it->data << "+" << jt->data << "=" << x << std::endl;
    else
        std::cout << "No Elements makes the sum : " << x << std::endl;
}
// Interactive driver: builds a BST over 10 random values and repeatedly
// reads a target sum from stdin.  Fix: the original `while(true)` spun
// forever once std::cin failed (EOF or non-numeric input); the loop now
// exits on stream failure.
int main( int argc, char* argv[] )
{
    int n = 10;
    int x;
    std::vector<int> vec;
    vec.reserve(n);
    std::default_random_engine gen;
    std::uniform_int_distribution<int> dist( 0, 100 );
    std::cout << "Elements : ";
    for( int i = 0; i < n; ++i )
    {
        vec.push_back( dist(gen) );
        std::cout << vec.back() << " ";
    }
    std::cout << std::endl;
    while(true)
    {
        std::cout << "------------------------" << std::endl;
        std::cout << "Target : ";
        if( !( std::cin >> x ) ) break;   // EOF or bad input: quit cleanly
        std::cout << std::endl;
        BST tr;   // rebuilt per query, as in the original
        for( std::vector<int>::iterator it = vec.begin(); it != vec.end(); ++it ) tr.push(*it);
        sum_of_two( tr, x );
        std::cout << "------------------------" << std::endl;
    }
    return 0;
}
As for the first question, I think the "select" or "median of medians" algorithm could solve it perfectly, as the time complexity is O(n) to find the kth order statistic. For the second one, I think Ahbi's idea is good, and an AVL search tree could also do the job, because the root divides the elements into two subtrees — the left subtree is less than the root element and the right subtree is larger than the root element — and the difference of the two subtrees' sizes should be at most 1. The median can be obtained in three cases:
1) the balance factor of the root is 0, then the root is the median, which is O(1);
2) the balance factor of the root is -1, which means the size of the right subtree has one more element than the left subtree, and the root should be the median;
3) the balance factor of the root is 1, and this case maybe tricky, because the root is the upper median instead of the lower median(the number of element is even, definitely), and in order to get the lower median, we need to get the rightest element of the left subtree, whose time complexity is O(logn).
The insertion time complexity of the AVL search tree is O(logn), of course.
My idea is that the range's lower bound is the minimum of the largest elements of the k lists, so range_lower_bound = min( 26, 20, 30 ) in the example. Then we try to find a set (call it smallest_larger_than_lower_bound_set, = { 24, 22 } in the example) of elements, each of which is the smallest element larger than range_lower_bound in its array; if an array contains range_lower_bound itself, we omit that array, because the range will already contain at least one of its elements (the range_lower_bound, actually). Then range_upper_bound = max( smallest_larger_than_lower_bound_set ).
The proof is simple, if lower bound v > range_lower_bound, then array that contains the range_lower_bound as largest element will not have a element in the range, so range_lower_bound is the optimal lower bound;
if the upper bound v < range_upper_bound (and obviously v >= range_lower_bound), then the array containing range_upper_bound will not have an element in the range, because there are only two kinds of elements in that array: elements < range_lower_bound and elements >= range_upper_bound > v; so no element of that array will occur in the range.
And here is my code in C++, the time complexity is O(kn), where k is the number of lists, and n is the number of elements in the list:
#include <iostream>
#include <vector>
// Given k sorted lists, prints the smallest closed range [min,max] that
// contains at least one element from every list.  O(k*n).
//   min = the smallest of the lists' maxima (no valid range can end below
//         it: the list whose maximum equals min has no larger element);
//   max = over the lists NOT containing min, the largest of each list's
//         smallest element exceeding min.
// Fixes: guards against an empty list-of-lists and empty inner lists (the
// original called vec[0].back() unconditionally), and uses std::size_t loop
// counters to avoid signed/unsigned comparisons.
void min_range( const std::vector< std::vector<int> >& vec )
{
    if( vec.empty() ) return;                 // no lists: nothing to cover
    std::size_t k = vec.size();
    for( std::size_t i = 0; i < k; ++i )
        if( vec[i].empty() ) return;          // an empty list can never be covered
    std::vector<int> ranges( k, 0 );          // index of first element > min, or -1 if the list contains min
    int min = vec[0].back();
    for( std::size_t i = 1; i < k; ++i ) min = std::min( min, vec[i].back() );
    for( std::size_t i = 0; i < k; ++i )
    {
        for( std::size_t j = 0; j < vec[i].size(); ++j )
        {
            if( vec[i][j] == min )
            {
                ranges[i] = -1;               // covered by min itself
                break;
            }
            else if( vec[i][j] > min )
            {
                ranges[i] = static_cast<int>(j);
                break;
            }
        }
    }
    int max = min;
    for( std::size_t i = 0; i < k; ++i )
    {
        if( ranges[i] == -1 ) continue;
        max = std::max( max, vec[i][ranges[i]] );
    }
    std::cout << "[" << min << "," << max << "]" << std::endl;
}
// Demo driver: the three sorted lists from the original example.
int main( int argc, char* argv[] )
{
    int a0[] = { 4, 10, 15, 24, 26 };
    int a1[] = { 0, 9, 12, 20 };
    int a2[] = { 5, 18, 22, 30 };
    std::vector< std::vector<int> > vec;
    vec.push_back( std::vector<int>( a0, a0 + 5 ) );
    vec.push_back( std::vector<int>( a1, a1 + 4 ) );
    vec.push_back( std::vector<int>( a2, a2 + 4 ) );
    min_range(vec);
    return 0;
}
I think Loler is right, the algorithm could be implemented in O(n), and here is my code:
#include <iostream>
#include <vector>
// Prints max over all split points i of |S1 - S2|, where S1 is a contiguous
// subarray inside a[0..i] and S2 a contiguous subarray inside a[i+1..n-1].
// Time O(n), space O(n).
//
// Fixes: max_diff was seeded with abs(sum of the whole array), which is not
// the difference of two disjoint subarrays (e.g. {5,5} reported 10 instead
// of 0) -- it now starts at 0, the true minimum of an absolute value; n < 2
// admits no pair of disjoint subarrays and reports 0; the unqualified abs()
// (never #included) is replaced by a max() of both signs.
void max_sum_diff( int a[], int n )
{
    if( n < 2 )
    {
        std::cout << "Max Diff: " << 0 << std::endl;
        return;
    }
    std::vector<int> pmax(n,0), // max contiguous sum ending at a[i]
                     pmin(n,0), // min contiguous sum ending at a[i]
                     smax(n,0), // max contiguous sum starting at a[i]
                     smin(n,0); // min contiguous sum starting at a[i]
    pmax[0] = pmin[0] = a[0];
    for( int i = 1; i <= n-1; ++i )
    {
        pmax[i] = std::max( pmax[i-1] + a[i], a[i] );
        pmin[i] = std::min( pmin[i-1] + a[i], a[i] );
    }
    smax[n-1] = smin[n-1] = a[n-1];
    for( int i = n-2; i >= 0; --i )
    {
        smax[i] = std::max( smax[i+1] + a[i], a[i] );
        smin[i] = std::min( smin[i+1] + a[i], a[i] );
    }
    // Turn the "ending/starting at i" arrays into prefix/suffix extrema:
    // pmax[i] = best sum within [0,i], smax[i] = best sum within [i,n-1];
    // likewise for pmin and smin.
    for( int i = 1; i <= n-1; ++i )
    {
        pmax[i] = std::max( pmax[i-1], pmax[i] );
        pmin[i] = std::min( pmin[i-1], pmin[i] );
    }
    for( int i = n-2; i >= 0; --i )
    {
        smax[i] = std::max( smax[i+1], smax[i] );
        smin[i] = std::min( smin[i+1], smin[i] );
    }
    int max_diff = 0;
    for( int i = 0; i <= n - 2; ++i )
    {
        // |x - y| == max(x - y, y - x)
        int diff1 = std::max( pmax[i] - smin[i+1], smin[i+1] - pmax[i] );
        int diff2 = std::max( pmin[i] - smax[i+1], smax[i+1] - pmin[i] );
        max_diff = std::max( diff1, max_diff );
        max_diff = std::max( diff2, max_diff );
    }
    std::cout << "Max Diff: " << max_diff << std::endl;
}
// Demo driver from the original post.
int main( int argc, char* argv[] )
{
    int a[] = { 2, -1, -2, 1, -4, 2, 8 };
    max_sum_diff( a, static_cast<int>( sizeof(a) / sizeof(a[0]) ) );
    return 0;
}
I found that the max element belongs at the rightmost position (call it pos) whose value is zero in the array b, so every time I insert the max element of the remaining elements, all the elements to the right of pos in b should be decremented by one, because one larger element has been eliminated on the left; carry on until no elements are left in a. Here is my code (O(n^2); I think my algorithm supports duplicate heights):
#include <iostream>
#include <vector>
#include <algorithm>
#include <functional>
//time complexity is n^2
// Reconstructs the left-to-right arrangement c of people with heights a,
// where b[i] counts the people dominating position i from the left:
// repeatedly take the tallest remaining height and drop it into the
// rightmost still-empty slot whose b value has reached 0, then decrement
// every b to the right of that slot (one dominating person has just been
// placed to its left).  Supports duplicate heights.  O(n^2).
//
// Fix: with inconsistent a/b input, no eligible slot may remain; j then
// stayed -1 and the original wrote c[-1] (undefined behavior).  We now bail
// out without printing instead.  Note: a is sorted and b is consumed in
// place, as in the original.
void rank( int a[], int b[], int n )
{
    std::vector<int> c( n, -1 );                 // -1 marks an empty slot
    std::sort( a, a+n, std::greater<int>() );    // tallest first
    for( int i = 0; i < n; ++i )
    {
        int j = n - 1;
        for( ; j >= 0; --j )
            if( b[j] == 0 && c[j] == -1 ) break;
        if( j < 0 ) return;                      // inconsistent input: no slot available
        for( int k = j + 1; k < n; ++k ) --b[k];
        c[j] = a[i];
    }
    for( int i = 0; i < n; ++i ) std::cout << c[i] << " ";
    std::cout << std::endl;
}
// Demo driver.  The smaller data set from the original post is kept as a
// comment for quick sanity checks:
//   int a[] = { 3, 2, 1 };  int b[] = { 0, 1, 1 };  int n = 3;
int main( int argc, char* argv[] )
{
    int a[] = { 7, 4, 5, 9, 8, 1, 4, 3 };
    int b[] = { 0, 0, 2, 2, 2, 4, 2, 2 };
    rank( a, b, static_cast<int>( sizeof(a) / sizeof(a[0]) ) );
    return 0;
}
Yes, the code is very elegant, but is it more efficient than a more straightforward one? As I know, the KMP string matcher takes O(m+n) time, where m is the number's length and n is the length of "123456789 9876543210". For the straightforward approach, we could just scan the number and determine whether it is valid, which takes only O(m).
Here is my code in C++ for the two versions:
- weijjcg October 09, 2013