/// trent.tong
/// C++ implementation.
/// this problem can be solved by first indexing all elements
/// into an unordered_map. and for every element, try to find
/// its pair such that they add up to sum, i.e. sum = a[i]+a[k]
///
/// Time Complexity O(n).
/// Space Complexity O(n).
///
#include <algorithm>
#include <cstdio>
#include <unordered_map>
#include <vector>
using namespace std;
/// Print every distinct pair (x, y) with x + y == sum drawn from a[0..size-1].
///
/// All elements are first counted in an unordered_map; for every a[i] we then
/// look up its complement sum - a[i].  Both endpoints are erased once a pair
/// is printed so no pair is reported twice.
///
/// Time  Complexity O(n) expected.
/// Space Complexity O(n).
///
/// Fix: when the complement equals the element itself (sum == 2*a[i]) the
/// original found the element's OWN map entry and printed a bogus "x x"
/// pair even when x occurs only once.  We now store occurrence counts and
/// require at least two occurrences in that case.
void print_all_sumpair(int *a, int size, int sum) {
    std::unordered_map<int,int> m;   /// value -> occurrence count.
    for (int i = 0; i < size; ++i)
        ++m[a[i]];
    for (int i = 0; i < size; ++i) {
        std::unordered_map<int,int>::iterator it = m.find(sum - a[i]);
        if (it == m.end())
            continue;
        /// a value can only pair with itself if it appears at least twice.
        if (sum - a[i] == a[i] && it->second < 2)
            continue;
        /// erase both endpoints so this pair is printed exactly once.
        m.erase(a[i]);
        m.erase(sum - a[i]);
        printf("%d %d\n", a[i], sum - a[i]);
    }
}
/// Driver: print the pairs summing to 11 in a small sample array.
int main() {
    int values[] = {2, 5, 3, 2, 9, 7, 8, 9};
    const int n = sizeof(values) / sizeof(values[0]);
    print_all_sumpair(values, n, 11);
    return 0;
}
#include <stdio.h>
#include <limits.h>
///
/// this problem can be solved with a sliding window.
///
/// essentially. we want to find the maximum we can do for
/// every possible starting index.
///
/// as the algorithm illustrates, every time we recompute the maxlen
/// that can be achieved with a starting index, we incur only O(1) cost:
/// we simply slide out the previous starting index, and if that
/// frees a fill, we can use the freed fill to extend further.
///
/// NOTE. e - end index here will always advance, as when we free
/// up fills on the left side (start index). there is no way we could
/// do worse. i.e. go backward.
//
/// when we run out of fill, i.e. can not advance further.
/// we need to move the start to try to free up some fills.
///
/// NOTE. we cannot move the end further as we have no fills left,
/// and moving the end backwards would simply give us a suboptimal
/// solution.
///
/// Longest run of 1s achievable in bit array a[0..size-1] when at most m
/// zeros may be flipped to 1 (sliding-window "max consecutive ones").
///
/// NOTE: `a` is modified in place while the window slides: flipped bits are
/// recorded in fill[] and undone once the window's left edge passes them,
/// but zeros flipped inside the final window stay set on return, matching
/// the original behavior.
///
/// Fix: the original dereferenced a[e] BEFORE checking e < size, reading
/// past the end of the array whenever the window ran into the right edge
/// (e.g. an array ending in 1s), and it only updated maxlen when it ran
/// out of fills -- never when it ran out of array.
int maxbit_length(char* a, int size, int m) {
    if (size <= 0)
        return 0;
    int maxlen = 0;
    char fill[size];
    for (int i = 0; i < size; ++i)
        fill[i] = 0;
    int s, e = 0;
    for (s = 0; s < size; ++s) {
        /// grow the window: bounds check FIRST, then look at the bit.
        while (e < size) {
            if (a[e]) {            /// already a 1 -- free to extend.
                ++e;
            } else if (m > 0) {    /// a 0 we can afford to flip.
                fill[e] = a[e] = 1;
                ++e;
                --m;
            } else {               /// a 0 and no fills left.
                break;
            }
        }
        /// window [s, e) is now maximal for this start index.
        maxlen = maxlen < (e - s) ? (e - s) : maxlen;
        /// slide the left edge forward; reclaim the fill if we spent one here.
        if (fill[s]) {
            fill[s] = a[s] = 0;
            ++m;
        }
    }
    return maxlen;
}
/// Driver: flipping at most 2 zeros in the sample bit array.
int main() {
    char bits[] = {1, 1, 0, 1, 1, 0, 0, 1, 1, 1};
    const int n = sizeof(bits) / sizeof(bits[0]);
    printf("%d\n", maxbit_length(bits, n, 2));
    return 0;
}
#include <stdio.h>
#include <limits.h>
/// to find the solution, we need a start and end index.
/// lets first assume the start index is 0. then we can
/// find the end index by going over the array and stop
/// at the position where the sum of all previous #s are
/// greater than the sum.
//
/// once we reach this point, it makes no sense to advance the end
/// further, as that would only increase the sequence length.
/// so we try to adjust the start index.
///
/// as we are shrinking the start index, we do seqsum - a[s].
/// NOTE. there is some sort of dynamic programming going on here.
/// as when we shrink the start index, we do NOT recompute the seqsum
/// for the entire subsequence, instead we only factor in the
/// delta, i.e. a decrease of 1 element.
///
/// eventually the seqsum will fall to or below sum; when this
/// happens, we need to increase the end index again.
///
//////////////////////////////////
/// WHY DOES THIS WORK ? ///
//////////////////////////////////
///
/// we are essentially computing for every start index, whats the
/// best end index we can have.
///
/// e.g. a[] = {2,1,1,4,3,6} target sum = 8.
///
/// we first try to find end index for startindex = 0. endindex is 4
/// here as 2+1+1+4+3>8.
///
/// now what about startindex=1, if endindex is 4. seqsum is 1+1+4+3=9>8
///
/// now what about startindex=2, if endindex is 4, seqsum is 1+4+3=8==8
/// so we need to increase endindex.
///
/// Everytime we move startindex, we have a O(1) cost to recompute seqsum.
/// so Time Complexity O(n) && Space complexity O(1).
///
/// Below is the implementation.
///
/// Length of the shortest contiguous subsequence whose sum is strictly
/// greater than `sum`; INT_MAX when no such subsequence exists.
///
/// Sliding window: for each left edge we extend the right edge just far
/// enough that the running total exceeds `sum`, then shrink from the left.
/// Each edge only ever moves forward, so the scan is O(n) time, O(1) space.
int minsum_length(int *a, int size, int sum) {
    int best = INT_MAX;
    int total = 0;
    int right = 0;
    for (int left = 0; left < size; ++left) {
        /// pull elements in on the right until the window is heavy enough
        /// (or the array runs out).
        while (total <= sum && right < size)
            total += a[right++];
        /// window [left, right) qualifies only if the total really exceeds sum.
        if (total > sum) {
            int len = right - left;
            if (len < best)
                best = len;
        }
        /// slide the left edge out; only its delta needs recomputing.
        total -= a[left];
    }
    return best;
}
/// Driver: shortest subsequence summing to more than 8.
int main() {
    int sample[] = {2, 1, 1, 4, 3, 6};
    const int n = sizeof(sample) / sizeof(sample[0]);
    printf("%d\n", minsum_length(sample, n, 8));
    return 0;
}
This is how I feel this can be solved, assuming we know the maximum length of all words in the dictionary (call it k for now).
STEP 1.
1. try aaaa...aaa # = k to find # of a's in the word
2. try bbbb...bbb # = k to find # of b's in the word
...
26. try zzzz...zzz # = k to find # of z's in the word
we can terminate early if we have found k letters.
STEP 2.
build a trie out of all words' permutations, e.g. for car we need to use acr, arc, rac, rca, cra, car to build the trie.
Look up the letters found in step 1 in the trie. NOTE: order does not matter here, as the trie contains all the possible ways in which the letters can be arranged.
we could get multiple words. this is an artifact of missing ordering from the machine.
#include <stdio.h>
/// return the number of pairs (a,b) of which a^2+b^2==sum
///
/// Time Complexity O(n)
/// Space Complexity O(n^0.5)
///
/// Return the number of pairs (a, b) with 0 <= a < b and a*a + b*b == sum.
///
/// Two pointers on [0, floor(sqrt(sum))]: when the combined square sum is
/// too big, shrink b; too small, grow a; on a hit, move both (moving only
/// one would necessarily over- or under-shoot).
///
/// Time  Complexity O(sqrt(sum)).
/// Space Complexity O(1).
///
/// Fixes vs. the original:
///  - it allocated an int VLA of `sum` elements on the stack (1.39M ints
///    for the sample input -- stack-overflow territory, and contradicting
///    the stated O(n^0.5) space) just to hold the identity mapping
///    elements[i] == i; we work directly on the integers instead.
///  - its fill loop ran i < sum, so for sum == 1 the candidate i == 1 was
///    never considered and the pair (0, 1) was missed.
///  - a*a + b*b is computed in long long to avoid int overflow for large sum.
int double_sqrt_count(int sum) {
    if (sum < 0)
        return 0;
    /// h = floor(sqrt(sum)), found without floating point.
    int h = 0;
    while ((long long)(h + 1) * (h + 1) <= sum)
        ++h;
    int l = 0;
    int count = 0;
    while (l < h) {
        long long curr = (long long)l * l + (long long)h * h;
        if (curr == sum) {
            /// found a solution: advance both ends, since moving only l
            /// (or only h) would make the sum strictly bigger (smaller).
            ++count;
            ++l;
            --h;
        } else if (curr > sum) {
            /// need a smaller b.
            --h;
        } else {
            /// need a bigger a.
            ++l;
        }
    }
    return count;
}
/// Driver: count a^2 + b^2 == 1393940 pairs.
/// Fix: the original had a stray ';' inside the argument list
/// ("double_sqrt_count(1393940);)"), which does not compile.
int main() {
    printf("%d\n", double_sqrt_count(1393940));
    return 0;
}
#include <stdio.h>
/// Print the first `count` digits of the fractional part of d / n,
/// long-division style: repeatedly scale the remainder by 10 and emit
/// the next quotient digit.
///
/// Fix: guard against n == 0, which made every '%' and '/' below
/// undefined behavior.
///
/// NOTE(review): for negative d or n the C remainder can be negative and
/// the printed "digits" follow the sign of d % n -- presumably callers
/// only pass positives; confirm before relying on this for negatives.
void print_ndigit(int d, int n, int count) {
    if (n == 0) {           /// nothing sensible to print for a zero divisor.
        printf("\n");
        return;
    }
    int rem = d % n;        /// remainder after the integer part.
    for (int i = 0; i < count; ++i) {
        rem *= 10;
        printf("%d ", rem / n);   /// next fractional digit.
        rem = rem % n;
    }
    printf("\n");
}
/// Driver: first 6 fractional digits of 3227 / 555.
int main() {
    print_ndigit(3227, 555, 6);
    return 0;
}
//////////////////////////////////////////////////////////////
/// solution 1.
///
/// Time : O(n) n = number of elements.
/// Space: O(n).
//
//////////////////////////////////////////////////////////////
//
#define MAX(a,b) (a>b ? a: b)
/// go over the array from left to right and from right to left.
/// take the maximum of the computed array. This is necessary as
/// the maximum is needed to accomdate the longest increasing or
/// decreasing sequence.
///
/// if current element is bigger than the next element. restart from
/// 1. the reverse order will catch what the next element should be.
/// Minimum total candies so that every child gets at least one candy and
/// any child rated higher than an adjacent child gets more candy than
/// that neighbor.
///
/// Two linear sweeps: up[i] honors the left-neighbor constraint, down[i]
/// the right-neighbor one; taking the larger of the two at each index
/// satisfies both sides at once.  O(n) time, O(n) space.
int get_min_candy(int *a, int size) {
    int up[size], down[size];
    for (int i = 0; i < size; ++i)
        up[i] = down[i] = 1;
    /// left-to-right: a strictly rising rating keeps incrementing.
    for (int i = 1; i < size; ++i)
        if (a[i - 1] < a[i])
            up[i] = up[i - 1] + 1;
    /// right-to-left: same for strictly falling runs.
    for (int i = size - 2; i >= 0; --i)
        if (a[i + 1] < a[i])
            down[i] = down[i + 1] + 1;
    int total = 0;
    for (int i = 0; i < size; ++i)
        total += (up[i] > down[i]) ? up[i] : down[i];
    return total;
}
//////////////////////////////////////////////////////////////
// main.
//////////////////////////////////////////////////////////////
/// Driver: minimum candies for the sample rating list.
int main() {
    int ratings[] = {2, 6, 1, 2, 9, 1, 1, 4, 9, 6, 3, 5, 1};
    const int n = sizeof(ratings) / sizeof(ratings[0]);
    printf("%d\n", get_min_candy(ratings, n));
    return 0;
}
/// Values collected by the in-order walk below, in visit order.
std::vector<int> inorder_list;

/// Append the in-order traversal of the subtree rooted at `root`
/// to the global inorder_list: left subtree, node value, right subtree.
void inorder_traverse(treenode *root)
{
    if (root == nullptr)
        return;
    inorder_traverse(root->left);
    inorder_list.push_back(root->val);
    inorder_traverse(root->right);
}
/// True iff the tree rooted at `root` is a binary search tree.
///
/// An in-order traversal of a BST visits values in non-decreasing order,
/// so we walk the tree and check that the visit sequence is sorted.
///
/// Fixes: clear the global inorder_list first (a second call used to see
/// values left over from the previous call and could answer wrongly), and
/// call std::is_sorted with the iterator pair it requires -- the original
/// container-argument call does not compile as standard C++17.
bool is_bst(treenode *root)
{
    inorder_list.clear();
    inorder_traverse(root);
    return std::is_sorted(inorder_list.begin(), inorder_list.end());
}
This question is basically asking to implement a read-write lock.
/// A read-write lock guarding an opaque payload: many concurrent readers,
/// or one exclusive writer (method bodies are defined below the class).
///
/// NOTE(review): the trailing declarator defines a global object that
/// shares the class's own name -- legal, but it hides the type name from
/// ordinary lookup; presumably unintended, confirm before reusing.
/// NOTE(review): rcount/wcount are plain unsigned, not std::atomic; the
/// init_write() comment below itself acknowledges the spin protocol needs
/// atomic operations to be safe.
class rwRam
{
private:
unsigned rcount; /// number of readers that have entered (or are entering).
unsigned wcount; /// nonzero while a writer holds (or is acquiring) the lock.
void *opaque; // the payload/data.
public:
rwRam(void *o): rcount(0), wcount(0), opaque(o) {}
void init_read();   /// acquire for reading.
void fini_read();   /// release a read hold.
void init_write();  /// acquire for exclusive writing.
void fini_write();  /// release the write hold.
} rwRam;
/// Acquire the lock for reading: announce the reader, then wait for any
/// active writer to finish.
///
/// Fix: the original spun on `while(!wcount);` -- i.e. it waited while
/// there was NO writer, the inverse of its own comment -- so a lone
/// reader hung forever and a reader raced straight past an active writer.
///
/// NOTE(review): rcount/wcount are plain unsigned with no atomics or
/// barriers, so the compiler may hoist the load and spin forever; and a
/// reader that bumps rcount while a writer waits in init_write() can
/// deadlock (writer waits for rcount==0, reader waits for wcount==0).
/// Fixing the clear inversion here; the protocol itself needs
/// std::atomic / a real lock.
void rwRam::init_read()
{
    /// indicate there is a read pending.
    rcount++;
    /// wait till there is no writer.
    while (wcount);
}
/// Release a read hold: one fewer reader outstanding.
void rwRam::fini_read()
{
    --rcount;
}
/// Acquire the lock for writing: wait until no other writer holds the
/// lock, claim it, then drain all active readers before returning.
void rwRam::init_write()
{
/// need to make sure there is no other writer before claiming the lock.
while(wcount); wcount ++; /// NOTE: this test-and-increment must be one atomic step (e.g. cmp_xchg on x86); as written, two writers can both pass the while and both claim the lock.
/// wait for all readers to finish before the caller touches the payload.
while(rcount);
}
/// Release the write hold so waiting readers and writers may proceed.
void rwRam::fini_write()
{
    --wcount;
}
I am not sure I got what this question is asking for entirely. Why can one not copy the linked list by following the next pointers first, and then fix up the random node pointers after the linked list has been copied?
- trent.tong September 14, 2013
- trent.tong May 15, 2015