A trie is much more efficient than a general hash table. Pseudocode for this solution:
match(A):
    n = len(A)
    if n == 0:
        return true
    for i in [0..n-1]:
        if trie.has_key(A[0..i]) and match(A[i+1..n-1]):
            return true
        else if !trie.has_prefix(A[0..i]):
            break
    return false
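Below is a rough runnable sketch of the same idea in Python. The Trie class with has_key/has_prefix is my own minimal dict-based stand-in (not a specific library), the word list is made up, and slices are half-open, so A[:i] plays the role of A[0..i-1].
class Trie(object):
    def __init__(self, words):
        self.root = {}
        for w in words:
            node = self.root
            for ch in w:
                node = node.setdefault(ch, {})
            node["$"] = True  # end-of-word marker

    def _walk(self, s):
        node = self.root
        for ch in s:
            if ch not in node:
                return None
            node = node[ch]
        return node

    def has_prefix(self, s):
        return self._walk(s) is not None

    def has_key(self, s):
        node = self._walk(s)
        return node is not None and "$" in node

def match(trie, A):
    if len(A) == 0:
        return True
    for i in xrange(1, len(A) + 1):
        prefix = A[:i]
        if trie.has_key(prefix) and match(trie, A[i:]):
            return True
        if not trie.has_prefix(prefix):
            break
    return False

trie = Trie(["cat", "cats", "and", "sand", "dog"])
print(match(trie, "catsanddog"))  # True
print(match(trie, "catsandog"))   # False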
This is how I reason about this solution.
Think of plotting the points from the k lists on an x-axis. First consider the minimum point over all the lists; let's assume it comes from list A. Among all ranges that include this point, the smallest one is the range that also covers the current minimum of each of the other k-1 lists. Let's use R to denote this range. So the minimum range covering all k lists is either R or the minimum range over the k-1 lists together with list A after its minimum has been removed. Repeat this process until one of the k lists is empty.
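A minimal Python sketch of this process, assuming the k lists are given as sorted, non-empty lists of integers. It keeps the current head of each list in a min-heap and repeatedly advances the list that contributed the overall minimum; the example data is made up.
import heapq

def smallest_range(lists):
    # Assumes every list is sorted and non-empty.
    heap = [(lst[0], i, 0) for i, lst in enumerate(lists)]
    heapq.heapify(heap)
    current_max = max(lst[0] for lst in lists)
    best = (heap[0][0], current_max)
    while True:
        value, i, j = heapq.heappop(heap)
        if current_max - value < best[1] - best[0]:
            best = (value, current_max)
        if j + 1 == len(lists[i]):
            break  # one list is exhausted; no further range covers all k lists
        nxt = lists[i][j + 1]
        current_max = max(current_max, nxt)
        heapq.heappush(heap, (nxt, i, j + 1))
    return best

print(smallest_range([[4, 10, 15, 24], [0, 9, 12, 20], [5, 18, 22, 30]]))
# expected: (20, 24)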
The solution in Python:
def generate(A):
    n = len(A)
    bitset = 2 ** n
    for i in xrange(1, bitset):
        s = []
        j = 0
        while i > 0:
            if i & 1 == 1:
                s.append(A[j])
            i = i >> 1
            j = j + 1
        print("".join(s))

generate("abc")
Coding @loler's solution in Python:
def pair_find(A, k):
    assert(k > 0)
    counts = [0] * k
    for x in A:
        counts[x % k] += 1
    if counts[0] % 2 != 0:
        return False
    if k % 2 == 0 and counts[k / 2] % 2 != 0:
        return False
    for i in xrange(1, k / 2 + 1):
        if counts[i] != counts[k - i]:
            return False
    return True

A = [0, 1, 2, 3]
assert(pair_find(A, 3))
A = [0, 1, 2, 2, 3, 4]
assert(pair_find(A, 3))
Some simplifications of Alva0930's code:
import java.util.*;

public class ProbabilityOfAlive {
    public static double aliveProb(int x, int y, int N, int n) {
        if (x < 0 || x > (N - 1) || y < 0 || y > (N - 1) || N < 1) return 0.0;
        return aliveProb(x, y, N, n, new HashMap<String, Double>());
    }

    private static double aliveProb(int x, int y, int n, int step, Map<String, Double> map) {
        if (0 == step) return 1.0;
        String key = x + "," + y + "," + step;
        if (map.containsKey(key)) return map.get(key);
        double p = 0.0;
        if (x > 0) p += 0.25 * aliveProb(x - 1, y, n, step - 1, map);
        if (x < n - 1) p += 0.25 * aliveProb(x + 1, y, n, step - 1, map);
        if (y > 0) p += 0.25 * aliveProb(x, y - 1, n, step - 1, map);
        if (y < n - 1) p += 0.25 * aliveProb(x, y + 1, n, step - 1, map);
        map.put(key, p);
        return p;
    }

    public static void main(String[] args) {
        System.out.println("Alive probability = " + aliveProb(0, 0, 1, 1));
        System.out.println("Alive probability = " + aliveProb(0, 0, 2, 1));
        System.out.println("Alive probability = " + aliveProb(0, 0, 3, 3));
        System.out.println("Alive probability = " + aliveProb(1, 1, 3, 1));
        System.out.println("Alive probability = " + aliveProb(2, 2, 10, 20));
    }
}
No algorithms are involved. Only some coding.
def pos(a):
    """Compute the coordinate of a.
    ---- x
    |
    |
    y
    """
    code = ord(a) - ord("a")
    return (code // 5, code % 5)

def line(a, b, less, bigger):
    diff = abs(b - a)
    if diff == 0:
        return
    if a < b:
        arrow = less
    elif a > b:
        arrow = bigger
    print(arrow * diff)

# Move along the x axis
def move_x(a, b):
    line(a, b, "Right ", "Left ")

# Move along the y axis
def move_y(a, b):
    line(a, b, "Down ", "Up ")

# Move from a to b
def move(a, b):
    (a_y, a_x) = pos(a)
    (b_y, b_x) = pos(b)
    # If a is above b, move along the x axis and then the y axis. Otherwise,
    # move along the y axis and then the x axis. This handles invalid
    # movements when "z" is involved.
    if a_y <= b_y:
        move_x(a_x, b_x)
        move_y(a_y, b_y)
    else:
        move_y(a_y, b_y)
        move_x(a_x, b_x)
    print("Select {}".format(b))

def traverse(start, word):
    print("From {} to traverse {}".format(start, word))
    seq = start + word
    n = len(seq)
    for i in xrange(n - 1):
        move(seq[i], seq[i + 1])

traverse("b", "con")
traverse("u", "zq")
traverse("c", "zza")
It is O(n) only if no two keys in the dictionary share a common prefix. If common prefixes are allowed, it seems impossible to do this in O(n) time. More conditions should be provided for this problem.
- yaojingguo December 16, 2013
So far, the post on leetcode (ctrla-ctrlc-ctrlv.html) is the best solution.
- yaojingguo December 16, 2013
If the bsr assembly instruction is available, my solution runs in O(M), where M is the number of 1 bits in the integer and M = O(log N).
#include <stdio.h>
#include <assert.h>

// Time complexity: O(log(pow)). Use it if the bsr assembly instruction is
// unavailable.
int log(unsigned pow)
{
    assert(pow > 0);
    int i = 0;
    while (pow > 1) {
        pow >>= 1;
        i++;
    }
    return i;
}

// O(1)
int bsr(int n)
{
    assert(n > 0);
    int idx;
    asm("bsr %1, %0": "=r" (idx) : "r" (n));
    return idx;
}

int longest_seq_of_zero(unsigned n)
{
    assert(n > 0);
    unsigned pow;
    int max_len, len, prev_log, cur_log;
    max_len = 0;
    prev_log = -1;
    while (n > 0) {
        pow = n & -n;  // extract the lowest set bit
        n ^= pow;      // clear it
        // cur_log = log(pow);
        cur_log = bsr(pow);
        len = cur_log - prev_log - 1;
        if (max_len < len)
            max_len = len;
        prev_log = cur_log;
    }
    return max_len;
}

int main(int argc, const char *argv[])
{
    unsigned int N;
    N = 15; // 1111
    assert(longest_seq_of_zero(N) == 0);
    N = 0x85; // 10000101
    assert(longest_seq_of_zero(N) == 4);
    N = 0x48; // 1001000
    assert(longest_seq_of_zero(N) == 3);
    return 0;
}
log N is not a constant time operation.
- yaojingguo December 14, 2013
A solution based on a 4-state DFA. Python code:
def accept(s):
    dic = {"h": 0, "i": 1, "r": 2, "e": 3}
    s = map(lambda x: dic[x], s)
    # q_0 is 0, the set of accepting states A is {3}
    dfa = [
        [0, 1, None, None],
        [None, 1, 2, None],
        [None, None, 2, 3],
        [0, None, None, 3],
    ]
    q = 0
    for a in s:
        q = dfa[q][a]
        if q is None:
            return False
    return q == 3  # accept iff we end in the accepting state

s = "hhirre"
print(accept(s))
s = "hirehhhirre"
print(accept(s))
s = "hir"
print(accept(s))
The algorithm is based on Josephus_problem#The_general_case on Wikipedia. Time complexity is O(n). The code in C++:
#include <stdio.h>
#include <assert.h>

struct Node {
    int val;
    Node* next;
    Node(int val_, Node* next_ = NULL) :
        val(val_), next(next_) {}
};

int josephus(int n, int k)
{
    int f = 0;
    for (int i = 2; i <= n; i++)
        f = (f + k) % i;
    return f;
}

int len(Node* head)
{
    if (head == NULL) return 0;
    int n = 0;
    Node* q = head;
    do {
        q = q->next;
        n++;
    } while (q != head);
    return n;
}

Node* kill_kth(Node* head, int k)
{
    int n = len(head);
    if (n == 0) return NULL;
    int idx = josephus(n, k);
    printf("idx: %d\n", idx);
    for (int i = 0; i < idx; i++)
        head = head->next;
    return head;
}

Node* list_5_nodes()
{
    Node* node5 = new Node(5);
    Node* node4 = new Node(4, node5);
    Node* node3 = new Node(3, node4);
    Node* node2 = new Node(2, node3);
    Node* node1 = new Node(1, node2);
    node5->next = node1;
    return node1;
}

void test_josephus()
{
    assert(josephus(5, 3) == 3);
    assert(josephus(1, 3) == 0);
}

void test_len()
{
    assert(len(list_5_nodes()) == 5);
}

void test_kill_kth()
{
    Node* head = list_5_nodes();
    Node* only = kill_kth(head, 3);
    printf("%d\n", only->val);
}

int main(int argc, const char *argv[])
{
    test_josephus();
    test_len();
    test_kill_kth();
    return 0;
}
Let's use N to represent the number of points a team must earn to guarantee a
semi-final spot. Divide the 8 teams into two 4-team groups, A and B, and
suppose all the teams in group A reach the semi-finals. N is obtained when the
following two conditions are satisfied:
1. Group A wins the most points possible.
2. The points of group A are distributed evenly across its 4 teams.
Condition 1 is satisfied when every team in group A wins its games against the
teams in group B. In that case group A collects 32 (4 * 8) points from games
against group B. Group A also earns 12 points from the games played within
group A. So group A has 44 (32 + 12) points in total. Distributing 44 points
evenly to 4 teams gives 44/4 = 11, so N is 11 points.
If a more rigorous solution is needed, conditions 1 and 2 need to be proved.
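A quick arithmetic check of these numbers in Python, assuming a win is worth 2 points (which is what the 4 * 8 figure implies):
# Assumes a win is worth 2 points, as the figures in the reasoning above imply.
inter_group = 4 * 4 * 2   # each of the 4 teams in A beats the 4 teams in B
intra_group = 6 * 2       # C(4, 2) = 6 matches played inside group A
total = inter_group + intra_group
print(total)              # 44
print(total / 4)          # 11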
bool is_power_of_2(int n)
{
    if (n < 0) n = -n;
    // n & -n extracts the lowest set bit; n is a power of 2 iff that bit equals n.
    // The n != 0 check keeps 0 from being reported as a power of 2.
    return n != 0 && (n & -n) == n;
}
The number of candidates for the next smallest item is not constant. For details, you can check my solution.
- yaojingguo December 12, 2013
The time complexity of the following algorithm is O(N^2 * lg(PriorityQueue.size)). It is similar to a general k-way merge, but it only puts an item into the priority queue after the item's left and up neighbors (if any) have been output. Since PriorityQueue.size is less than or equal to N, this algorithm is faster than the general k-way merge.
import heapq

def convert_2D_to_1D(aa, n):
    a = [None] * (n * n)
    idx = 0
    pq = []
    visits = {}
    heapq.heappush(pq, (aa[0][0], (0, 0)))
    while len(pq) > 0:
        # print("priority queue size: ", len(pq))
        top = heapq.heappop(pq)[1]
        x = top[0]
        y = top[1]
        a[idx] = aa[x][y]
        idx += 1
        if x < n - 1:
            item = (x + 1, y)
            if y == 0:
                heapq.heappush(pq, (aa[x + 1][y], item))
            elif item in visits:
                heapq.heappush(pq, (aa[x + 1][y], item))
                del visits[item]
            else:
                visits[item] = True
        if y < n - 1:
            item = (x, y + 1)
            if x == 0:
                heapq.heappush(pq, (aa[x][y + 1], item))
            elif item in visits:
                heapq.heappush(pq, (aa[x][y + 1], item))
                del visits[item]
            else:
                visits[item] = True
    return a

# 4 * 4 2D array
n = 4
aa = [
    [1, 2, 4, 6],
    [2, 4, 5, 8],
    [4, 6, 12, 13],
    [8, 10, 18, 19]
]
print(aa)
print(convert_2D_to_1D(aa, n))

# 10 * 10 2D array
n = 10
aa = [[j for j in xrange(n)] for i in xrange(n)]
for i in xrange(n):
    for j in xrange(n):
        aa[i][j] += i
print(aa)
print(convert_2D_to_1D(aa, n))
This solution does not use all the conditions provided in the problem. For it to work, being either row-wise sorted or column-wise sorted is enough.
- yaojingguo December 12, 2013
I came up with the same algorithm as @Loler and implemented it in Python. Here is the code:
def max_contiguous_subarray_abs_diff(A):
    n = len(A)
    assert n > 0
    left_min, left_max = left_min_max(A)
    right_min, right_max = left_min_max(A[::-1])
    max_diff = 0
    for i in xrange(n - 1):
        smallest = min(left_min[i], right_min[n - 2 - i])
        largest = max(left_max[i], right_max[n - 2 - i])
        diff = abs(largest - smallest)
        if max_diff < diff:
            max_diff = diff
    return max_diff

def left_min_max(A):
    n = len(A)
    pre_min = [0] * (n + 1)
    pre_max = [0] * (n + 1)
    pre = [0] * (n + 1)  # pre[i] is the sum of A[:i]
    for i in xrange(0, n):
        pre[i + 1] = pre[i] + A[i]
        pre_min[i + 1] = pre_min[i]
        if pre_min[i + 1] > pre[i + 1]:
            pre_min[i + 1] = pre[i + 1]
        pre_max[i + 1] = pre_max[i]
        if pre_max[i + 1] < pre[i + 1]:
            pre_max[i + 1] = pre[i + 1]
    left_min = [None] * (n - 1)
    left_max = [None] * (n - 1)
    for i in xrange(0, n - 1):
        left_min[i] = pre[i + 1] - pre_max[i]
        left_max[i] = pre[i + 1] - pre_min[i]
    return (left_min, left_max)

A = [2, -1, -2, 1, -4, 2, 8]
print(A, max_contiguous_subarray_abs_diff(A))
A = [8, 1, 1, 1]
print(A, max_contiguous_subarray_abs_diff(A))
A = [4, -1, 7]
print(A, max_contiguous_subarray_abs_diff(A))
@oOZz's solution does not work for an array containing only positive numbers, for example [8, 1, 1, 1].
- yaojingguo December 11, 2013
Some changes to @nihaldps' code:
1. Remove the use of the modulus operator %.
2. Remove the use of the result array.
#include <stdio.h>

void increment_by_one(int a[], int n)
{
    int i;
    int carry;
    carry = 1;
    for (i = n - 1; i >= 0; i--) {
        a[i] += carry;
        if (a[i] >= 10) {
            carry = 1;
            a[i] = 0;
        } else {
            carry = 0;
        }
    }
    if (carry == 1)
        printf("%d", carry);
    for (i = 0; i < n; i++)
        printf("%d", a[i]);
    printf("\n");
}

int main()
{
    int a[4] = {9, 9, 9, 9};
    increment_by_one(a, 4);
    int b[] = {2, 7, 8, 9};
    increment_by_one(b, 4);
    int c[] = {2, 8, 1};
    increment_by_one(c, 3);
    return 0;
}
@Jason, I like your three-reversal solution.
- yaojingguo December 10, 2013
Coding it with O(n) space and O(n) time is too easy. Let A denote the original array and create a new array B:
1. Scan A and copy the negative integers into B.
2. Scan A and copy the positive integers into B (see the sketch below).
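A minimal Python sketch of these two passes, assuming zeros are grouped with the positives and only the negative/non-negative split matters:
def rearrange(A):
    # Pass 1: collect the negative integers in their original order.
    B = [x for x in A if x < 0]
    # Pass 2: append the remaining (non-negative) integers in their original order.
    B.extend(x for x in A if x >= 0)
    return B

print(rearrange([1, -2, 3, -4, 5, -6]))  # [-2, -4, -6, 1, 3, 5]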
Only the elements above or on the main diagonal in the k*k matrix need to be considered.
- yaojingguo December 10, 2013
Algorithm from page 392 of The Art of Computer Programming, Volume 4A.
def partitions_of_n_in_reverse_lexicographic_order_partition(n):
    a = [None] * (n + 1)
    # P1. [Initialize.]
    for m in xrange(2, n + 1):
        a[m] = 1
    m = 1
    a[0] = 0
    while True:
        # P2. [Store the final part.]
        a[m] = n
        if n == 1:
            q = m - 1
        else:
            q = m
        while True:
            # P3. [Visit.]
            yield a[1:m+1]
            if a[q] == 2:
                # P4. [Change 2 to 1+1.]
                a[q] = 1
                q = q - 1
                m = m + 1
            else:
                break
        # P5. [Decrease a[q].]
        if q == 0:
            return
        x = a[q] - 1
        a[q] = x
        n = m - q + 1
        m = q + 1
        # P6. [Copy x if necessary.]
        while n > x:
            a[m] = x
            m = m + 1
            n = n - x

def test(n):
    print("n: {}".format(n))
    for p in partitions_of_n_in_reverse_lexicographic_order_partition(n):
        if len(p) > 1:
            print(p)

test(1)
test(4)
test(5)
Node {
    int id;
    int parent;
    int weight;
    int subtree_weight;
}
Create a direct-address table that maps a node id to its node object.
for node x in node list:
    y = x
    while y is not null:
        y.subtree_weight += x.weight
        y = table[y.parent]
Complexity is O(nh), where h is the height of the tree. If the tree is balanced, that is O(n lg n).
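A small runnable Python sketch of this propagation, using a plain dict as the direct-address table; the node data is made up for illustration.
# Hypothetical node data: id -> {parent id, weight, subtree_weight}.
nodes = {
    1: {"parent": None, "weight": 3, "subtree_weight": 0},
    2: {"parent": 1, "weight": 2, "subtree_weight": 0},
    3: {"parent": 1, "weight": 4, "subtree_weight": 0},
    4: {"parent": 2, "weight": 1, "subtree_weight": 0},
}

for node in nodes.values():
    y = node
    while y is not None:
        # Add this node's own weight to every node on its path to the root.
        y["subtree_weight"] += node["weight"]
        y = nodes.get(y["parent"])

for node_id in sorted(nodes):
    print(node_id, nodes[node_id]["subtree_weight"])
# expected: 1 -> 10, 2 -> 3, 3 -> 4, 4 -> 1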
- yaojingguo December 06, 2013
Besides the file, generate a digest for the file. A node performs a digest check after receiving the file and the digest.
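For example, a minimal sketch of such a digest check in Python; SHA-256 is just one possible digest, and the function names are mine:
import hashlib

def file_digest(path, chunk_size=1 << 20):
    # Stream the file in chunks so large files need not fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while True:
            chunk = f.read(chunk_size)
            if not chunk:
                break
            h.update(chunk)
    return h.hexdigest()

def verify(path, expected_digest):
    # A receiving node recomputes the digest and compares it with the one
    # shipped alongside the file.
    return file_digest(path) == expected_digest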
The data can be pipelined through the nodes; GFS transfers data this way:
Node1 ==> Node2 ==> Node3 ==> Node4
Or the node owning the file can act as a central server:
Node1 ==> Node2
      |=> Node3
      |=> Node4
How data flows between nodes depends on the network and server settings.
A more sophisticated option is a peer-to-peer approach like BitTorrent.
import unittest

def paren(n):
    if n == 0:
        yield ""
    for i in xrange(n):
        for j in paren(i):
            for k in paren(n - i - 1):
                yield j + "(" + k + ")"

def generate(n):
    print("Generating: 2 * {}".format(n))
    for s in paren(n):
        if len(s) == 2 * n:
            print(s)

class Test(unittest.TestCase):
    def test_generate(self):
        generate(0)
        generate(1)
        generate(2)
        generate(3)

if __name__ == "__main__":
    unittest.main()
def mini_sum(A):
    n = len(A)
    if n == 0:
        return 0
    A.sort()
    if n % 2 == 1:
        total = A[0]
        start = 1
    else:
        total = 0
        start = 0
    for i in xrange(start, n, 2):
        total = total * 10 + A[i] + A[i+1]
    return total

A = [1, 2, 7, 8, 9]
print(mini_sum(A))
This idea is really clever. Code it in Python:
- yaojingguo December 27, 2013