A simple recursive algorithm. I don't check for null because I assume the Node[] children array contains only non-null entries.
public class RightSibling {
    class Node {
        public Node[] children;
        public Node right;
    }
    public void populateRightSiblings(Node node) {
        for(int i=0; i<node.children.length; i++) {
            Node child = node.children[i];
            if(i < node.children.length - 1) {
                child.right = node.children[i+1];
            }
            populateRightSiblings(child);
        }
    }
}
Given no other constraint, I would just store the list of toggles in a LinkedList, then iterate through the LinkedList to determine whether the current state is on or off.
import java.util.*;

public class LightBulbs {
    class Interval {
        int start;
        int end;
        public Interval(int _start, int _end) {
            start = _start;
            end = _end;
        }
        public boolean contains(int n) {
            return n >= start && n <= end;
        }
    }
    LinkedList<Interval> intervals = new LinkedList<Interval>();
    public LightBulbs(int n) {
    }
    public boolean isOn(int i) {
        boolean on = false;
        for(Interval interval : intervals) {
            if(interval.contains(i)) {
                on = !on;
            }
        }
        return on;
    }
    public void toggle(int start, int end) {
        intervals.add(new Interval(start, end));
    }
}
The problem statement seems ambiguous because it doesn't specify whether all elements in A appear in B and, if so, whether they appear in the same order as in B.
While the example seems to suggest this is the case, maybe the candidate is supposed to ask follow-up questions to clarify.
If all elements in A appear in the same order as in B, then besides the straightforward HashSet solution, we can simply iterate both arrays and compare the elements one by one, as in the sketch below. No need for any fancy encoding or data structure.
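Here is a minimal sketch of that two-pointer scan. Since the original problem statement isn't shown here, the method name and the assumption that we want the elements of B that also occur in A (in the same relative order) are mine:

import java.util.*;

class CommonInOrder {
    // Assumes A's elements occur in B in the same relative order.
    static List<Integer> commonInOrder(int[] a, int[] b) {
        List<Integer> result = new ArrayList<Integer>();
        int i = 0; // pointer into a
        for(int j = 0; j < b.length && i < a.length; j++) {
            if(a[i] == b[j]) { // match: record it and advance both pointers
                result.add(a[i]);
                i++;
            }
        }
        return result;
    }
}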
My solution is similar to the one above that uses multiple arrays. Basically I use an array to store all the current elements, plus a HashMap that stores the position of each element. The only tricky part is that when removing an element, we need to move the last element of the array into its slot so that the array has no gaps. The HashMap supports insert/delete/search in O(1), while the array lets us return a random element in O(1).
import java.util.*;

class EfficientOperations {
    static int count = 0;
    static HashMap<Integer, Integer> positions = new HashMap<Integer, Integer>();
    static int[] elements = new int[1000]; // fixed capacity for simplicity
    static void insert(int n) {
        if(contains(n))
            return;
        positions.put(n, count);
        elements[count++] = n;
    }
    static void delete(int n) {
        if(!contains(n))
            return;
        int pos = positions.get(n);
        positions.remove(n);
        count--;
        if(pos != count) {
            // move the last element into the gap so the array stays contiguous
            elements[pos] = elements[count];
            positions.put(elements[pos], pos);
        }
    }
    static boolean contains(int n) {
        return positions.containsKey(n);
    }
    static Integer random() {
        if(count == 0)
            return null;
        int pos = (int)(Math.random() * count);
        return elements[pos];
    }
}
Based on Anonymous's O(sqrt(n)) algorithm. Not entirely sure about the ceil(sqrt(n) + 0.0001) part, though it appears to be a guard against floating-point error when n is a perfect square.
from math import *

def num_factors(n):
    factors = 2
    for i in range(2, int(ceil(sqrt(n) + 0.0001))):
        if n % i == 0:
            factors += 1 if i*i == n else 2
    return factors

print(num_factors(12))
Not sure if I understand the question correctly, but basically I throw all the start & end points into a set. Then I iterate through the sorted set. If I am at the first element, I set the variable "prev" to it. Otherwise I add a new interval (prev, curr) and set prev to curr + 1 (i.e. the day after curr).
def divide(intervals):
    s = set()
    for interval in intervals:
        s.add(interval[0])
        s.add(interval[1])
    results = []
    prev = None
    for d in sorted(s):
        if prev is None:
            prev = d
        else:
            results.append([prev, d])
            prev = d + 1
    return results

print(divide([[34, 51], [37, 64]]))
This seems like a rather long solution, unless I am missing a simpler way to do this. Basically I first construct a tree from the array. Then I compute the answer recursively, passing along the set of nodes I can traverse from in each recursive call.
import java.util.*;

class CountIdenticalTrees {
    static class Node {
        int value;
        Node left;
        Node right;
        public Node(int value) {
            this.value = value;
        }
        public void add(int value) {
            Node n = this;
            while(true) {
                if(n.value > value) {
                    if(n.left == null) {
                        n.left = new Node(value);
                        break;
                    } else {
                        n = n.left;
                    }
                } else {
                    if(n.right == null) {
                        n.right = new Node(value);
                        break;
                    } else {
                        n = n.right;
                    }
                }
            }
        }
    }
    static public int count(int[] arr) {
        Node root = null;
        for(int i : arr) {
            if(root == null)
                root = new Node(i);
            else root.add(i);
        }
        HashSet<Node> nodes = new HashSet<Node>();
        nodes.add(root);
        return countIdentical(nodes, arr.length);
    }
    static public int countIdentical(HashSet<Node> nodes, int num) {
        if(nodes.size() == num)
            return 1;
        int result = 0;
        HashSet<Node> clone = (HashSet<Node>)nodes.clone();
        for(Node n : clone) {
            for(int i=0; i<2; i++) {
                Node child = (i == 0 ? n.left : n.right);
                if(child == null || nodes.contains(child))
                    continue;
                nodes.add(child);
                result += countIdentical(nodes, num);
                nodes.remove(child);
            }
        }
        return result;
    }
    public static void main(String[] args) {
        int[] arr = new int[args.length];
        for(int i=0; i<args.length; i++)
            arr[i] = Integer.parseInt(args[i]);
        System.out.println(count(arr));
    }
}
Maintain 2 hashmaps of "before" and "after" relationships. In this example, before['A'] = {'B','C'} while after['B'] = {'A'} etc. Keep finding the next module m where before[m] is empty. Then print this module, look up the modules that depend on it from after[m], and remove m from all the corresponding hashmap values.
def build_order(orders):
    before = {}
    after = {}
    for order in orders:
        for module in order:
            before[module] = set()
            after[module] = set()
    for order in orders:
        module = order[0]
        dependents = order[1:]
        for d in dependents:
            before[module].add(d)
            after[d].add(module)
    while len(before) > 0:
        for module in list(before.keys()):
            if len(before[module]) == 0:
                print(module)
                for d in after[module]:
                    before[d].remove(module)
                del before[module]
                break

build_order(['ABC', 'BDEF', 'DF'])
Assuming that "sum of numbers from array" means this positive integer cannot be one of the numbers in the array nor the sum of any subset of them. Then I am going to create a set that contains all possible subset sums of the array. If the array has n elements then the set can contain as many as 2^n values. After that, I just keep checking for the smallest positive integer that doesn't exist in this set.
def smallest_missing(arr):
    sums = {0}
    for i in arr:
        sums |= {i + x for x in sums}
    i = 1
    while i in sums:
        i += 1
    return i

print(smallest_missing([1, 2, 3]))
That's a good point and I forgot about that. Instead of setting variable p to each "landing cell", I should have set it to the "opponent cell" instead.
So for the first if(direction == 0), I should have set:
p = new Position(r-1, c-1);
I will edit the code above accordingly. Thanks for catching that, and let me know if you spot any other errors.
I am not the one who posted this question, but here are my thoughts:
(1) It's relatively awkward to assume that 3 consecutive characters map to one letter.
(2) The input pattern may have only 2 distinct letters, but why not challenge yourself and generalize it to allow all 26?
(3) This question is ambiguous. For example, can 2 different letters map to the same string? Can a letter map to an empty string?
Since you aren't dealing with a real interviewer and can't clarify your doubts, maybe just make some reasonable assumptions, state them, and start writing code. If the problem becomes too simple under your two assumptions, then drop those assumptions.
Create 2 hashtables that store the children/parents relationships among the letters. Then run topological sort. Code seems kinda long though.
import java.util.*;

class NewOrder {
    static String findOrdering(String[] words) {
        HashMap<Character, HashSet<Character>> children = new HashMap<Character, HashSet<Character>>();
        HashMap<Character, HashSet<Character>> parents = new HashMap<Character, HashSet<Character>>();
        for(String word : words) {
            for(Character ch : word.toCharArray()) {
                if(children.containsKey(ch))
                    continue;
                children.put(ch, new HashSet<Character>());
                parents.put(ch, new HashSet<Character>());
            }
        }
        for(int i=1; i<words.length; i++) {
            String prev = words[i-1];
            String curr = words[i];
            // only compare up to the shorter word to avoid running off the end
            int len = Math.min(prev.length(), curr.length());
            for(int j=0; j<len; j++) {
                char ch = prev.charAt(j);
                char ch2 = curr.charAt(j);
                if(ch != ch2) {
                    children.get(ch).add(ch2);
                    parents.get(ch2).add(ch);
                    break;
                }
            }
        }
        StringBuilder sb = new StringBuilder();
        LinkedList<Character> noParents = new LinkedList<Character>();
        for(Character ch : parents.keySet()) {
            if(parents.get(ch).isEmpty())
                noParents.add(ch);
        }
        while(!noParents.isEmpty()) {
            char ch = noParents.removeFirst();
            sb.append(ch);
            for(Character ch2 : children.get(ch)) {
                parents.get(ch2).remove(ch);
                if(parents.get(ch2).isEmpty())
                    noParents.add(ch2);
            }
        }
        return sb.toString();
    }
    public static void main(String[] args) {
        System.out.println(findOrdering(args));
    }
}
First add a "fence" around the original grid, so that the original grid is surrounded by 2 rows each on the top & bottom, and 2 cols each on the left & right. Then set the value for these fence positions to be -1. That way you can check the opponent cell and landing cell without worrying about array out of bounds. After that, just do DFS.
The following code is untested. It also seems quite long, which would be hard to implement during an actual interview.
import java.util.*;

class MaxJumps {
    static class Position {
        int r;
        int c;
        public Position(int r, int c) {
            this.r = r;
            this.c = c;
        }
        public int hashCode() {
            return 1001*r + c;
        }
        @Override
        public boolean equals(Object o) {
            if(!(o instanceof Position))
                return false;
            Position p = (Position)o;
            return r == p.r && c == p.c;
        }
    }
    static int maxJumps(int[][] grid, int r, int c) {
        int numRows = grid.length;
        int numCols = grid[0].length;
        // create a new grid surrounded by a 2-cell 'fence'
        // with value -1 to help check for boundary conditions
        int[][] newGrid = new int[numRows+4][numCols+4];
        for(int i=0; i<numRows+4; i++) {
            for(int j=0; j<numCols+4; j++) {
                if(i <= 1 || i >= numRows+2 || j <= 1 || j >= numCols+2)
                    newGrid[i][j] = -1;
                else newGrid[i][j] = grid[i-2][j-2];
            }
        }
        // shift the starting coordinates into the fenced grid
        r += 2;
        c += 2;
        int player = newGrid[r][c];
        HashSet<Position> seen = new HashSet<Position>();
        seen.add(new Position(r, c));
        return helper(newGrid, player, r, c, seen);
    }
    static int helper(int[][] grid, int player, int r, int c, HashSet<Position> seen) {
        int max = 0;
        for(int k=0; k<4; k++)
            max = Math.max(max, helper2(grid, player, r, c, seen, k));
        return max;
    }
    static int helper2(int[][] grid, int player, int r, int c, HashSet<Position> seen, int direction) {
        int opponent = (player == 1 ? 2 : 1);
        int over;          // the opponent cell being jumped over
        int land;          // the cell we land on
        int landR, landC;
        Position p;
        if(direction == 0) {
            over = grid[r-1][c-1];
            land = grid[r-2][c-2];
            p = new Position(r-1, c-1);
            landR = r-2; landC = c-2;
        } else if(direction == 1) {
            over = grid[r-1][c+1];
            land = grid[r-2][c+2];
            p = new Position(r-1, c+1);
            landR = r-2; landC = c+2;
        } else if(direction == 2) {
            over = grid[r+1][c-1];
            land = grid[r+2][c-2];
            p = new Position(r+1, c-1);
            landR = r+2; landC = c-2;
        } else {
            over = grid[r+1][c+1];
            land = grid[r+2][c+2];
            p = new Position(r+1, c+1);
            landR = r+2; landC = c+2;
        }
        if(over != opponent || land != 0 || seen.contains(p))
            return 0;
        seen.add(p);
        // keep jumping from the landing cell, not from the jumped-over cell
        int jumps = 1 + helper(grid, player, landR, landC, seen);
        seen.remove(p);
        return jumps;
    }
}
BFS approach. Didn't really test the code.
import java.util.*;

class MinSteps {
    static class Position {
        int r;
        int c;
        public Position(int r, int c) {
            this.r = r;
            this.c = c;
        }
    }
    static int minSteps(int[][] map) {
        int m = map.length;
        int n = map[0].length;
        LinkedList<Position> curr = new LinkedList<Position>();
        curr.add(new Position(0, 0));
        int steps = 0;
        while(steps < m+n) {
            LinkedList<Position> next = new LinkedList<Position>();
            for(Position p : curr) {
                int r = p.r;
                int c = p.c;
                if(r == m-1 && c == n-1)
                    return steps;
                check(r+1, c, map, next);
                check(r, c+1, map, next);
                check(r+1, c+1, map, next);
            }
            curr = next;
            steps++;
        }
        // destination was not reachable
        return Integer.MAX_VALUE;
    }
    static void check(int r, int c, int[][] map, LinkedList<Position> next) {
        int m = map.length;
        int n = map[0].length;
        if(r < m && c < n && map[r][c] == 1) {
            next.add(new Position(r, c));
            map[r][c] = -1; // mark visited so it is not enqueued twice
        }
    }
    public static void main(String[] args) {
    }
}
My recursive solution. Hopefully the code is self-explanatory, but basically I am using a hashmap to keep track of all the letters I have seen and what values I assume they have.
import java.util.*;

class MatchPattern {
    // assumes both pattern & input are non-null
    static boolean match(String pattern, String input, HashMap<Character, String> seen) {
        int len = pattern.length();
        if(len == 0)
            return input.equals("");
        char ch = pattern.charAt(0);
        boolean seenAlready = seen.containsKey(ch);
        if(len == 1) {
            if(seenAlready)
                return input.equals(seen.get(ch));
            else return true;
        } else if(seenAlready) {
            String existing = seen.get(ch);
            if(input.startsWith(existing) && !input.equals(existing))
                return match(pattern.substring(1), input.substring(existing.length()), seen);
            else return false;
        } else {
            int len2 = input.length();
            for(int i=1; i<len2; i++) {
                String prefix = input.substring(0, i);
                HashSet<String> used = new HashSet<String>(seen.values());
                if(used.contains(prefix))
                    continue;
                seen.put(ch, prefix);
                if(match(pattern.substring(1), input.substring(i), seen))
                    return true;
                seen.remove(ch);
            }
            return false;
        }
    }
    public static void main(String[] args) {
        HashMap<Character, String> seen = new HashMap<Character, String>();
        System.out.println(match(args[0], args[1], seen) ? "1" : "0");
    }
}
Create an array that holds the births-deaths offset for each year between 1900 and 2000. Iterate through the birth/death years and increment/decrement the corresponding count. Then just iterate through the counts and keep track of the max count and the corresponding year.
static int maxYear(int[] births, int[] deaths) {
    int[] counts = new int[101];
    for(int i : births)
        counts[i-1900]++;
    for(int i : deaths)
        counts[i-1900]--;
    int max = counts[0];
    int year = 0;
    int pop = counts[0];
    for(int i=1; i<counts.length; i++) {
        pop += counts[i];
        if(pop > max) {
            max = pop;
            year = i;
        }
    }
    return year + 1900;
}
Here's a simple approach. For each email sent, we note the recipients. Suppose the recipients are A,B,C. We then consider all the pairs among them (order matters). In this case the pairs are (A, B) (B, A) (A, C) (C, A) (B, C) and (C, B). We then use a hashtable (call it ht) to record these pairs. For pair (A, B), we first set ht[A][B] to 0 if this value doesn't already exist. Then we increment ht[A][B] by one. Similarly for all the other pairs.
Now suppose the user is composing an email to recipients A & B. We then examine all the values in ht[A] and ht[B]. Each value will basically be a (recipient, count) pair such as (C, 3) or (D, 5). We then combine pairs with the same recipients, like (C, 3) and (C, 2) into (C, 5). Finally, we sort these pairs by the count and simply suggest say the top 5 recipients.
This is quite inefficient and also treats an email with recipients A, B, C the same as 3 emails with recipients A & B and A & C and B & C, so it discounts the significance of A, B, C occurring together in that email.
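For concreteness, here is a rough sketch of that pair-counting idea. The class and method names are mine, and the "top 5" cutoff is just the example figure mentioned above:

import java.util.*;

class RecipientSuggester {
    // ht.get(a).get(b) = number of sent emails in which a and b were co-recipients
    HashMap<String, HashMap<String, Integer>> ht = new HashMap<String, HashMap<String, Integer>>();

    // call once for every sent email, with its full recipient list
    void recordEmail(List<String> recipients) {
        for(String a : recipients) {
            for(String b : recipients) {
                if(a.equals(b))
                    continue;
                HashMap<String, Integer> inner = ht.get(a);
                if(inner == null) {
                    inner = new HashMap<String, Integer>();
                    ht.put(a, inner);
                }
                Integer count = inner.get(b);
                inner.put(b, count == null ? 1 : count + 1);
            }
        }
    }

    // combine the counts for the recipients typed so far and return up to 5 suggestions
    List<String> suggest(List<String> typedSoFar) {
        final HashMap<String, Integer> combined = new HashMap<String, Integer>();
        for(String r : typedSoFar) {
            HashMap<String, Integer> inner = ht.get(r);
            if(inner == null)
                continue;
            for(Map.Entry<String, Integer> e : inner.entrySet()) {
                if(typedSoFar.contains(e.getKey()))
                    continue;
                Integer count = combined.get(e.getKey());
                combined.put(e.getKey(), count == null ? e.getValue() : count + e.getValue());
            }
        }
        List<String> candidates = new ArrayList<String>(combined.keySet());
        Collections.sort(candidates, new Comparator<String>() {
            public int compare(String x, String y) {
                return combined.get(y) - combined.get(x); // descending by count
            }
        });
        return candidates.subList(0, Math.min(5, candidates.size()));
    }
}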
O(n^2) solution. Basically, for each possible remainder modulo M, we keep track of one subset of the input that adds up to that remainder. That's what the HashMap in the code below does; the parameter M can actually be N or 2N.
import java.util.*;

class SubsetSum {
    static List<Integer> findSubset(int[] numbers, int M) {
        int N = numbers.length;
        HashMap<Integer, LinkedList<Integer>> subsets = new HashMap<Integer, LinkedList<Integer>>();
        subsets.put(0, new LinkedList<Integer>());
        for(int n : numbers) {
            HashSet<Integer> keys = new HashSet<Integer>(subsets.keySet());
            for(int i : keys) {
                int sum = (i+n) % M;
                if(sum == 0) {
                    subsets.get(i).add(n);
                    return subsets.get(i);
                }
                if(subsets.containsKey(sum))
                    continue;
                LinkedList<Integer> list = (LinkedList<Integer>)subsets.get(i).clone();
                list.add(n);
                subsets.put(sum, list);
            }
        }
        return null;
    }
    public static void main(String[] args) {
        int len = args.length;
        int[] numbers = new int[len];
        for(int i=0; i<len; i++) {
            numbers[i] = Integer.parseInt(args[i]);
        }
        List<Integer> subset = findSubset(numbers, 2*len);
        if(subset == null) {
            System.out.println("None found");
        } else {
            for(int n : subset)
                System.out.print(n + " ");
        }
    }
}
Okay here's my Java implementation of your idea:
import java.util.*;

class LongestStretch2 {
    static class Seg { int ID; int NextID; }
    static int longest(Seg[] segments) {
        int longest = 0;
        HashMap<Integer, Integer> next = new HashMap<Integer, Integer>();
        // counts.get(id) = length of the chain starting at id, filled in lazily
        HashMap<Integer, Integer> counts = new HashMap<Integer, Integer>();
        for(Seg seg : segments) {
            next.put(seg.ID, seg.NextID);
        }
        for(int id : next.keySet()) {
            int len = 1;
            int currId = id;
            while(true) {
                int nextId = next.get(currId);
                if(counts.containsKey(nextId)) {
                    // reuse the previously computed length of the rest of the chain
                    len += counts.get(nextId);
                    break;
                }
                if(nextId < 0)
                    break;
                currId = nextId;
                len++;
            }
            counts.put(id, len);
            longest = Math.max(longest, len);
        }
        return longest;
    }
    public static void main(String[] args) {
        int len = args.length;
        Seg[] segments = new Seg[len/2];
        for(int i=0; i<len; i+=2) {
            Seg seg = new Seg();
            seg.ID = Integer.parseInt(args[i]);
            seg.NextID = Integer.parseInt(args[i+1]);
            segments[i/2] = seg;
        }
        System.out.println(longest(segments));
    }
}
(1) Create a hashtable that keeps track of each (ID, NextID) pair.
(2) Identify the set of IDs that represent the heads of each list.
(3) For each ID in (2), recursively traverse the hashtable until you arrive at a negative number.
(4) Return the longest traversal length while iterating in (3).
import java.util.*;

class LongestStretch {
    static class Seg { int ID; int NextID; }
    static int longest(Seg[] segments) {
        HashSet<Integer> heads = new HashSet<Integer>();
        HashSet<Integer> tails = new HashSet<Integer>();
        HashMap<Integer, Integer> next = new HashMap<Integer, Integer>();
        for(Seg seg : segments) {
            next.put(seg.ID, seg.NextID);
            heads.add(seg.ID);
            tails.add(seg.NextID);
        }
        heads.removeAll(tails);
        int longest = 0;
        for(int head : heads) {
            int len = 0;
            int current = head;
            while(current >= 0) {
                len++;
                current = next.get(current);
            }
            longest = Math.max(longest, len);
        }
        return longest;
    }
    public static void main(String[] args) {
        int len = args.length;
        Seg[] segments = new Seg[len/2];
        for(int i=0; i<len; i+=2) {
            Seg seg = new Seg();
            seg.ID = Integer.parseInt(args[i]);
            seg.NextID = Integer.parseInt(args[i+1]);
            segments[i/2] = seg;
        }
        System.out.println(longest(segments));
    }
}
After digesting the comments by guruqu & gen-y-s, here's how I understood the dynamic programming solution.
(1) First convert each point to some angle such that they can be sorted, while keeping track of which segments they correspond to - O(n)
(2) Sort the angles - O(nlogn)
(3) You should now end up with an array (eg. [5 2 3 4 2 1 4 3 5 1]). The numbers correspond to the line segments.
(4) Note that in the example above, {534} is the solution. Also notice how the "longest non-contiguous palindrome" in the array is 5x34xx435. It should be easy to see that the numbers constituting the "longest non-contiguous palindrome" are actually the same line segments as the solution.
(5) Finally, due to the cyclic nature, the array could have been shifted like [2 3 4 2 1 4 3 5 1 5] instead. To accommodate this, you actually need to find the 2 "longest non-contiguous palindromes" in the array. In this case they are 34xx43 and 5x5. So the solution is simply the line segments from the 2 "longest non-contiguous palindromes".
(6) The optimal substructure should now be obvious. Basically DP[i][j] contains the optimal solution from point i to point j (inclusive). You then build the DP matrix starting from length 1 to 2n. At each step, set DP[i][j] = (arr[i] == arr[j] ? 1 : 0) + (max(DP[i][k] + DP[k+1][j]) for k=i to j-1). Populating this matrix requires O(n^3) time.
The algorithm takes O(n^3) time and O(n^2) space. Hope I haven't missed anything.
With the same 5-segments example I had above, these are how many intersections each segment has:
(1) 3 intersections
(2) 2 intersections
(3) 2 intersections
(4) 2 intersections
(5) 1 intersection
With your approach, we would first remove (1). Then depending on the ordering, we might remove (3) & (4), thus ending up with (5) & (2), as opposed to the optimal solution of (5), (3), (4).
Consider 5 segments: 2 vertical and 3 horizontal. In other words, we have the following intersecting pairs:
(1, 3)
(1, 4)
(1, 5)
(2, 3)
(2, 4)
(2, 5)
According to this approach, I believe we will have:
S(1) = {1}
S(2) = {1, 2}
S(3) = {1, 2}
S(4) = {1, 2}
S(5) = {1, 2}
So we would pick the 2 vertical ones instead of the 3 horizontal ones.
The equation for S(N+1)_case2 = S(N) + (N+1)th line - I(N+1) should raise a red flag, because basically it's adding 1 line but taking away potentially many. S(N+1)_case2 would only be greater than S(N+1)_case1 if the N+1 line doesn't intersect with any line in S(N). So this is basically a greedy algorithm.
Now that I think about it, isn't this the same as finding the "maximum independent set", which is NP-hard?
To see this, first represent the line segments as a graph instead. Let each line segment be a vertex, and let there be an edge between 2 vertices if the corresponding line segments intersect. Isn't the original problem of finding the maximum subset of L equivalent to finding the maximum subset of vertices such that no 2 of them are adjacent?
Thanks. Now I don't have to struggle to prove it. Here's another counter-example I found:
(1) A vertical line segment
(2) Another vertical line segment
(3) A horizontal line segment that intersects both (1) & (2)
(4) Another horizontal line segment that intersects both (1) & (2)
(5) A line segment that intersects (1)
In this case, the optimal solution would be (5), (3), (4) but my algorithm would probably have returned (5), (2).
Not sure if this works (especially since it's quite simple), so let me know if you can find a counter-example.
(1) For each segment, count how many times it intersects with the other segments - O(n^2)
(2) Sort the segments by their number of intersections - O(nlogn)
(3) Iterate through the segments and include each segment as long as it doesn't intersect any of the current ones - O(n)
So O(n^2) total.
I re-read the problem statement again. It says "every time we eat and discard there are new neighbors being formed per piece." So I am not sure if my interpretation in my previous comment is correct, because, according to my understanding, not all pieces will have new neighbors each time we eat.
- Sunny June 01, 2014
The example is good, but if my interpretation of this "3n pieces" problem is correct, then with the greedy approach you would first pick 10, which makes the 2 adjacent 9s disappear. The array is now 7 1 1, so you would pick 7 and now the array is empty. So the total is 17.
If you first pick the 9 in front, the array will now be 9 7 1, so you would again pick 9, so the total in this case is 18.
I could only come up with the straightforward approach. Basically try eating each slice, marking itself and its two neighbors unavailable, and then compute its volume plus the maximum volume you get from making the recursive call.
class EatPizza {
    static int maxVolume(int[] arr) {
        boolean[] available = new boolean[arr.length];
        for(int i=0; i<available.length; i++)
            available[i] = true;
        return maxVolumeHelper(arr, available, 0);
    }
    static int maxVolumeHelper(int[] vol, boolean[] available, int n) {
        int len = vol.length;
        if(n == len/3)
            return 0;
        int max = 0;
        for(int i=0; i<len; i++) {
            if(available[i]) {
                // find the nearest remaining neighbor on each side,
                // skipping pieces that have already been eaten or discarded
                int right = i+1;
                while(!available[right%len]) {
                    right++;
                }
                int left = i-1;
                while(!available[(left+len)%len]) {
                    left--;
                }
                // eat piece i and discard its two current neighbors
                available[i] = false;
                available[(left+len)%len] = false;
                available[right%len] = false;
                int volume = vol[i] + maxVolumeHelper(vol, available, n+1);
                max = Math.max(max, volume);
                available[i] = true;
                available[(left+len)%len] = true;
                available[right%len] = true;
            }
        }
        return max;
    }
    public static void main(String[] args) {
        int[] arr = new int[args.length];
        for(int i=0; i<args.length; i++)
            arr[i] = Integer.parseInt(args[i]);
        System.out.println(maxVolume(arr));
    }
}
This is my interpretation of the problem. Suppose pizza has 1 2 3 4 5 6 where n=2. If you eat piece 6, then you have to immediately discard its 2 neighbors and so the pizza now becomes 2 3 4, so you would pick piece 4 in the 2nd iteration, resulting in 10 total. But if you were to eat piece 5 originally, you would end up with 1 2 3, at which point you would pick 3, resulting in 8 total.
- Sunny May 31, 2014
I just use recursion plus memoization, by using a HashSet to store all the (index, velocity) pairs I have seen.
import java.util.*;

class CrossArray {
    // stores (index, velocity) states already known to be dead ends
    private static HashSet<String> visited = new HashSet<String>();
    public static boolean canCross(boolean arr[]) {
        visited.clear(); // reset the memo between independent arrays
        return canCrossHelper(arr, 0, 1);
    }
    public static boolean canCrossHelper(boolean arr[], int index, int velocity) {
        String key = "" + index + "-" + velocity;
        if(visited.contains(key))
            return false;
        for(int v=velocity-1; v<=velocity+1; v++) {
            if(v == 0)
                continue;
            int newIndex = v+index;
            if(newIndex >= arr.length)
                return true;
            if(arr[newIndex] && canCrossHelper(arr, newIndex, v))
                return true;
        }
        visited.add(key);
        return false;
    }
    public static void main(String[] args) {
        boolean arr[] = new boolean[]
            {true, true, false, true, false, true, false, false, false, true};
        boolean arr2[] = new boolean[]
            {true, true, false, true, false, true, true, false, false, true, false};
        System.out.println(canCross(arr));
        System.out.println(canCross(arr2));
    }
}
I think it means the cache is an LRU (Least Recently Used) cache. It's implemented as a linked list + hash map (eg. LinkedHashMap in Java). When the cache is presented with a URL, it first checks whether the URL is present in the hash map. If so, it returns the corresponding content and also moves the corresponding element in the linked list to the front to indicate that this URL has just been fetched. Otherwise, it removes the last element from the linked list and also from the hash map, and inserts this new URL at the front of the linked list and into the hash map.
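As a rough sketch, Java's LinkedHashMap can be run in access-order mode to get exactly this behavior; the class name, the capacity parameter, and the URL-to-content String types here are just placeholders:

import java.util.*;

class UrlCache extends LinkedHashMap<String, String> {
    private final int capacity;
    public UrlCache(int capacity) {
        // accessOrder = true: every get() moves the entry to the most-recently-used end
        super(16, 0.75f, true);
        this.capacity = capacity;
    }
    @Override
    protected boolean removeEldestEntry(Map.Entry<String, String> eldest) {
        // evict the least recently used entry once we exceed capacity
        return size() > capacity;
    }
}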
- Sunny March 07, 2014
I think of it this way:
1-26 (26 of them)
AA-ZZ (26*26 of them)
AAA-ZZZ (26*26*26 of them)
So it's relatively easy once we figure out which "level" we are in.
We do that with several variables:
(1) offset - it keeps getting multiplied by 26 in each iteration (eg. 26*26*26 for AAA-ZZZ)
(2) numDigits - number of characters in the current "level" (eg. 3 for AAA-ZZZ)
In each iteration, we first decrement the number by the previous offset, then multiply offset by 26 and check if number-1 is now less than the new offset.
For example, when number is 702, we first decrement it by offset (26) and get 676. Now 676-1=675, which is less than the next offset 26*26=676.
So the task becomes converting 675 to a 2-digit base-26 string, which should give you ZZ.
It should be relatively simple to do the reverse of finding out the index given the string.
class ExcelColumn {
    public static String getColumnName(int num) {
        if(num <= 26)
            return "" + num;
        int offset = 26;
        int numDigits = 1;
        while(true) {
            num -= offset;
            numDigits++;
            offset *= 26;
            if(num - 1 < offset) {
                return helper(num-1, numDigits);
            }
        }
    }
    public static String helper(int num, int numDigits) {
        if(numDigits == 0)
            return "";
        int digit = num % 26;
        return helper(num/26, numDigits-1) + (char)(digit + 'A');
    }
    public static void main(String[] args) {
        System.out.println(getColumnName(Integer.parseInt(args[0])));
    }
}
(1) Create a Coord class that represents a (row, col) pair
(2) Iterate through the matrix and store the occurrences of each character in a HashMap<Character, List<Coord>>
(3) Define a recursive method find(String word, HashSet<Coord> seen, int index, Coord prev). The HashSet<Coord> seen represents the coordinates we have already used (empty initially). The index is the current index of the word we want to find (0 initially). The Coord prev is the previous Coord (null initially). In each recursive call, we look up all possible coordinates for the character at the current index. If no coordinates are found, return false. Otherwise, iterate through the coordinates from the map and recurse.
More details in the code below, but I doubt this is the proper solution as it requires too much code.
import java.util.*;

class FindWord {
    static HashMap<Character, LinkedList<Coord>> map = new HashMap<Character, LinkedList<Coord>>();
    public static boolean isPresent(String[] matrix, String word) {
        int len = matrix[0].length();
        for(int r=0; r<matrix.length; r++) {
            for(int c=0; c<len; c++) {
                char ch = matrix[r].charAt(c);
                if(!map.containsKey(ch))
                    map.put(ch, new LinkedList<Coord>());
                map.get(ch).add(new Coord(r, c));
            }
        }
        return helper(word, new HashSet<Coord>(), 0, null);
    }
    public static boolean helper(String word, HashSet<Coord> seen, int index, Coord prev) {
        if(index >= word.length())
            return true;
        char ch = word.charAt(index);
        if(!map.containsKey(ch))
            return false;
        for(Coord coord : map.get(ch)) {
            if(prev == null || prev.adjacent(coord)) {
                if(seen.contains(coord))
                    continue;
                seen.add(coord);
                if(helper(word, seen, index+1, coord))
                    return true;
                seen.remove(coord);
            }
        }
        return false;
    }
    public static void main(String[] args) {
        int len = args.length;
        String[] matrix = new String[len-1];
        System.arraycopy(args, 0, matrix, 0, len-1);
        System.out.println(isPresent(matrix, args[len-1]));
    }
    static class Coord {
        public int r;
        public int c;
        public Coord(int r, int c) {
            this.r = r;
            this.c = c;
        }
        @Override
        public int hashCode() {
            return 1001*r + c;
        }
        @Override
        public boolean equals(Object o) {
            if(!(o instanceof Coord))
                return false;
            Coord coord2 = (Coord)o;
            return r == coord2.r && c == coord2.c;
        }
        public boolean adjacent(Coord coord2) {
            if(this.equals(coord2))
                return false;
            return (Math.abs(r - coord2.r) <= 1) &&
                   (Math.abs(c - coord2.c) <= 1);
        }
    }
}
First store all elements of A (and their counts) in a HashMap. Then traverse B, and if the count for the current element is greater than 0, add the element to the result and decrement the count.
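A short sketch of that idea, assuming int arrays and that the desired output is the common elements in B's order (the class and method names are mine):

import java.util.*;

class CommonElements {
    static List<Integer> common(int[] a, int[] b) {
        HashMap<Integer, Integer> counts = new HashMap<Integer, Integer>();
        for(int x : a) {
            Integer c = counts.get(x);
            counts.put(x, c == null ? 1 : c + 1); // count occurrences in A
        }
        List<Integer> result = new ArrayList<Integer>();
        for(int x : b) {
            Integer c = counts.get(x);
            if(c != null && c > 0) { // an unmatched copy of x still remains in A
                result.add(x);
                counts.put(x, c - 1);
            }
        }
        return result;
    }
}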
- Sunny August 21, 2015