Sean Locke
I think this can be solved using BFS with a canary, i.e. nullptr, marking the end of each layer.
push nodes into the queue; when the level ends (on reaching a canary, e.g. nullptr), propagate the canary to mark the next level
relocate the origin from (0, 0) to the fixed point S(x1, y1); then the coordinates of M in the new coordinate system are (x2 - x1, y2 - y1), let us say (x', y')
0. if x'=y'=0, no move.
1. if abs(x') = abs(y'), then one move
2. otherwise, two moves.
It looks to me that it would be solvable using graph, because swapping indices represent the connection between nodes. But, no clue how to use this graph ...
The best solution I think is: recursively apply N rules to generate new strings and it will be a tree from the original string. The branch will be cut if the string appears before. So,
1. a hashMap to track all strings have been generated
2. the lexicographically largest string so far
3. a queue to track all candidate strings (leaves of the tree)
Alg:
1. recursive apply N rules to all strings in the queue until the queue is empty
2. if a new string generated by applying a rule, push it into the queue; otherwise discard
It seems not efficient ... any better idea?
Use a heap (for tracking n most frequent words) and a hashMap (for recording the frequency of every word)
two steps:
1. update hash table:
a. if word never appears, add to hash table
b. if word exists, increase the frequency
2. update the heap (three cases)
case 1: the word in the top list already - redo the heap
case 2: the word is not in the top list and the top list is under filled - add it to the list
case 3: the word is not in the top list and the top list reaches the limit and the new one has a higher frequency than some one in the top list - replace the min and redo the heap
// Bookkeeping record for a single word: its text, how often it has been
// seen so far, and whether it currently sits in the top-k list.
class WordSt {
public:
    string word;   // the word itself
    int freq;      // occurrence count so far
    bool inlist;   // true while this entry is held in the top-k list
    WordSt(const string &w, int f, bool in)
        : word{w}, freq{f}, inlist{in} {}
};
class WordCount {
void injectWord(const string& word)
{
if (wordMap.find(word) == wordMap.end())
wordMap[word] = new WordSt(word, 1, false);
else
wordMap[word]->freq++;
WordSt* ws = wordMap[word];
if (ws->inlist) { // in the list, update
make_heap(tops.begin(), tops.end(), WordStComp());
} else if (tops.size() < toprank) { // not full
tops.push_back(ws);
make_heap(tops.begin(), tops.end(), WordStComp());
ws->inlist = true;
} else if (ws->freq > tops.front()->freq) { // not equal
pop_heap(tops.begin(), tops.end(), WordStComp());
WordSt* discard = tops.back();
discard->inlist = false;
tops.pop_back();
tops.push_back(ws);
push_heap(tops.begin(), tops.end(), WordStComp());
ws->inlist = true;
}
}
vector<string> getTopWords() const
{
vector<string> words;
for_each(tops.begin(), tops.end(),
[&words](WordSt* ws) { words.push_back(ws->word); });
return words;
}
~WordCount()
{
map<string, WordSt*>::const_iterator it;
for (it = wordMap.begin(); it != wordMap.end(); it++)
delete it->second;
}
private:
int toprank;
// vector<WordSt&> tops; // Error! Reference is not assignable
vector<WordSt*> tops;
map<string, WordSt*> wordMap;
};
my solution is generally similar to the one siva.sai.2020 proposed. The idea is the following:
1. either the longest path exists in left or right subtree, or the path includes the root
2. get the size of longest path of both left and right (recursive case)
3. if the path includes the root, then the size of longest path is calculated by the depth of left subtree plus the depth of right subtree plus 1
Here is my implementation with a testing function:
// Binary tree node (C-style declaration kept so both `struct _node`
// and the `Node` alias remain available to existing code).
typedef struct _node {
    int key;             // node payload
    struct _node *left;  // left child, or nullptr
    struct _node *right; // right child, or nullptr
} Node;
// Computes the number of nodes on the longest path (the diameter,
// counted in nodes) of a binary tree.
class Solution {
public:
    // Public entry point: returns 0 for an empty tree.
    int getLargestPath(Node *root)
    {
        if (root == nullptr) return 0;
        int depth = 0;
        return getLargestPath(root, depth);
    }
private:
    // Returns the longest path in the subtree rooted at `root`;
    // `depth` is set to that subtree's height (in nodes) as a side
    // channel for the caller.
    int getLargestPath(Node *root, int& depth)
    {
        if (root == nullptr) {
            depth = 0;
            return 0;
        }
        int leftDepth = 0;
        int rightDepth = 0;
        const int bestLeft = getLargestPath(root->left, leftDepth);
        const int bestRight = getLargestPath(root->right, rightDepth);
        depth = 1 + max(leftDepth, rightDepth);
        // The winner is either fully inside one subtree, or the path
        // bending through this node: left height + right height + root.
        const int throughRoot = leftDepth + rightDepth + 1;
        return max(throughRoot, max(bestLeft, bestRight));
    }
};
class Testing {
public:
    // Randomly generates a binary tree of height at most `limit`.
    // Child pattern per node: 0 - no child; 1 - left child only;
    // 2 - right child only; 3 - both.
    // Outputs: len = longest path (in nodes), depth = height (in nodes)
    // of the generated tree; these are the ground truth for testing.
    // Fixes in this version:
    //  - when limit < 1 the old code still allocated a node but reported
    //    len = depth = 0, so the ground truth disagreed with the actual
    //    tree at the depth limit; it now returns nullptr;
    //  - a fresh random_device/mt19937 was seeded on every recursive
    //    call; one engine is now reused for the whole run.
    static Node *genBinaryTree(int& len, int& depth, int limit)
    {
        if (limit < 1) { len = 0; depth = 0; return nullptr; }
        static mt19937 gen{random_device{}()};
        uniform_int_distribution<> dis(0, 3);
        Node *root = new Node;
        root->left = nullptr;
        root->right = nullptr;
        int guard = dis(gen);
        root->key = guard; // just for the record
        int left = 0, right = 0;
        int d_l = 0, d_r = 0;
        if (guard & 1)
            root->left = genBinaryTree(left, d_l, limit - 1);
        if (guard & 2)
            root->right = genBinaryTree(right, d_r, limit - 1);
        len = max(max(left, right), d_l + d_r + 1);
        depth = max(d_l, d_r) + 1;
        return root;
    }
};
// Exercises Solution::getLargestPath against the ground-truth length
// reported by the random tree generator.
int main(int argc, char** argv)
{
    Solution s;
    int gd_len = 0, gd_depth = 0;
    Node *root = Testing::genBinaryTree(gd_len, gd_depth, 100);
    int path = s.getLargestPath(root);
    if (path == gd_len)
        cout << "Correct!" << endl;
    else // fix: the old version stayed silent when the answers differed
        cout << "Mismatch! expected " << gd_len << endl;
    cout << "Largest Path: " << path << endl;
    return 0;
}
column wise greedy alg
1. each column we have winners
(those with 1s if there are some with 0; otherwise, everyone is a winner if all 1 or all 0)
2. winners carry to next column and only the winners are competing in the new column
3. whoever left in the pool is the winner
Time complexity should be O(n*m) - worst case, but average case might be much better.
// Returns the index of the row whose bits a[i][0..m-1] (most significant
// bit first) form the largest binary number.
// Column-wise greedy elimination: in each column, rows showing a 1
// survive; if no surviving row has a 1 there, everyone survives.
// Worst-case O(n*m); the survivor pool usually shrinks fast.
// Fixes in this version:
//  - `int a[][]` is not legal C++ (the inner extent is required), so the
//    matrix is taken as a vector of rows;
//  - win[0] was undefined behavior for n == 0; returns -1 instead.
int maxDecimal(const vector<vector<int>>& a, int n, int m)
{
    if (n <= 0) return -1;
    vector<int> win(n); // initially, every row is a candidate
    iota(win.begin(), win.end(), 0); // 0, 1, ..., n-1
    for (int j = 0; j < m; j++) {
        vector<int> t;
        for (int i : win) // only current winners compete in this column
            if (a[i][j] == 1) t.push_back(i);
        if (!t.empty()) win.swap(t); // otherwise all survive (all 0s)
    }
    return win[0];
}
column wise greedy alg
1. each column we have winners
(those with 1s if there are some with 0; otherwise, everyone is a winner if all 1 or all 0)
2. winners carry to next round (column) and only the winners are competing in the new column
3. whoever left in the pool is the winner
Time complexity should be O(n*m) - worst case, but average case might be much better.
// Returns the index of the row whose bits a[i][0..m-1] (most significant
// bit first) form the largest binary number.
// Column-wise greedy elimination: in each column, rows showing a 1
// survive; if no surviving row has a 1 there, everyone survives.
// Worst-case O(n*m); the survivor pool usually shrinks fast.
// Fixes in this version:
//  - `int a[][]` is not legal C++ (the inner extent is required), so the
//    matrix is taken as a vector of rows;
//  - win[0] was undefined behavior for n == 0; returns -1 instead.
int maxDecimal(const vector<vector<int>>& a, int n, int m)
{
    if (n <= 0) return -1;
    vector<int> win(n); // initially, every row is a candidate
    iota(win.begin(), win.end(), 0); // 0, 1, ..., n-1
    for (int j = 0; j < m; j++) {
        vector<int> t;
        for (int i : win) // only current winners compete in this column
            if (a[i][j] == 1) t.push_back(i);
        if (!t.empty()) win.swap(t); // otherwise all survive (all 0s)
    }
    return win[0];
}
// Adds two little-endian digit arrays (a[0] and b[0] are the least
// significant digits, matching the original's positional a[i] + b[i])
// and returns the little-endian digit sum.
// Fixes two bugs in the original:
//  - a carry was emitted as an extra in-place digit instead of being
//    propagated into the next position;
//  - the tail of the longer array was copied with copy_n through
//    ans.begin() + n into positions past ans.end() (undefined behavior).
vector<int> add(int a[], int m, int b[], int n)
{
    vector<int> ans;
    const int len = max(m, n);
    ans.reserve(len + 1);
    int carry = 0;
    for (int i = 0; i < len; i++) {
        int s = carry;
        if (i < m) s += a[i];
        if (i < n) s += b[i];
        ans.push_back(s % 10);
        carry = s / 10;
    }
    if (carry) ans.push_back(carry); // final carry adds one more digit
    return ans;
}
// Adjacency-list graph: nodes[i] holds the payload of vertex i and
// edges[i] lists the indices of the vertices adjacent to vertex i.
// NOTE(review): relies on a `Node` type declared elsewhere in this file.
class Graph {
public:
vector<Node> nodes;
vector<vector<int>> edges;
};
// Enumerates candidate cliques containing vertex s, grown greedily over
// s's neighbors: for each neighbor i and each clique built so far, the
// members connected to i plus i itself form a new candidate; the old
// clique is also kept whenever i could not join it in full.
// Fixes in this version (the original did not compile):
//  - the seed clique is {{s}} rather than the literal {{1}};
//  - `k` was referenced outside the loop that declared it, and the loop
//    pushed i repeatedly instead of the connected members k;
//  - the keep-the-old-clique test compared against v.size() - 1 instead
//    of checking whether i joined every member of v.
// NOTE(review): relies on isConnected(int, int) declared elsewhere.
vector<vector<int>> cliques(Graph& g, int s)
{
    vector<vector<int>> ans = {{s}};
    for (auto i : g.edges[s]) {
        vector<vector<int>> tmp;
        for (const auto& v : ans) {
            vector<int> t;
            for (auto k : v)
                if (isConnected(i, k)) t.push_back(k);
            t.push_back(i);
            tmp.push_back(t);
            if (t.size() != v.size() + 1) // i did not connect to all of v
                tmp.push_back(v);         // so the old clique survives
        }
        ans.swap(tmp);
    }
    return ans;
}
c++ solution:
Idea: if we expand one slot each time, it will require n times to get one instance. Each time (each slot) has m possibilities, where m be the size of set.
1. start from empty set
2. run n rounds, each round expand one slot based on previous set
3. In i-th round, for each existing instance in the set, we have m choices for the i-th slots. therefore one instance generates i-th instances
4. save the result of one round for next round (1 slot more).
Is this DP? I don't know — anyone? But it definitely uses the memoization technique. The time complexity is $\sum_{i=1}^{n} m^i$, which is $O(m^n)$.
// Builds every length-n tuple over the values in arr (the Cartesian
// power arr^n), one slot at a time: round i extends each partial tuple
// built so far with every element of arr.
class Solution {
public:
    vector<vector<int>> npermute(vector<int>& arr, int n)
    {
        vector<vector<int>> result{{}}; // start from the empty tuple
        for (int slot = 0; slot < n; slot++) {
            vector<vector<int>> extended;
            for (const auto& prefix : result)
                for (int value : arr) {
                    extended.push_back(prefix);
                    extended.back().push_back(value);
                }
            result.swap(extended); // this round's tuples feed the next
        }
        return result;
    }
};
// Small driver: prints every 3-slot tuple drawn from {4, 2, 3, 1},
// one tuple per line.
int main(int argc, char** argv)
{
    vector<int> a = {4, 2, 3, 1};
    Solution s;
    vector<vector<int>> res = s.npermute(a, 3);
    for (const auto& tuple : res) {
        for (int value : tuple)
            std::cout << value << " ";
        std::cout << "\n";
    }
    return 0;
}
// Arranges the numbers in `nums` so their decimal concatenation is
// maximal and returns that concatenation as an int.
// Fixes in this version:
//  - maxNum now uses the classic string comparator (a+b vs b+a); the
//    old digit-vector pipeline dropped the value 0 entirely (its digit
//    loop never executed) and computed p = pow(10, ndigits - 1), which
//    overflows int for longer inputs (the author's own "overflow!!!!"
//    comment);
//  - vecComp::operator() looped forever when both vectors were
//    exhausted on equal digits (i == 0 and j == 0 with v1[i] == v2[j]
//    decremented neither index); it now terminates. The class is kept
//    for interface compatibility.
class Solution {
public:
    // Compares little-endian digit vectors, "smaller concatenation
    // first" — kept so existing callers still compile.
    class vecComp {
    public:
        bool operator()(vector<int>& v1, vector<int>& v2) {
            if (v1.size() == 0) return !(v2.size() == 0);
            if (v2.size() == 0) return false;
            int i = v1.size() - 1, j = v2.size() - 1;
            while (i >= 0 && j >= 0) {
                if (v1[i] > v2[j]) return false;
                if (v1[i] < v2[j]) return true;
                // equal digits: stop once neither index can advance
                // (the old code spun forever here)
                if (i == 0 && j == 0) break;
                if (i > 0) i--;
                if (j > 0) j--;
            }
            return true;
        }
    };
    // Returns the largest number formed by concatenating all of nums.
    // NOTE(review): still overflows for results beyond INT_MAX, a limit
    // inherent to the original int-returning interface.
    int maxNum(vector<int> nums)
    {
        vector<string> parts;
        parts.reserve(nums.size());
        for (int num : nums)
            parts.push_back(to_string(num));
        // "x before y" exactly when the concatenation xy beats yx.
        sort(parts.begin(), parts.end(),
             [](const string& x, const string& y) { return x + y > y + x; });
        string joined;
        for (const string& p : parts)
            joined += p;
        if (joined.empty()) return 0;
        return stoi(joined);
    }
};
// Graph vertex payload: just an integer key.
class Node {
public:
    int key; // vertex label
    Node(int k) : key{k} {}
};
// Directed graph with a DFS-based topological sort. `heads` lists the
// starting vertices (assumed to reach the whole graph together);
// adjacency[i] holds the out-neighbors of vertex i.
class Graph {
public:
    // Returns the vertices stacked in topological order: popping the
    // stack yields a valid ordering (each vertex before its successors).
    // Fix: a head already reached from an earlier head used to be
    // visited — and pushed — a second time; it is now skipped.
    std::stack<int> topSort()
    {
        std::stack<int> ret;
        std::vector<int> st(nodes.size(), 0); // 0=new, 1=active, 2=done
        for (std::size_t i = 0; i < heads.size(); i++)
            if (st[heads[i]] == 0)
                dfs(heads[i], ret, st);
        return ret;
    }
private:
    // Post-order DFS: a vertex is pushed only after all of its
    // descendants, which is exactly the topological property.
    // NOTE(review): meeting a neighbor with st == 1 means a cycle; it
    // is silently ignored here, as in the original.
    void dfs(int root, std::stack<int>& ret, std::vector<int>& st)
    {
        st[root] = 1;
        for (std::size_t j = 0; j < adjacency[root].size(); j++)
            if (st[adjacency[root][j]] == 0)
                dfs(adjacency[root][j], ret, st);
        st[root] = 2;
        ret.push(root);
    }
    std::vector<Node> nodes;
    std::vector<int> heads;
    std::vector<std::vector<int>> adjacency;
};
The time complexity would be O(tn) where t is the size of query and n is the size of the tag list. It can be improved if tags is stored in Hash tab with each tag as a key mapped to a list of [start, end] for that particular tag.
- Sean Locke March 20, 2016
The idea is: compute the overlap of the segments of one tag with those of the previous tag in the query list, until all tags have been considered.
algorithm works as following:
1. push all segments of first tags into queue
2. push a canary into the queue, i.e. -1
3. for each tag of the rest in the query, do the following:
a. copy the queue
b. for each segment [low, high] of this tag, compute the overlapping with each segment in the queue and push them into the queue until hit the canary
c. move the canary to the end of the queue
// Intersects the [start, end] segments of the tags named in `query`.
// Each tags[i] is {start, end, tagId}. The working queue holds flat
// (low, high) pairs, with a -1 canary separating the previous round's
// pairs from the newly produced intersections.
// Fix: the filter for round t compared tag ids against the loop index t
// instead of against query[t], so every round after the first matched
// the wrong tag.
class Solution {
public:
    queue<int> seekTags(vector<vector<int>>& tags, vector<int>& query)
    {
        queue<int> myq;
        if (query.size() == 0) return myq;
        // Seed with every segment of the first queried tag.
        for (size_t i = 0; i < tags.size(); i++) {
            if (query[0] == tags[i][2]) {
                myq.push(tags[i][0]);
                myq.push(tags[i][1]);
            }
        }
        if (query.size() == 1) return myq;
        myq.push(-1); // canary: end of the current round's pairs
        for (size_t t = 1; t < query.size(); t++) {
            queue<int> copy = myq;
            for (size_t i = 0; i < tags.size(); i++) {
                if (tags[i][2] != query[t]) continue; // fix: was `!= t`
                /* if tags sorted, tags[i][1] < low to stop (optimize) */
                queue<int> tmpq = copy;
                while (!tmpq.empty() && tmpq.front() != -1 && tmpq.size() > 2) {
                    int low = tmpq.front(); tmpq.pop();
                    int high = tmpq.front(); tmpq.pop();
                    // strict overlap: append the intersection
                    if (tags[i][0] < high && tags[i][1] > low) {
                        myq.push(max(low, tags[i][0]));
                        myq.push(min(high, tags[i][1]));
                    }
                }
            }
            // drop last round's pairs up to (and including) the canary
            while (myq.front() != -1) myq.pop();
            myq.pop();
            if (myq.size() == 0) break; // no overlap survives
            myq.push(-1);
        }
        return myq;
    }

    // Debug helper: drains and prints the queue.
    void printQueue(queue<int>& ans) {
        std::cout << "Queue: " << std::endl;
        while (!ans.empty()) {
            std::cout << ans.front() << std::endl;
            ans.pop();
        }
    }
};
sorry for repeating post, but there are some mistakes in the previous code ...
// Rearranges matrix M so rows and columns end up sorted, by flattening,
// sorting, and re-filling along a diagonal sweep.
// Fixes in this version (the original did not compile):
//  - a stray extra `{` opened inside sortMatrix, which made rearrange a
//    nested function and left the class unterminated; braces repaired
//    and the class is now closed with `};`;
//  - M[r][j] was assigned the *index* held in the reserve queue instead
//    of the value arr[index].
// NOTE(review): the diagonal fill heuristic itself is unverified — the
// author posted this as a correction to an earlier attempt. The trivial
// paths (empty or one-dimensional input) are well-defined.
class Solution {
public:
    vector<vector<int>> sortMatrix(vector<vector<int>>& M)
    {
        if (M.size() == 0) return vector<vector<int>>();
        // skip if one dimension array as it is trivial
        if (M.size() < 2 || M[0].size() < 2) return M;
        int rows = M.size(), cols = M[0].size();
        vector<int> arr = vector<int>(rows * cols, 0);
        for (int i = 0; i < rows; i++) {
            int k = i * cols;
            for (int j = 0; j < cols; j++)
                arr[k+j] = M[i][j];
        }
        std::sort(arr.begin(), arr.end());
        M[0][0] = arr[0];
        rearrange(M, rows, cols, arr, 1, 1);
        return M;
    }
    // Fills row r and column c of M from the sorted array `arr`,
    // advancing diagonally until the bottom-right corner is reached.
    void rearrange(vector<vector<int>>& M, int rows, int cols,
                   vector<int>& arr, int r, int c)
    {
        int k = 0;
        std::queue<int> reserv; // arr indices deferred to keep order
        if (c == cols - 1 && r > c) k = r * cols;
        else if (r == rows - 1 && c > r) k = rows * c;
        else k = r * c;
        M[r][c] = arr[(c + 1) * r + c];
        // fill all (r-1) elems in j-th column
        if (c >= r) {
            for (int i = 0; i < r; i++) {
                while (k < (c + 1) * r + c && M[i][c-1] == arr[k]) {
                    reserv.push(k); k++; }
                M[i][c] = arr[k++];
            }
        }
        // fill all (c-1) elems in i-th row
        if (c <= r) {
            for (int j = 0; j < c; j++) {
                if (!reserv.empty()) {
                    // fix: take the reserved *value*, not its index
                    M[r][j] = arr[reserv.front()];
                    reserv.pop();
                } else M[r][j] = arr[k++];
            }
        }
        // reach the end - we are done
        if (r == rows - 1 && c == cols - 1) return;
        if (r < rows - 1) r += 1; // only advance if more rows
        if (c < cols - 1) c += 1; // only advance if more cols
        rearrange(M, rows, cols, arr, r, c);
    }
};
// Rearranges matrix M so rows and columns end up sorted, by flattening,
// sorting, and re-filling along a diagonal sweep.
// Fixes in this version (the original did not compile):
//  - a stray extra `{` opened inside sortMatrix, which made rearrange a
//    nested function and left the class unterminated; braces repaired
//    and the class is now closed with `};`;
//  - M[r][j] was assigned the *index* held in the reserve queue instead
//    of the value arr[index].
// NOTE(review): the diagonal fill heuristic itself is unverified — the
// author posted this as a correction to an earlier attempt. The trivial
// paths (empty or one-dimensional input) are well-defined.
class Solution {
public:
    vector<vector<int>> sortMatrix(vector<vector<int>>& M)
    {
        if (M.size() == 0) return vector<vector<int>>();
        // skip if one dimension array as it is trivial
        if (M.size() < 2 || M[0].size() < 2) return M;
        int rows = M.size(), cols = M[0].size();
        vector<int> arr = vector<int>(rows * cols, 0);
        for (int i = 0; i < rows; i++) {
            int k = i * cols;
            for (int j = 0; j < cols; j++)
                arr[k+j] = M[i][j];
        }
        std::sort(arr.begin(), arr.end());
        M[0][0] = arr[0];
        rearrange(M, rows, cols, arr, 1, 1);
        return M;
    }
    // Fills row r and column c of M from the sorted array `arr`,
    // advancing diagonally until the bottom-right corner is reached.
    void rearrange(vector<vector<int>>& M, int rows, int cols,
                   vector<int>& arr, int r, int c)
    {
        int k = 0;
        std::queue<int> reserv; // arr indices deferred to keep order
        if (c == cols - 1 && r > c) k = r * cols;
        else if (r == rows - 1 && c > r) k = rows * c;
        else k = r * c;
        M[r][c] = arr[(c + 1) * r + c];
        // fill all (r-1) elems in j-th column
        if (c >= r) {
            for (int i = 0; i < r; i++) {
                while (k < (c + 1) * r + c && M[i][c-1] == arr[k]) {
                    reserv.push(k); k++; }
                M[i][c] = arr[k++];
            }
        }
        // fill all (c-1) elems in i-th row
        if (c <= r) {
            for (int j = 0; j < c; j++) {
                if (!reserv.empty()) {
                    // fix: take the reserved *value*, not its index
                    M[r][j] = arr[reserv.front()];
                    reserv.pop();
                } else M[r][j] = arr[k++];
            }
        }
        // reach the end - we are done
        if (r == rows - 1 && c == cols - 1) return;
        if (r < rows - 1) r += 1; // only advance if more rows
        if (c < cols - 1) c += 1; // only advance if more cols
        rearrange(M, rows, cols, arr, r, c);
    }
};
partial string match, I would choose trie
- Sean Locke April 21, 2016
insert: add first name and last name to construct the trie
search: whenever the user inputs a letter, traverse the trie and return the subtree; prune the search space on each input; show the results from the subtree using DFS
delete: find the path, remove the label
modify: delete & insert