Omri.Bashari
BAN USER
// For every k x k window of the m x n matrix, appends the window's median
// (as computed by get_median) to the result, sliding left-to-right within
// each row band and then moving the band down one row.
// Assumes m >= k and n >= k.
std::vector<double> get_medians(int** mat, int m, int n, int k) {
    std::vector<double> medians;
    std::multiset<int> window;
    for (int top = 0; top <= m - k; ++top) {
        // Build the first window of this row band from scratch.
        window.clear();
        for (int r = 0; r < k; ++r)
            for (int c = 0; c < k; ++c)
                window.insert(mat[top + r][c]);
        medians.push_back(get_median(window));
        // Slide right: drop the column leaving the window, add the one entering.
        for (int left = 1; left <= n - k; ++left) {
            for (int r = 0; r < k; ++r) {
                window.erase(window.find(mat[top + r][left - 1])); // erase exactly one copy
                window.insert(mat[top + r][left + k - 1]);
            }
            medians.push_back(get_median(window));
        }
    }
    return medians;
}
O(n^3) time, O(n^2) space
// Counts cycles through each start vertex of the 0/1 adjacency matrix m
// (n x n). From every start vertex a breadth-first expansion explores only
// vertices with a larger index; each popped path that has an edge back to
// the start contributes one cycle. The expansion is capped at n - start
// node pops.
int num_cycles(int** m, int n) {
    int total = 0;
    for (int start = 0; start < n; ++start) {
        std::queue<std::pair<int, std::unordered_set<int>>> pending;
        pending.push({ start, {} });
        int expanded = 0;
        while (!pending.empty() && expanded < n - start) {
            int node = pending.front().first;
            std::unordered_set<int> seen = pending.front().second;
            pending.pop();
            if (m[node][start] == 1) {
                ++total;  // an edge back to the start closes a cycle
            }
            for (int next = start + 1; next < n; ++next) {
                if (m[node][next] == 1 && seen.count(next) == 0) {
                    std::unordered_set<int> extended(seen);
                    extended.insert(next);
                    pending.push({ next, extended });
                }
            }
            ++expanded;
        }
    }
    return total;
}
// Post-order walk that appends each node's value to res[level]; growing
// res lazily as deeper levels are reached. Because children are visited
// first, the caller can reverse res to obtain a bottom-up level ordering.
void bottom_up_level_traversal_aux(tree t, std::vector<std::vector<int>>& res, int level) {
    if (t == nullptr) {
        return;
    }
    bottom_up_level_traversal_aux(t->left, res, level + 1);
    bottom_up_level_traversal_aux(t->right, res, level + 1);
    if ((int)res.size() <= level) {
        res.resize(level + 1);  // first visit at this depth: grow the table
    }
    res[level].push_back(t->value);
}
// Returns the tree's values grouped by level, deepest level first.
std::vector<std::vector<int>> bottom_up_level_traversal(tree t) {
    std::vector<std::vector<int>> levels;
    bottom_up_level_traversal_aux(t, levels, 0);
    // Collected top-down; flip so the deepest level comes first.
    std::reverse(levels.begin(), levels.end());
    return levels;
}
O(n) solution
// Counts contiguous subarrays of v whose element product is strictly less
// than k. Maintains a sliding window [start, end) whose running product
// stays below k; whenever the window must shrink, all subarrays of the old
// window are settled via len*(len-1)/2 + len and the part overlapping the
// previously settled window (tracked by prev_end) is subtracted so nothing
// is counted twice.
// Assumes all elements are strictly positive.
// Note: curr_product * v[end] can overflow int for large inputs.
int count_subarrays_product_less_than_k(const std::vector<int> v, int k) {
    int prev_end = 0;
    int start = 0;
    int end = 0;
    int count = 0;
    int curr_product = 1;
    while (end < (int)v.size()) {
        if (curr_product * v[end] < k) {
            curr_product *= v[end];
        }
        else {
            // Settle every subarray of the current window [start, end) ...
            int len = end - start;
            count += len * (len - 1) / 2 + len;
            // ... minus those inside [start, prev_end), settled earlier.
            int overlap_len = std::max(prev_end - start, 0);
            count -= overlap_len * (overlap_len - 1) / 2 + overlap_len;
            prev_end = end;
            curr_product *= v[end];
            // Shrink from the left until the product drops below k again.
            while (curr_product >= k) {
                curr_product /= v[start];
                ++start;
            }
        }
        ++end;
    }
    // Settle the final window.
    int len = end - start;
    count += len * (len - 1) / 2 + len;
    // BUG FIX: the overlap with the previously settled window is
    // max(prev_end - start, 0), exactly as in the loop above. The old code
    // used len - max(prev_end - start, 0), which over-subtracts (e.g.
    // {2, 3} with k = 7 returned 0 instead of 3).
    int overlap_len = std::max(prev_end - start, 0);
    count -= overlap_len * (overlap_len - 1) / 2 + overlap_len;
    return count;
}
// Returns the next lexicographically larger permutation of the decimal
// digits of v (e.g. 123 -> 132, 122 -> 212). If the digits are already in
// non-increasing order, no larger permutation exists and v is returned
// unchanged. Note: the result of atoi is undefined if the permuted digits
// exceed INT_MAX.
int next_permutation(int v) {
    std::string s = std::to_string(v);
    int i = s.length() - 1;
    while (i > 0) {
        // BUG FIX: skip the non-increasing suffix with >= (the old strict >
        // stopped on equal digits, so e.g. 122 returned 122 instead of 212).
        if (s[i - 1] >= s[i]) {
            --i;
        }
        else {
            // s[i-1] is the pivot; find the smallest digit to its right
            // that is still strictly larger than the pivot. s[i] qualifies,
            // so min is a valid starting candidate.
            int min = i;
            for (size_t j = i + 1; j < s.length(); ++j) {
                if (s[j] > s[i - 1] && s[j] < s[min]) {
                    min = j;
                }
            }
            std::swap(s[min], s[i - 1]);
            // Smallest arrangement of the suffix completes the permutation.
            std::sort(s.begin() + i, s.end());
            break;
        }
    }
    return atoi(s.c_str());
}
void print_k_closest_to_origin(const std::vector<Point>& points, int k) {
std::vector<Point> heap(points);
auto cmp = [](const Point& a, const Point& b) { return a.x * a.x + a.y * a.y > b.x * b.x + b.y * b.y; };
std::make_heap(heap.begin(), heap.end(), cmp);
for (int i = 0; i < k; ++i) {
std::pop_heap(heap.begin(), heap.end(), cmp);
const Point& top = heap.back();
std::cout << top.x << " " << top.y << std::endl;
heap.pop_back();
}
}
// Pretty-prints the tree top-down, one value row and one '/' '\' connector
// row per level. Each frontier entry pairs a node with its absolute print
// column; print_indent presumably advances the cursor by the given number
// of columns (TODO confirm its contract).
void print_tree(tree t) {
int d = depth(t);
int level = 2;
// Root column: half the width reserved for the deepest row, plus one.
int indent = (std::pow(2, d) * 5) / 2 + 1;
std::vector<std::pair<int, tree>> frontier = { {indent, t} };
std::vector<std::pair<int, tree>> new_frontier;
std::vector<std::pair<int, tree>>& curr_frontier = frontier;
std::vector<std::pair<int, tree>>& next_frontier = new_frontier;
while (!curr_frontier.empty()) {
// Row 1: the node values. curr_indent tracks the column already
// reached, so each print_indent emits only the remaining gap.
int curr_indent = 0;
for (auto& pair : curr_frontier) {
print_indent(pair.first - curr_indent);
std::cout << pair.second->value;
curr_indent = pair.first;
}
std::cout << std::endl;
// Row 2: connectors, offset (d - level + 1) columns to either side of
// the parent; children are queued twice that offset away so the
// horizontal spread halves at each deeper level.
curr_indent = 0;
for (auto& pair : curr_frontier) {
if (pair.second->left != nullptr) {
print_indent(pair.first - (d - level + 1) - curr_indent);
std::cout << "/";
next_frontier.push_back({ pair.first - 2 * (d - level + 1), pair.second->left });
curr_indent = pair.first - (d - level + 1);
}
if (pair.second->right != nullptr) {
print_indent(pair.first + (d - level + 1) - curr_indent);
std::cout << "\\";
next_frontier.push_back({ pair.first + 2 * (d - level + 1), pair.second->right });
curr_indent = pair.first + (d - level + 1);
}
}
std::cout << std::endl;
// Swap buffers and descend one level.
curr_frontier.clear();
std::swap(curr_frontier, next_frontier);
++level;
}
}
// Total length of the union of the given [start, end] intervals: sort by
// start, merge overlapping runs, and sum each merged run's length.
int time_covered(const std::vector<std::pair<int, int>>& intervals) {
    if (intervals.empty()) {
        return 0;
    }
    std::vector<std::pair<int, int>> spans(intervals);
    std::sort(spans.begin(), spans.end(),
        [](const std::pair<int, int>& a, const std::pair<int, int>& b) { return a.first < b.first; });
    int total = 0;
    int lo = spans[0].first;
    int hi = spans[0].second;
    for (size_t i = 1; i < spans.size(); ++i) {
        if (spans[i].first <= hi) {
            // Overlaps (or touches) the current run: extend it.
            hi = std::max(spans[i].second, hi);
        }
        else {
            // Gap: close out the current run and start a new one.
            total += hi - lo;
            lo = spans[i].first;
            hi = spans[i].second;
        }
    }
    total += hi - lo;  // close out the last run
    return total;
}
{{
// Entry point: allocates a zeroed board of 2*n slots and starts the
// recursive placement with digit 1.
void digit_distance_permutations_aux(int n, std::vector<std::vector<int>>& results) {
    std::vector<int> board(n * 2, 0);
    digit_distance_permutations(n, 1, board, results);
}
// Places both copies of `digit` (then recursively all larger digits up to
// n) into the zero slots of candidate so the two copies of digit d sit
// exactly d + 1 positions apart; every complete placement is appended to
// results.
void digit_distance_permutations(int n, int digit, const std::vector<int>& candidate, std::vector<std::vector<int>>& results) {
    if (digit > n) {
        // All digits 1..n placed: record the completed arrangement.
        results.push_back(candidate);
        return;
    }
    const int span = digit + 1;  // required gap between the two copies
    const int size = (int)candidate.size();
    for (int pos = 0; pos + span < size; ++pos) {
        if (candidate[pos] != 0 || candidate[pos + span] != 0) {
            continue;  // one of the two slots is taken
        }
        std::vector<int> extended(candidate);
        extended[pos] = digit;
        extended[pos + span] = digit;
        digit_distance_permutations(n, digit + 1, extended, results);
    }
}
}}
// Number of contiguous subarrays of an array of length l: l*(l+1)/2
// (equivalent to the l*(l-1)/2 + l form; the product of two consecutive
// integers is always even, so the division is exact).
int count_subarrays(int l) {
    return l * (l + 1) / 2;
}
int count_subarrays_less_than_k(std::vector<int> a, int k) {
int count = 0;
int start = 0;
for (int i = 0; i <= a.size(); ++i) {
if (i == a.size() || a[i] >= k) {
int l = i - start;
count += count_subarrays(l);
start = i + 1;
}
}
return count;
}
// Number of ordered pairs of disjoint subarrays inside one block of length
// l: for every split position, the left part of length `left` yields
// left*(left+1)/2 subarrays (count_subarrays, inlined) and the second
// subarray can start at any of the l - left remaining positions.
int count_non_overlapping_subarrays(int l) {
    int total = 0;
    for (int left = 1; left < l; ++left) {
        total += (left * (left + 1) / 2) * (l - left);
    }
    return total;
}
int count_non_overlapping_subarrays_less_than_k(std::vector<int> a, int k) {
std::vector<int> counts;
int count = 0;
int start = 0;
for (int i = 0; i <= a.size(); ++i) {
if (i == a.size() || a[i] >= k) {
int l = i - start;
if (l > 0) {
counts.push_back(count_subarrays(l));
count += count_non_overlapping_subarrays(l);
}
start = i + 1;
}
}
for (int i = 0; i < counts.size(); ++i) {
for (int j = i + 1; j < counts.size(); ++j) {
count += counts[i] * counts[j];
}
}
return count;
}
std::cout << count_subarrays_less_than_k({ 1, 6, 12, 13, 4, 2, 5, 13, 2 }, 7) << std::endl;
std::cout << count_non_overlapping_subarrays_less_than_k({ 1, 6, 12, 13, 4, 2, 5, 13, 2 }, 7) << std::endl;
10
33
There's an awful lot of algorithmic code here for a question that requests to define an interface.
I would treat this question as a design question.
First, I would ask a bunch of questions, some of them obvious, some are not.
1. How many services do I need to support?
2. How many requests do I need to support per second?
3. How much data does a typical request return?
4. Can I assume the result fits in memory?
5. Is there a disparity between the speeds in the services response?
6. Do different services use different formats? Are they able to convert it to a uniform format like XML/JSON?
7. What is the format in which I need to return the dates/info? How will this data be used?
8. What is the format of the user id? Is it global to all services or does it need conversion? If it isn’t global, do I have an API to translate global ids to local ids?
9. Is the way a user id is represented likely to be changed? How about date and info?
10. Is the user info in different languages? Are the dates in different time zones?
11. What should happen if one service is not responding / reports an error?
12. What are the APIs for these services? Do I need to design an API for them as well?
13. Should I validate the input? What happens if the date range is invalid?
Then, based on the answers (or lack thereof), I would propose the following ideas:
We want to be able to access the services uniformly so we need to have a base interface for all of them. In the more likely case that the services are not available for extension this would be an interface to an adaptor class. This interface will be very similar to the aggregator interface. The main method of this interface is something like
struct service_interface
{
virtual map<date_t, info_t> get_user_data_in_range(uid_t uid, date_t start, date_t end) = 0;
virtual ~service_interface() = default;
};
We have to define a virtual destructor for the derived / implementing classes, since we have a virtual method. The types date_t, info_t and uid_t are the aggregator defined types that the different services need to convert their representation of ids/dates/info from and into. For maximum compatibility these types should not be complex classes but simple strings / ints. It may also be useful to use simple types to avoid exposing a complex class representation to the services if it may be changed later. Either way these types may be typedefs. If I knew nothing about the services and could assume nothing about the usage / requirements of the system I would simply use strings for everything, possibly unicode strings if the info is international. The important thing is that all services need to conform to a specific format for the dates and info they return and the user id they receive. This is the part of the system that shouldn’t change and that’s why wide strings are attractive, because they are fairly abstract and scalable. For example, if we use JSON or XML as the format of the string and for some reason a new service appears and wants to add new data to the user info representation, it doesn’t break the interface and only the parser has to account for this possibility. Now as for the aggregator service, if it is to be used in a very specific way that isn’t expected to change it may define a similar method with different data types, maybe even complex data types that make it easy to use. It may even provide a single data type but be open for extension via overloading. Now this depends on the language but generally it could provide a template method of the form
struct aggregator
{
template<class D, class I>
void get_user_data_in_range(uid_t uid, D start, D end, map<D,I>& result)
};
Notice that the signature is slightly different because practically for c++ we can’t use the data representation as the return argument because of conflicting specialization overloads.
Now we can provide specific template specialization for different D/I return types. The basic implementation throws “not implemented”. Another option is that it would use basic types again and let the clients do the parsing. This seems like unnecessary delegation but it may be more flexible in case the service representations are changed often and only the clients know how they have changed. It would make the aggregator code very simple but would add more code to the client. These two approaches do not conflict and can be used together. Additionally, if the number of services is changing dynamically we may want the aggregator interface to support this, i.e. provide add and remove methods for the services of the form
void add_service(shared_pointer<service_interface> s);
void remove_service(shared_pointer<service_interface> s);
The return value may be boolean if we want to return previous existence or not. We will also need then to keep these interfaces in a data structure for the implementation.
Now about the aggregator, notice that this isn’t an interface but rather an implementation as it assumes only one way to combine the data, perhaps to be represented in different types. If we plan to have many different aggregators and we want to use them in some polymorphic fashion then the template solution is not suitable. We need to define an aggregator interface and we may need to resort to the solution of delegating data representation to the client. The interface will contain a set of virtual methods to get the data in relatively simple types that will be implemented differently by different aggregator implementations. For example, one aggregator may return the range in days and another in months, or one aggregator will do some processing on the data to convert it to a different format. This means that the previously defined methods will be virtual and again we need a virtual destructor. Additionally, we won’t have a data structure in the interface and we will let the different aggregators decide how / if they want to store their service interfaces.
Additionally, if some services are slow network services or disk services and the aggregator is read-heavy, i.e. there are many overlapping requests at a time it may be useful to implement an aggregator with an in-memory cache like memcached with LRU or some similar method. This may make the interface faster at the cost of more RAM and more complicated algorithms. It may also make things difficult in case the services update the data asynchronously, in which case they will have to provide publish/subscribe methods in the service interface to the aggregator so it can be informed of invalid cache entries.
Finally, if some results cannot fit in memory the aggregator implementation may need to perform some kind of external sort and the aggregator interface, rather than providing the entire map in memory, would provide an iterator to the map. This would have to be an extension to the interface.
Let’s see, what would be the brute force solution here? any range we choose will have one of the array elements in it, so we can select each element a[i], and check how many elements are in the range a[i]+1 and a[i]-1. If we do this for every element the solution time complexity is O(n^2) with O(1) extra space. Now how can we make this better? If we sort the array we will have all elements that are close next to each other. We can then select the first element in the sorted array and count how many elements are inside a[0],a[0]+1. Now when we get to an element that’s outside the range, we check whether, after removing a[0], we’re back to a range smaller than one. So we have two pointers, left and right, and we expand when the range is smaller than one and de-queue when the range is bigger than one. Now this solution is better because it only has O(nlgn) time complexity and still O(1) extra space if we do the sorting in-place. Now we should use the fact that the range has to be of size one, this implies we can somehow hash the values into buckets. If all the numbers were in range 0-m we could make an array of size m with counters for all the numbers with the same round/floor number. If the number’s range isn’t bound we can still do this with a hash table, with the round/floor number as the key. Now the problem is that this doesn’t give us the optimal range, consider for example 0.1,0.7,0.8,1.1,1.5,1.6,1.7 — neither bucket 0 nor bucket 1 contains the count of the maximum range [0.7,1.7] and we can’t find it only with the counts. What we can change is that we can hash the elements themselves and then check every pair of consecutive buckets. We can go through every bucket and check if bucket with (key+1) exists. Then we can check a pair of buckets again using sorting. If the numbers are randomly distributed checking a pair of buckets can be done in constant time, and we check O(n) buckets so the time complexity drops to O(n). 
To be more specific, if the average number of elements in a bucket is k, that is to say the load factor is k, the overall complexity would be O(n/k*klgk), so O(nlgk). It would be O(nlgn) worst case when all elements hash to two consecutive buckets. The tradeoff we made is that now we use O(n) extra space. We can make one more improvement if we knew that the range has to be quantized in some way, i.e. all maximum ranges are rounded somehow to d decimal digits then we can just use buckets to count the rounded numbers to their d decimal digits. For example, if the range returned has to be of the form [a.x, c.y], then 12.63 adds to the 12.6 bucket. Now we can get the maximum range using only the counters by iterating the buckets and checking if 10^d buckets exist and accumulating their counters. That wouldn’t require sorting and would reduce the time complexity to O(n*(10^d)) which is especially attractive if d is small. It would reduce the extra space complexity O(n/k). Note that in the case of randomly distributed numbers, k is a decreasing function of d.
// - Omri.Bashari June 28, 2015
// Flood-fills the region containing (x, y) with character c, treating 'X'
// cells as walls. Returns true if the region is closed (never reaches the
// board edge), false if any cell of it touches the boundary. Note &= is
// non-short-circuiting on purpose: the whole region is painted even once
// it is known to be open.
bool fill_with_char(char** a, int m, int n, int x, int y, char c)
{
    if (x < 0 || y < 0 || x >= m || y >= n) return false;  // ran off the board: open region
    if (a[x][y] == 'X' || a[x][y] == c) return true;       // wall, or already painted
    a[x][y] = c;
    bool closed = true;
    const int dx[] = { 1, -1, 0, 0 };
    const int dy[] = { 0, 0, 1, -1 };
    for (int dir = 0; dir < 4; ++dir)
        closed &= fill_with_char(a, m, n, x + dx[dir], y + dy[dir], c);
    return closed;
}
// Fills every 'O' region that does not touch the board edge with 'X'.
// Pass 1: flood-fill each 'O' region with the sentinel 'V'; the fill
// reports whether the region is closed, and closed regions are immediately
// re-filled with 'X'. Pass 2: restore open regions' sentinels to 'O'.
void fill_shape(char** a, int m, int n)
{
    for (int i = 0; i < m; ++i)
    {
        for (int j = 0; j < n; ++j)
        {
            if (a[i][j] != 'O') continue;
            if (fill_with_char(a, m, n, i, j, 'V'))
                fill_with_char(a, m, n, i, j, 'X');
        }
    }
    for (int i = 0; i < m; ++i)
        for (int j = 0; j < n; ++j)
            if (a[i][j] == 'V')
                a[i][j] = 'O';
}
// Prints the m x n character matrix, one row per line with cells separated
// by spaces, framed by a blank line above and below.
void print_matrix(char** a, int m, int n)
{
    std::cout << std::endl;
    for (int row = 0; row < m; ++row)
    {
        for (int col = 0; col < n; ++col)
        {
            std::cout << a[row][col] << " ";
        }
        std::cout << std::endl;
    }
    std::cout << std::endl;
}
// Exercises fill_shape on three boards:
// - board a: the inner 'O' block is fully enclosed by 'X', so it gets
//   filled; the lone corner 'O' touches the edge and must stay 'O'.
// - board b: the 'O' region reaches the right border, so it is left open.
// - board c: a 5x6 board mixing enclosed and border-touching regions.
// Results are printed for manual inspection rather than asserted.
void test_fill_shape()
{
char a[5][5] =
{
{ 'X', 'X', 'X', 'X', 'X' },
{ 'X', 'X', 'O', 'O', 'X' },
{ 'X', 'X', 'O', 'O', 'X' },
{ 'X', 'X', 'X', 'X', 'X' },
{ 'O', 'X', 'X', 'X', 'X' } };
char* ap[5] = { a[0], a[1], a[2], a[3], a[4] };
fill_shape(ap, 5, 5);
print_matrix(ap, 5, 5);
char b[5][5] =
{
{ 'X', 'X', 'X', 'X', 'X' },
{ 'X', 'X', 'O', 'O', 'X' },
{ 'X', 'X', 'O', 'O', 'O' },
{ 'X', 'X', 'X', 'X', 'X' },
{ 'O', 'X', 'X', 'X', 'X' } };
char* bp[5] = { b[0], b[1], b[2], b[3], b[4] };
fill_shape(bp, 5, 5);
print_matrix(bp, 5, 5);
char c[5][6] =
{
{ 'X', 'O', 'X', 'X', 'X', 'X' },
{ 'X', 'X', 'O', 'O', 'X', 'X' },
{ 'X', 'X', 'O', 'O', 'X', 'O' },
{ 'X', 'O', 'X', 'O', 'X', 'O' },
{ 'O', 'X', 'X', 'X', 'X', 'X' } };
char* cp[5] = { c[0], c[1], c[2], c[3], c[4] };
fill_shape(cp, 5, 6);
print_matrix(cp, 5, 6);
}
This doesn't look like O(lg(m+n)) at all. For simplicity sake assume m=n, and mark the size of the input 2n=N. According to the code the recursive function is
T(N) = 2*T((3/4)*N) + O(1)
T(0) = O(1)
Which is actually O(N^(log_{4/3} 2)) = O(N^2.4094), which is far worse than linear.
- Omri.Bashari June 26, 2015
O(lg k) solution
// Returns the k-th smallest (1-based) element of the union of two sorted
// arrays a (length n) and b (length m), or -1 when k is out of range.
// Each iteration probes index k/2-1 in both arrays and discards the
// smaller prefix, so the running time is O(lg k).
int combined_kth(int* a, int n, int* b, int m, int k)
{
    if (k <= 0 || m + n < k) return -1;
    while (k > 1)
    {
        // Probe position k/2-1 in each array, clamped to its length;
        // an exhausted array probes as +infinity so the other one is cut.
        const int ia = std::min(n - 1, k / 2 - 1);
        const int ib = std::min(m - 1, k / 2 - 1);
        const int va = (ia >= 0) ? a[ia] : std::numeric_limits<int>::max();
        const int vb = (ib >= 0) ? b[ib] : std::numeric_limits<int>::max();
        if (va < vb)
        {
            // The first ia+1 elements of a cannot be the k-th: drop them.
            a += ia + 1;
            n -= ia + 1;
            k -= ia + 1;
        }
        else
        {
            b += ib + 1;
            m -= ib + 1;
            k -= ib + 1;
        }
    }
    // k == 1: the answer is the smaller current head.
    int best = std::numeric_limits<int>::max();
    if (n > 0) best = std::min(best, a[0]);
    if (m > 0) best = std::min(best, b[0]);
    return best;
}
// Spot-checks combined_kth on two sorted arrays: the global minimum (k=1),
// several interior ranks, the global maximum (k=15), and the two
// out-of-range cases (k too large, k negative) that must return -1.
void test_combined_kth()
{
int a[] = { 1, 3, 5, 11, 30, 47, 66, 108 };
int b[] = { -6, 2, 7, 11, 32, 43, 61 };
assert(-6 == combined_kth(a, sizeof(a) / sizeof(int), b, sizeof(b) / sizeof(int), 1));
assert(3 == combined_kth(a, sizeof(a) / sizeof(int), b, sizeof(b) / sizeof(int), 4));
assert(5 == combined_kth(a, sizeof(a) / sizeof(int), b, sizeof(b) / sizeof(int), 5));
assert(30 == combined_kth(a, sizeof(a) / sizeof(int), b, sizeof(b) / sizeof(int), 9));
assert(61 == combined_kth(a, sizeof(a) / sizeof(int), b, sizeof(b) / sizeof(int), 13));
assert(108 == combined_kth(a, sizeof(a) / sizeof(int), b, sizeof(b) / sizeof(int), 15));
assert(-1 == combined_kth(a, sizeof(a) / sizeof(int), b, sizeof(b) / sizeof(int), 16));
assert(-1 == combined_kth(a, sizeof(a) / sizeof(int), b, sizeof(b) / sizeof(int), -2));
}
O(nlgn) solution with O(n) extra space.
This solution probably has some trouble with duplicate value end/start points but it can be improved to support these as well.
// Shorthand: hash map (boost) used by this solution.
template <typename K, typename V>
using map = boost::unordered_map<K, V>;
// A closed interval [s, e].
struct interval_t
{
int s; // start
int e; // end
};
// One interval endpoint, as swept by get_max_intersect.
struct point_t
{
int v; // coordinate of the endpoint
bool s; // true if this is a start point, false for an end point
int iid; // id of the owning interval
};
// Intends to return the interval that intersects the most others, via an
// endpoint sweep: +1 at each start, -1 at each end, remembering the
// interval whose end point observes the highest live count.
// NOTE(review): `id` is never incremented inside the loop, so every point
// carries iid 0 and iid_map retains only the LAST interval inserted — the
// function therefore always returns the last interval of the input. The
// accompanying tests pass only because their expected interval happens to
// be pushed last. Adding `++id` alone is NOT a fix: sampling the live
// count only at an interval's end point does not measure how many distinct
// intervals it crosses, so the sweep itself needs a redesign.
// Also, per the author's note above, duplicate endpoint values are not
// handled.
interval_t get_max_intersect(const std::vector<interval_t>& intervals)
{
if (intervals.empty()) return { 0, 0 };
map<int, interval_t> iid_map;
std::vector<point_t> order;
int id = 0;
for (const auto& i : intervals)
{
iid_map[id] = i;
order.push_back({ i.s, true, id });
order.push_back({ i.e, false, id });
}
std::sort(order.begin(), order.end(),
[](const point_t& p1, const point_t& p2) { return p1.v < p2.v; });
// Sweep left to right, tracking how many intervals are live.
int count = 0, max_count = 0, max_id = 0;
for (const auto& p : order)
{
if (p.s)
{
++count;
}
else
{
if (count > max_count)
{
max_count = count;
max_id = p.iid;
}
--count;
}
}
return iid_map[max_id];
}
// Two scenarios for get_max_intersect, checking the returned interval by
// value. NOTE(review): in both scenarios the expected interval is also the
// LAST one pushed into the vector; since get_max_intersect never advances
// its `id` counter it always returns the last input interval, so these
// asserts pass without actually validating the sweep logic.
void test_get_max_intersect()
{
std::vector<interval_t> intervals;
intervals.push_back({ 2, 3 });
intervals.push_back({ 4, 11 });
interval_t expected = { 1, 6 };
intervals.push_back(expected);
interval_t result = get_max_intersect(intervals);
assert(result.s == expected.s && result.e == expected.e);
intervals.push_back({ -3, 1 });
intervals.push_back({ -5, 8 });
intervals.push_back({ 9, 11 });
intervals.push_back({ 11, 13 });
expected = { 4, 12 };
intervals.push_back(expected);
result = get_max_intersect(intervals);
assert(result.s == expected.s && result.e == expected.e);
}
BFS from every guard. Not very efficient as I missed the idea of putting all of the guards in the queue and only doing one run using the zeros as visited indicators. But it works :)
// BFS from the guard at (gx, gy) over the grid a (m x n). Digit cells
// ('0'..'9') receive the BFS distance: the first guard simply writes it,
// later guards keep the minimum. Non-digit cells other than the start
// (walls 'B', other guards 'G') absorb the wave and are not expanded.
// pt_t is presumably a tuple-like (row, col, dist) — accessed via std::get.
void update_from_guard(char** a, int m, int n, int gx, int gy, bool first)
{
    // RAII visited matrix: replaces the manual new[]/delete[] pair, which
    // leaked on any early exit or exception.
    std::vector<std::vector<bool>> visited(m, std::vector<bool>(n, false));
    std::queue<pt_t> q;
    q.push(pt_t(gx, gy, 0));
    visited[gx][gy] = true;
    while (!q.empty())
    {
        pt_t p = q.front();
        q.pop();
        int i = std::get<0>(p);
        int j = std::get<1>(p);
        int d = std::get<2>(p);
        if (a[i][j] >= '0' && a[i][j] <= '9')
        {
            // Distances are stored as digit characters.
            if (first) a[i][j] = (char)('0' + d);
            else a[i][j] = std::min(a[i][j], (char)('0' + d));
        }
        else if (i != gx || j != gy) continue;  // blocker: do not expand
        if (i > 0 && !visited[i - 1][j])
        {
            q.push(pt_t(i - 1, j, d + 1));
            visited[i - 1][j] = true;
        }
        if (i < m - 1 && !visited[i + 1][j])
        {
            q.push(pt_t(i + 1, j, d + 1));
            visited[i + 1][j] = true;
        }
        if (j > 0 && !visited[i][j - 1])
        {
            q.push(pt_t(i, j - 1, d + 1));
            visited[i][j - 1] = true;
        }
        if (j < n - 1 && !visited[i][j + 1])
        {
            q.push(pt_t(i, j + 1, d + 1));
            visited[i][j + 1] = true;
        }
    }
}
// Runs one BFS per guard cell 'G'. The first guard overwrites every digit
// cell with its distance; subsequent guards keep the minimum, so digit
// cells end up holding the distance to the nearest guard.
void calc_min_guard_dist(char** a, int m, int n)
{
    bool seen_guard = false;
    for (int i = 0; i < m; ++i)
    {
        for (int j = 0; j < n; ++j)
        {
            if (a[i][j] != 'G') continue;
            update_from_guard(a, m, n, i, j, !seen_guard);
            seen_guard = true;
        }
    }
}
// Prints the m x n character matrix, one row per line with cells separated
// by spaces (no surrounding blank lines, unlike the earlier variant).
void print_matrix(char** a, int m, int n)
{
for (auto i = 0; i < m; ++i)
{
for (auto j = 0; j < n; ++j)
std::cout << a[i][j] << " ";
std::cout << std::endl;
}
}
// Builds a 3x3 board with two blockers 'B' and two guards 'G', prints it,
// runs calc_min_guard_dist, and prints the result for manual inspection.
void test_calc_min_guard_dist()
{
    const int m = 3, n = 3;
    char** a = new char*[m];
    for (int i = 0; i < m; ++i) a[i] = new char[n];
    for (int i = 0; i < m; ++i)
        for (int j = 0; j < n; ++j)
            a[i][j] = '0';
    a[1][0] = 'B';
    a[2][0] = 'B';
    a[1][1] = 'G';
    a[1][2] = 'G';
    print_matrix(a, m, n);
    calc_min_guard_dist(a, m, n);
    print_matrix(a, m, n);
    // BUG FIX: the board was leaked — release it.
    for (int i = 0; i < m; ++i) delete[] a[i];
    delete[] a;
}
O(d) solution in the number of digits, which is limited to 10 due to int representation limits, so O(1) solution.
// Returns the smallest number > num whose decimal digits are strictly
// ascending left-to-right (e.g. 853 -> 1234, 1543 -> 1567), or -1 when no
// such number exists within 10 digits. O(d) in the digit count, hence
// effectively O(1) for int inputs.
int next_larger_asc(int num)
{
if (num <= 0) return 1;
int t = num + 1;
// Split num + 1 into digits, least significant first (d[0] = ones digit,
// d[dn-1] = most significant), so "ascending" means d[i] < d[i-1].
std::vector<int> d;
while (t > 0)
{
d.push_back(t % 10);
t = t / 10;
}
int dn = d.size();
int carry = 0;
int ldi = dn; // largest discrepancy index
// Scan from the most significant digit for the first position where the
// strict ascent d[i] < d[i-1] is violated.
for (auto i = dn - 1; i > 0; --i)
{
if (d[i] >= d[i - 1])
{
if (9 == d[i]) carry = 1; // a 9 here cannot start an ascending run
ldi = i;
break;
}
}
// No violation: num + 1 is already strictly ascending.
if (dn == ldi) return num + 1;
int sai = dn; // smallest ascension index
// Find the most significant position from which an ascending run
// (d[i], d[i]+1, d[i]+2, ...) can still be completed without passing 9.
for (auto i = ldi; i < dn; ++i)
{
if (i + d[i] + carry < 10)
{
sai = i;
break;
}
}
int rv = 0;
if (dn == sai)
{
// have to make a longer number
if (dn + 1 >= 10) return -1; // can't do it
// Smallest strictly ascending (dn+1)-digit number is 123...(dn+1).
for (auto i = 1; i <= dn + 1; ++i)
{
rv *= 10;
rv += i;
}
}
else
{
if (ldi != sai) carry = 1;
// make ascending sequence from sai
d[sai] += carry;
for (auto i = sai - 1; i >= 0; --i)
{
d[i] = d[i + 1] + 1;
}
// get the new number (reassemble most significant digit first)
for (auto i = dn - 1; i >= 0; --i)
{
rv *= 10;
rv += d[i];
}
}
return rv;
}
// Spot-checks next_larger_asc: digit-count growth (853 -> 1234),
// same-length ascents, non-positive input (-3 -> 1), and the -1 overflow
// case where no 10-digit ascending successor exists.
void test_next_larger_asc()
{
assert(1234 == next_larger_asc(853));
assert(5678 == next_larger_asc(5432));
assert(1567 == next_larger_asc(1543));
assert(1 == next_larger_asc(-3));
assert(1678 == next_larger_asc(1589));
assert(16789 == next_larger_asc(15831));
assert(-1 == next_larger_asc(923456781));
}
I coded a solution to a similar problem a while back. This one only finds one expression, but can be easily modified to return all expressions.
// forward declaration for the mutual recursion below
bool find_exp_aux(std::vector<int>& v, std::vector<std::string>& e, const int t, std::string& r);

// Helper: push one combined (value, expression) candidate onto the pools,
// recurse, and restore the pools if the recursion fails. Extracted from
// six copy-pasted stanzas of the original.
static bool try_combined(std::vector<int>& v, std::vector<std::string>& e,
                         int value, const std::string& expr,
                         const int t, std::string& r)
{
    v.push_back(value);
    e.push_back(expr);
    if (find_exp_aux(v, e, t, r)) return true;
    e.pop_back();
    v.pop_back();
    return false;
}

// Searches for an arithmetic expression over the values in v that equals
// the target t. Repeatedly picks a pair (i, j), replaces it with the
// result of one of the six binary combinations (+, *, both subtraction
// orders, both integer-division orders, division guarded against zero),
// and recurses. On success, the fully parenthesized expression is written
// to r and true is returned; candidates are tried in the original search
// order so the first expression found is unchanged.
bool find_exp_aux(std::vector<int>& v, std::vector<std::string>& e, const int t, std::string& r)
{
    if (v.empty()) return false;
    const auto n = v.size();
    if (1 == n)
    {
        // Single value left: success iff it hits the target.
        if (t == v[0])
        {
            r = e[0];
            return true;
        }
        return false;
    }
    for (size_t i = 0; i < n; ++i)
    {
        const auto v1 = v[i];
        const auto e1 = e[i];  // copied: the pools are rebuilt below
        for (size_t j = i + 1; j < n; ++j)
        {
            const auto v2 = v[j];
            const auto e2 = e[j];
            // Remove the two operands (j first so index i stays valid).
            auto vc(v);
            auto ec(e);
            vc.erase(vc.begin() + j);
            vc.erase(vc.begin() + i);
            ec.erase(ec.begin() + j);
            ec.erase(ec.begin() + i);
            if (try_combined(vc, ec, v1 + v2, "(" + e1 + ")" + "+" + "(" + e2 + ")", t, r)) return true;
            if (try_combined(vc, ec, v1 * v2, "(" + e1 + ")" + "*" + "(" + e2 + ")", t, r)) return true;
            if (try_combined(vc, ec, v1 - v2, "(" + e1 + ")" + "-" + "(" + e2 + ")", t, r)) return true;
            if (try_combined(vc, ec, v2 - v1, "(" + e2 + ")" + "-" + "(" + e1 + ")", t, r)) return true;
            if (0 != v2 &&
                try_combined(vc, ec, v1 / v2, "(" + e1 + ")" + "/" + "(" + e2 + ")", t, r)) return true;
            if (0 != v1 &&
                try_combined(vc, ec, v2 / v1, "(" + e2 + ")" + "/" + "(" + e1 + ")", t, r)) return true;
        }
    }
    return false;
}
// Public entry: seeds the expression pool with each value's own decimal
// text and delegates the search to find_exp_aux. On success the found
// expression is written to r.
bool find_exp(const std::vector<int>& v, const int t, std::string& r)
{
    std::vector<int> values(v);
    std::vector<std::string> exprs;
    exprs.reserve(values.size());
    for (const auto value : values)
    {
        exprs.push_back(std::to_string(value));
    }
    return find_exp_aux(values, exprs, t, r);
}
O(n) solution
// Minimum number of hops needed to reach the last index, where a[i] is the
// maximum hop length from position i. Returns -1 when the end is
// unreachable and 0 for trivial inputs (null array or length <= 1).
// Greedy O(n): track the furthest index covered by the hops taken so far
// and the furthest reachable with one more hop.
int array_hopper(int* a, const int n)
{
    if (nullptr == a || n <= 1) return 0;
    int edge = 0;       // last index covered by the hops committed so far
    int next_edge = 0;  // furthest index reachable with one extra hop
    int hops = 0;
    for (int i = 0; i < n; ++i)
    {
        const int reach = i + a[i];
        if (reach >= n - 1) return hops + 1;  // one more hop finishes
        if (reach > next_edge) next_edge = reach;
        if (i == edge)
        {
            // Current coverage exhausted: commit another hop.
            if (edge == next_edge) return -1;  // no progress possible
            ++hops;
            edge = next_edge;
        }
    }
    return hops;
}
// Spot-checks array_hopper: a 3-hop board, a 4-hop board, and a board
// where a run of zeros makes the end unreachable (-1).
void test_array_hopper()
{
int a[] = { 2, 4, 1, 2, 3, 2, 4, 2 };
assert(3 == array_hopper(a, sizeof(a) / sizeof(int)));
int b[] = {1, 2, 0, 4, 2, 4, 1, 0, 0, 4};
assert(4 == array_hopper(b, sizeof(b) / sizeof(int)));
int c[] = { 1, 2, 0, 2, 2, 3, 1, 0, 0, 4 };
assert(-1 == array_hopper(c, sizeof(c) / sizeof(int)));
}
you can't. the root edge case is just one example. you have to find the split node of both nodes and verify that it isn't one of them before making the swap. you can find the split node relatively easily if you have parent pointers but otherwise you have to find (and store, since it's not a BST) the paths of both nodes from the root.
- Omri.Bashari June 24, 2015void print_rational(int num, int den)
{
int left = 0;
std::vector<int> v;
boost::unordered_map<int, int> seen;
left = num / den;
int i = 0;
num -= (left*den);
num *= 10;
while (seen.find(num) == seen.end())
{
int d = num / den;
v.push_back(d);
seen[num] = i;
++i;
num -= d*den;
num = num * 10;
}
int p = seen[num];
std::cout << left << ".";
for (auto i = 0; i < p; ++i) std::cout << v[i];
std::cout << "(";
for (auto i = p; i < v.size(); ++i) std::cout << v[i];
std::cout << ")" << std::endl;
}
O(n) solution, O(1) space, order not maintained.
// Rearranges a so positive and negative values alternate for as long as
// both signs last; relative order is NOT preserved. O(n) time, O(1) extra
// space: partition negatives to the front, reverse if they are not the
// majority so the larger group leads, then swap the minority group's
// elements into every other slot.
void alt_reorder(int* a, int n)
{
    int* mid = std::partition(a, a + n, [](int x) { return x < 0; });
    const int negatives = (int)(mid - a);
    if (negatives <= n / 2) std::reverse(a, a + n);  // majority sign to the front
    const int pairs = std::min(negatives, n - negatives);
    int slot = (a[0] < 0) ? 1 : 0;  // first index needing the minority sign
    for (int i = n - pairs; i < n; ++i, slot += 2)
    {
        std::swap(a[i], a[slot]);
    }
}
Update: I found a solution based on max-flow and posted it as an answer. Check it out.
- Omri.Bashari June 18, 2015
Following up on some of the ideas in the suggested answers, I found a solution based on the max-flow algorithm.
1. create nodes for all 'constraints' i.e. distinct voter lines.
2. divide nodes into two groups - dog lover constraints and cat lover constraints.
3. create a directed graph flow network
3.1. create a source and sink nodes.
3.2. for each dog lover constraint node (left side).
3.2.1. connect source to node with edge capacity equal to constraint votes.
3.3. for each cat lover constraint node (right side)
3.3.1. connect node to sink with edge capacity equal to constraint votes.
3.4. for every two conflicting constraints
3.4.1. create an edge from left node to right node with infinite capacity.
4. calculate max flow.
5. return original number of voters - max flow.
The reason this works is that according to the max-flow-min-cut theorem, the max flow is equal to the minimum capacity that when removed from the network in a specific way, causes no flow from source to sink. That specific way is by removing the min-cut edges from the graph. In this problem, since we assigned infinite capacity to the original edges, the min-cut edges must be connected to the source or the sink. Additionally, the constraint nodes in these edges correspond to the minimum weight vertex cover, i.e. exactly the constraints we need to remove in order to avoid conflicts between the remaining voters’ constraints and maximize the number of remaining voters.
Below is a C++ implementation. It is too long for an interview setting but I believe it is the optimal solution to this problem.
#pragma once
#include <algorithm>
#include <iostream>
#include <list>
#include <memory>
#include <queue>
#include <sstream>
#include <string>
#include <tuple>
#include <vector>
#include <boost/unordered_map.hpp>
#include <boost/unordered_set.hpp>
#include <boost/tokenizer.hpp>
#include <boost/algorithm/string.hpp>
namespace reality_shows
{
template <typename K, typename V>
using map = boost::unordered_map<K, V>;
template <typename T>
using set = boost::unordered_set<T>;
using std::shared_ptr;
using std::istream;
using std::list;
using std::queue;
// One voter's ballot, reduced to the two pet ids named on it.
// Presumably first_id is the pet to keep and second_id the pet to evict,
// with hates_cats set when the first pet is a dog (see the parsing in the
// reality_show constructor) — TODO confirm against the problem statement.
class vote_t
{
public:
vote_t(size_t _first_id, size_t _second_id, bool _hates_cats)
: first_id(_first_id), second_id(_second_id), hates_cats(_hates_cats) {}
size_t first_id;
size_t second_id;
bool hates_cats;
};
class node;
// Directed edge of the flow network. A residual edge is created already
// saturated (flow == capacity), so its residual capacity cf() starts at 0;
// `complement` links an edge with its reverse for the residual network.
class edge
{
public:
edge(shared_ptr<node> _from, shared_ptr<node> _to, size_t _capacity, bool _residual = false)
: from(_from), to(_to), capacity(_capacity), flow(_residual ? capacity : 0),
residual(_residual), complement(nullptr) {};
shared_ptr<node> from;
shared_ptr<node> to;
size_t capacity;
size_t flow;
// is a residual edge
bool residual;
// complement edge to->from
// for the max-flow residual network
shared_ptr<edge> complement;
// residual capacity
inline size_t cf() { return capacity - flow; }
};
// Flow-network node: one distinct vote line, with the number of voters who
// cast it and the node's outgoing edges.
class node
{
public:
// FIX: initializer list now matches the declaration order (count before
// vote) — the original initialized them in the opposite order, which
// triggers -Wreorder and is misleading about actual construction order.
// Also marked explicit to prevent accidental vote_t -> node conversions.
explicit node(const vote_t& _vote) : count(1), vote(_vote) {}
size_t count;   // how many voters cast exactly this vote line
vote_t vote;
set<shared_ptr<edge>> outgoing;
};
class reality_show
{
public:
// Parses the show's input from `in`: a header of three positive integers
// (cat count, dog count, voter count — num_voters is assigned here and is
// presumably a class member declared further down), followed by one vote
// line per voter such as "C1 D2". Identical vote lines are coalesced into
// a single node with an incremented count.
// Throws std::invalid_argument on malformed input; note std::stoi may also
// throw std::invalid_argument/std::out_of_range on bad ids.
reality_show(istream& in)
{
int num_cats = 0, num_dogs = 0;
in >> num_cats;
if (num_cats <= 0) throw std::invalid_argument("invalid cat number.");
in >> num_dogs;
if (num_dogs <= 0) throw std::invalid_argument("invalid dog number.");
in >> num_voters;
if (num_voters <= 0) throw std::invalid_argument("invalid voters number.");
// skip line
std::string line;
std::getline(in, line);
for (auto v = 0; v < num_voters; ++v)
{
std::getline(in, line);
boost::trim(line);
// Tokenize on spaces, splitting off the leading 'C'/'D' type letters,
// so "C1 D2" yields exactly four tokens: C, 1, D, 2.
using separator_t = boost::char_separator < char >;
separator_t sep(" ", "CD");
boost::tokenizer<separator_t> tokens(line, sep);
if (4 != std::distance(tokens.begin(), tokens.end()))
throw std::invalid_argument("invalid vote format.");
auto token = tokens.begin();
auto first_pet_type = *token;
if ("C" != first_pet_type && "D" != first_pet_type)
throw std::invalid_argument("invalid vote format.");
++token;
auto first_id = std::stoi(*token);
++token;
auto second_pet_type = *token;
// The two pets named in one vote must be of different species.
if (("C" != second_pet_type && "D" != second_pet_type) ||
first_pet_type == second_pet_type)
throw std::invalid_argument("ilegal vote format.");
++token;
auto second_id = std::stoi(*token);
// Validate both ids against the announced cat/dog counts.
if (first_id < 1 || second_id < 1)
throw std::invalid_argument("invalid pet id.");
if ("C" == first_pet_type &&
(first_id > num_cats || second_id > num_dogs))
throw std::invalid_argument("invalid pet id.");
if ("D" == first_pet_type &&
(first_id > num_dogs || second_id > num_cats))
throw std::invalid_argument("invalid pet id.");
// Coalesce identical vote lines into one weighted node; hates_cats
// is set when the first (kept) pet is a dog.
if (nodes.find(line) == nodes.end())
{
vote_t vote(first_id, second_id, "D" == first_pet_type);
nodes[line] = std::make_shared<node>(vote);
}
else
{
++nodes[line]->count;
}
}
}
size_t advance()
{
// For this problem the maximum number of voters kept
// is the original number of voters minus
// the maximum flow.
// The reason for this is that according to the
// max-flow-min-cut theorem, the max flow
// is equal to the minimum capacity that when removed
// from the network in a specific way, causes no flow
// from source to sink. That specific way is
// by removing the min-cut edges from the graph.
// In this problem, since we assigned infinite capacity
// to the original edges, the min-cut edges must be
// connected to the source or the sink. Additionally,
// the constraint nodes in these edges correspond
// to the minimum weight vertex cover, i.e. exactly
// the constraints we need to remove in order to avoid
// conflicts between the remaining voters constraints
// and maximize the number of remaining voters.
construct_bipartite_flow_graph();
auto max_flow = edmunds_karp_max_flow();
return num_voters - max_flow;
}
private:
void connect_nodes(shared_ptr<node>& from, shared_ptr<node>& to)
{
// create edge between conflicting nodes, infinite capacity
auto e = std::make_shared<edge>(from, to, std::numeric_limits<size_t>::max());
// create reverse residual edge as well
auto re = std::make_shared<edge>(to, from, std::numeric_limits<size_t>::max(), true);
// make them complements (i.e. flow updates effect each other)
e->complement = re;
re->complement = e;
// update the respective nodes
from->outgoing.insert(e);
to->outgoing.insert(re);
}
void construct_bipartite_flow_graph()
{
// mappings of pet id to the nodes
map< size_t, set<shared_ptr<node>> > dog_lovers;
map< size_t, set<shared_ptr<node>> > dog_haters;
map< size_t, set<shared_ptr<node>> > cat_lovers;
map< size_t, set<shared_ptr<node>> > cat_haters;
for (auto& p : nodes)
{
auto node = p.second;
auto vote = node->vote;
if (vote.hates_cats)
{
// hi i'm a dog lover
dog_lovers[vote.first_id].insert(node);
// and a cat hater
cat_haters[vote.second_id].insert(node);
// connect to everybody i hate
if (cat_lovers.find(vote.second_id) != cat_lovers.end())
{
for (auto cnode : cat_lovers[vote.second_id])
{
connect_nodes(node, cnode);
}
}
// connect to everybody that hates me
if (dog_haters.find(vote.first_id) != dog_haters.end())
{
for (auto cnode : dog_haters[vote.first_id])
{
connect_nodes(node, cnode);
}
}
}
else
{
// hi i'm a cat lover
cat_lovers[vote.first_id].insert(node);
// and a dog hater
dog_haters[vote.second_id].insert(node);
// connect everybody i hate to me
if (dog_lovers.find(vote.second_id) != dog_lovers.end())
{
for (auto dnode : dog_lovers[vote.second_id])
{
connect_nodes(dnode, node);
}
}
// connect everybody that hates me to me
if (cat_haters.find(vote.first_id) != cat_haters.end())
{
for (auto dnode : cat_haters[vote.first_id])
{
connect_nodes(dnode, node);
}
}
}
}
// create source and sink nodes
source = std::make_shared<node>(vote_t(0, 0, true));
sink = std::make_shared<node>(vote_t(0, 0, true));
// connect source to all dog lover nodes, with capacity equal to their count
for (auto& p : dog_lovers)
{
for (auto& node : p.second)
{
auto e = std::make_shared<edge>(source, node, node->count);
source->outgoing.insert(e);
}
}
// connect all cat lover nodes to sink, with capacity equal to their count
for (auto& p : cat_lovers)
{
for (auto& node : p.second)
{
auto e = std::make_shared<edge>(node, sink, node->count);
node->outgoing.insert(e);
}
}
}
// modified bfs to find a path from source to sink that can be improved
list<shared_ptr<edge>> find_residual_path()
{
map<shared_ptr<node>, shared_ptr<edge>> visited;
queue<shared_ptr<node>> q;
q.push(source);
while (!q.empty())
{
auto node = q.front();
q.pop();
for (auto e : node->outgoing)
{
if (e->cf() > 0 && visited.find(e->to) == visited.end())
{
visited[e->to] = e;
if (e->to == sink)
{
list<shared_ptr<edge>> path;
auto node = sink;
while (source != node)
{
path.push_back(visited[node]);
node = visited[node]->from;
}
return path;
}
q.push(e->to);
}
}
}
// no path
return list<shared_ptr<edge>>();
}
size_t edmunds_karp_max_flow()
{
auto max_flow = 0;
auto path = find_residual_path();
while (!path.empty())
{
// find minimum residual flow on path
auto mincf = std::numeric_limits<size_t>::max();
for (auto e : path)
{
mincf = std::min(e->cf(), mincf);
}
// update path flow
for (auto e : path)
{
// add flow to path edge
e->flow += mincf;
if (nullptr != e->complement)
{
// subtract flow from complement edge
auto ec = e->complement;
ec->flow -= mincf;
}
}
max_flow += mincf;
path = find_residual_path();
}
return max_flow;
}
using node_key = std::string;
map < node_key, shared_ptr<node> > nodes;
shared_ptr<node> source;
shared_ptr<node> sink;
size_t num_voters;
};
// Runs the three sample test cases from the original problem statement and
// checks the number of voters that can be kept: expected {1, 3, 8}.
// NOTE(review): uses assert(), so <cassert> must be available via the
// including translation unit (it is not in the visible include list).
void test_reality_show()
{
std::string sin =
"3\n" \
"1 1 2\n" \
"C1 D1\n" \
"D1 C1\n" \
"1 2 4\n" \
"C1 D1\n" \
"C1 D1\n" \
"C1 D2\n" \
"D2 C1\n" \
"2 3 13\n" \
"C1 D1\n" \
"C2 D1\n" \
"C2 D1\n" \
"C2 D1\n" \
"C2 D1\n" \
"C2 D2\n" \
"C2 D2\n" \
"D1 C1\n" \
"D1 C1\n" \
"D1 C1\n" \
"D1 C2\n" \
"D3 C1\n" \
"D3 C1\n";
std::istringstream in(sin);
auto num_test_cases = 0;
in >> num_test_cases;
size_t expected[] = { 1, 3, 8 };
for (auto tc = 0; tc < num_test_cases; ++tc)
{
reality_show rs(in);
assert(expected[tc] == rs.advance());
}
}
}
Some terminology clarification: a graph can have many *maximal* independent sets, but only one *maximum* independent set. It is possible to reduce the problem of finding the *maximum* independent set in a bipartite graph to polynomial complexity using Konig's theorem. However, finding all maximal independent sets is NP-complete regardless.
- Omri.Bashari June 17, 2015

I upvoted your answer because it made me think of the problem in a new way. However, I'm pretty sure this solution won't work. The problem is that you are trying to find a maximum independent set of *constraints*, not a maximum set of voters. As the examples show, some constraints are more popular than others, so not all vertices are created equal. Therefore, all maximal independent sets must be considered in order to maximize the number of voters. This turns the problem NP-complete again and I suspect that in an interview setting the best I would come up with is an exhaustive recursive algorithm. Great idea though.
- Omri.Bashari June 17, 2015

class BstNode
{
public:
// Minimal binary tree node: integer payload plus raw child pointers.
// Ownership is manual (callers new/delete nodes themselves).
BstNode(int _data) : data(_data), left(nullptr), right(nullptr) {};
int data;
BstNode* left;
BstNode* right;
};
// True when tree `a` can be laid exactly on top of `b` starting at these
// roots: every node of `a` must have an equal-valued node in the same
// position in `b`. `b` may extend below a's leaves.
bool can_superimpose(BstNode* a, BstNode* b)
{
    if (nullptr == a) return true;   // nothing left of a to match
    if (nullptr == b) return false;  // a continues where b has ended
    if (a->data != b->data) return false;
    return can_superimpose(a->left, b->left) && can_superimpose(a->right, b->right);
}
// True when `a` appears as a (superimposable) subtree somewhere inside `b`:
// tries every node of `b` whose value matches a's root as an anchor.
bool is_sub_tree(BstNode* a, BstNode* b)
{
    if (nullptr == a) return true;   // empty tree is a subtree of anything
    if (nullptr == b) return false;  // nowhere left to anchor
    if (b->data == a->data && can_superimpose(a, b)) return true;
    return is_sub_tree(a, b->left) || is_sub_tree(a, b->right);
}
// Exercises is_sub_tree: a = {5,4,7} is found under b's left child, then a
// value change (42) and a structure change (removed node) each break the match.
// NOTE(review): the trees are never fully freed — fine for a throwaway test,
// but a leak if reused.
void test_is_sub_tree()
{
auto a = new BstNode(5);
a->left = new BstNode(4);
a->right = new BstNode(7);
auto b = new BstNode(6);
b->left = new BstNode(5);
b->right = new BstNode(12);
b->left->left = new BstNode(4);
b->left->right = new BstNode(7);
b->right->left = new BstNode(8);
b->right->right = new BstNode(10);
b->left->left->left = new BstNode(1);
// b's left subtree superimposes a (extra node 1 below is allowed).
assert(is_sub_tree(a, b));
// Mismatched value breaks the superimposition.
b->left->right->data = 42;
assert(!is_sub_tree(a, b));
// Missing node breaks it too.
delete b->left->right;
b->left->right = nullptr;
assert(!is_sub_tree(a, b));
}
O(n) solution
// Counts index pairs (i, j), i < j, whose values sum to at least n.
// Precondition: `a` is sorted ascending (the two-pointer scan relies on it).
// O(size) time, O(1) space.
int count_pairs_ge(int* a, const int size, const int n)
{
    auto lo = 0;
    auto hi = size - 1;
    auto total = 0;
    while (lo < hi)
    {
        if (a[lo] + a[hi] < n)
        {
            // Even the largest partner is too small for a[lo]; discard it.
            ++lo;
        }
        else
        {
            // a[hi] pairs with every element in (lo, hi).
            total += hi - lo;
            --hi;
        }
    }
    return total;
}
// Sanity checks for count_pairs_ge on a sorted 9-element array (36 pairs
// total): 21 pairs sum to >= 42, only (65,67) reaches 120, 32 reach 13,
// none reach 150.
void test_count_pairs_ge()
{
int a[] = { 1, 5, 7, 9, 30, 33, 35, 65, 67 };
assert(21 == count_pairs_ge(a, sizeof(a) / sizeof(int), 42));
assert(1 == count_pairs_ge(a, sizeof(a) / sizeof(int), 120));
assert(32 == count_pairs_ge(a, sizeof(a) / sizeof(int), 13));
assert(0 == count_pairs_ge(a, sizeof(a) / sizeof(int), 150));
}
Alright, this took me a couple of hours to put together in a comfortable setup; I don't know how they expect this in an interview. The question is pretty easy if you remove the constraint of maximum courses per semester, but otherwise this has to be a recursive backtracking solution. It could probably be done using dynamic programming as well, but I'm afraid it would have a high memory complexity.
Below is a solution using a recursive method. The basic idea is topological sort, we have one map called the 'blueprint' which is essentially a dependency graph of the input unchanged, and another map 'curr_map' that is constantly being modified to reflect the courses we have chosen to take. Each time we choose to take courses in a given semester we remove them from the current map (and all of their dependencies) and when we backtrack we add them back. The method 'get_options' is a modified version of the 'get sources' procedure in topological sort that takes into account the current semester. The code is pretty long, but it works. Enjoy!
#pragma once
#include <iostream>
#include <memory>
#include <list>
#include <boost/unordered_map.hpp>
#include <boost/unordered_set.hpp>
namespace course_optimizer
{
template <typename K, typename V>
using map = boost::unordered_map<K, V>;
template <typename T>
using set = boost::unordered_set<T>;
template <typename T>
using shared_ptr = std::shared_ptr<T>;
enum class semester_t { fall, spring, both };
// Maps an input character code to the matching semester_t value:
// 'F' -> fall, 'S' -> spring, 'B' -> both. Any other character is rejected
// with std::invalid_argument.
semester_t convert(char c)
{
    switch (c)
    {
    case 'F': return semester_t::fall;
    case 'S': return semester_t::spring;
    case 'B': return semester_t::both;
    default: throw std::invalid_argument("invalid course time character argument.");
    }
}
using course_id = std::string;
using istream = std::istream;
// Dependency-graph node for one course: `incoming` holds unmet
// prerequisites, `outgoing` holds courses that depend on this one, and
// `time` says in which semester(s) the course is offered.
class node
{
public:
node(course_id _id) : id(_id) {};
std::string id;
semester_t time;
set<shared_ptr<node>> outgoing;
set<shared_ptr<node>> incoming;
};
// Computes the minimum number of semesters needed to take all courses,
// given prerequisites, per-course semester availability, and a cap of
// `mcps_` courses per semester. Uses recursive backtracking over a mutable
// copy of the dependency graph (topological-sort style).
class optimizer
{
public:
using course_map = map < course_id, shared_ptr<node> > ;
// Reads `num` course ids, then for each course: its id, availability char
// ('F'/'S'/'B'), prerequisite count, and prerequisite ids.
optimizer(istream& in, size_t num, size_t mcps) : mcps_(mcps)
{
course_id id;
for (auto i = 0; i < num; ++i)
{
in >> id;
courses_[id] = std::make_shared<node>(id);
}
for (auto i = 0; i < num; ++i)
{
in >> id;
char ct;
in >> ct;
if (courses_.find(id) == courses_.end()) throw std::invalid_argument("invalid id argument.");
courses_[id]->time = convert(ct);
auto num_pre = 0;
in >> num_pre;
if (num_pre < 0) throw std::invalid_argument("invalid prerequisite argument: cannot be negative.");
for (auto j = 0; j < num_pre; ++j)
{
course_id pre_id;
in >> pre_id;
// NOTE(review): pre_id is not validated like `id` above — an unknown
// prerequisite default-inserts a null shared_ptr into courses_ and
// would be dereferenced later. TODO: validate pre_id.
courses_[id]->incoming.insert(courses_[pre_id]);
courses_[pre_id]->outgoing.insert(courses_[id]);
}
}
}
// Entry point: backtracks over a working copy of the graph starting in fall.
size_t compute_min_courses()
{
auto curr_map = copy_course_map(courses_);
return compute(courses_, curr_map, semester_t::fall);
}
private:
// Recursive core. `blueprint` is the immutable original graph; `curr_map`
// holds the courses not yet taken (mutated and restored around recursion).
// `options`/`options_processed` carry partially-explored choices when more
// courses are available than the per-semester cap allows.
// NOTE(review): if curr_map is non-empty but no course can ever become
// available (e.g. a prerequisite cycle), options stays empty, every
// semester is "taken" with zero courses, and the recursion never ends.
size_t compute(const course_map& blueprint, course_map& curr_map, const semester_t& semester, std::list<course_id> options = std::list<course_id>(), size_t options_processed = 0)
{
if (curr_map.empty()) return 0;
semester_t next_semester = (semester_t::fall == semester) ? semester_t::spring : semester_t::fall;
if (options.empty()) options = get_options(curr_map, semester);
if (options.size() <= mcps_)
{
// we can take all of the options. remove from current map and get the recursive call result
for (const auto& id : options)
{
for (auto& n : curr_map[id]->outgoing)
{
n->incoming.erase(curr_map[id]);
}
curr_map.erase(id);
}
auto rv = 1 + compute(blueprint, curr_map, next_semester);
// recover the current map
// (taken courses are re-created from the blueprint and re-linked into
// the incoming sets of their dependents).
for (const auto& id : options)
{
curr_map[id] = std::make_shared<node>(id);
curr_map[id]->time = blueprint.at(id)->time;
for (const auto& n : blueprint.at(id)->outgoing)
{
curr_map[n->id]->incoming.insert(curr_map[id]);
}
}
return rv;
}
else
{
// we have to pick an option to ignore (not take this semester). options_processed is a counter to avoid duplicating recursive calls.
auto min = std::numeric_limits<size_t>::max();
for (auto i = std::next(std::begin(options), options_processed); i != std::end(options); ++i)
{
// dont take it
// (erase returns the iterator after the removed element; insert puts
// the id back at the same position before the loop's ++i advances).
auto id = *i;
i = options.erase(i);
min = std::min(min, compute(blueprint, curr_map, semester, options, options_processed));
i = options.insert(i, id);
++options_processed;
}
return min;
}
}
// get all courses that can be taken in the input semester and have all of their requisites filled already (no incoming).
std::list<course_id> get_options(const course_map& courses, const semester_t& semester)
{
std::list<course_id> options;
for (const auto& p : courses)
{
if (p.second->incoming.empty() && (semester_t::both == p.second->time || semester == p.second->time))
{
options.push_back(p.first);
}
}
return options;
}
// Deep-copies the graph so the blueprint stays untouched while the working
// copy is mutated during backtracking.
course_map copy_course_map(const course_map& source)
{
course_map dest;
for (const auto& p : source)
{
dest[p.first] = std::make_shared<node>(p.first);
dest[p.first]->time = p.second->time;
}
for (const auto& p : source)
{
for (const auto& n : p.second->incoming)
{
dest[p.first]->incoming.insert(dest[n->id]);
}
for (const auto& n : p.second->outgoing)
{
dest[p.first]->outgoing.insert(dest[n->id]);
}
}
return dest;
}
// m = max courses per semester
const size_t mcps_;
// dependency map
course_map courses_;
};
// Reads "n m" pairs (course count, max courses per semester) from the
// stream and solves each test case until a non-positive pair ends the input.
void solve(istream& in)
{
    auto n = 0, m = 0;
    for (in >> n >> m; n > 0 && m > 0; in >> n >> m)
    {
        optimizer opt(in, n, m);
        std::cout << "The minimum number of semesters required to graduate is " << opt.compute_min_courses() << "." << std::endl;
    }
}
}
Following is the deterministic select algorithm based on median of medians to retrieve the k-th smallest element, guaranteed to work in a worst-case running time of O(n).
// Returns the median of five values. Implemented by partially ordering a
// copy of the values and picking the middle element.
int median5(int a, int b, int c, int d, int e)
{
    int values[] = { a, b, c, d, e };
    // Place the median (index 2 of 5) in its sorted position.
    std::nth_element(values, values + 2, values + 5);
    return values[2];
}
// Lomuto partition of a[left..right] around the value at index `pivot`.
// On return, elements smaller than the pivot value precede it and the rest
// follow; returns the pivot's final index.
int partition(int* a, int left, int right, int pivot)
{
    // Park the pivot value in the rightmost slot for the duration of the scan.
    std::swap(a[right], a[pivot]);
    const auto pivot_value = a[right];
    auto store = left;
    for (auto scan = left; scan < right; ++scan)
    {
        if (a[scan] < pivot_value)
        {
            std::swap(a[scan], a[store]);
            ++store;
        }
    }
    // Drop the pivot into its final position.
    std::swap(a[store], a[right]);
    return store;
}
int select(int* a, int left, int right, int k);
// Median-of-medians pivot finder: splits a[left..right] into groups of 5,
// moves each group's median into the prefix [left, si), then selects the
// (upper) median of those medians recursively.
// NOTE(review): for a trailing partial group (fewer than 5 elements) the
// group's *first* element is used instead of its true median — select still
// terminates correctly, but the usual 30/70 split guarantee is weakened.
int med_of_meds(int* a, int left, int right)
{
auto si = left;
for (auto i = left; i <= right; i += 5)
{
auto j = std::min(i + 4, right);
auto med = (i + 4 == j) ? median5(a[i], a[i + 1], a[i + 2], a[i + 3], a[i + 4]) : a[i];
// Locate the chosen value inside its group and swap it into the prefix.
auto pos = i;
for (; pos <= j; ++pos)
{
if (med == a[pos]) break;
}
std::swap(a[si], a[pos]);
++si;
}
// std::ceil picks the upper median of the (si - left) group medians.
return select(a, left, si - 1, std::ceil((si - left) / (double)2));
}
// Deterministic selection (quickselect with a median-of-medians pivot):
// returns the k-th smallest element (1-based) of a[left..right] in
// worst-case O(n). Partially reorders the array in place.
// Throws std::invalid_argument when the range holds fewer than k elements.
// Fixes vs. original: `std::exception("...")` is an MSVC extension (ISO
// std::exception has no const char* constructor), and the trailing
// `else if` left a control path with no return value (UB).
int select(int* a, int left, int right, int k)
{
    if (right - left + 1 < k) throw std::invalid_argument("select: k exceeds range size");
    if (right == left) return a[left];
    // Pivot on the median of medians to guarantee a balanced split.
    auto mm = med_of_meds(a, left, right);
    auto pivot = left;
    for (; pivot <= right; ++pivot)
    {
        if (mm == a[pivot]) break;
    }
    auto pos = partition(a, left, right, pivot);
    const auto rank = pos - left + 1;  // 1-based rank of the pivot in the range
    if (rank == k) return mm;
    if (rank < k) return select(a, pos + 1, right, k - rank);
    return select(a, left, pos - 1, k);  // rank > k: answer lies left of the pivot
}
/// usage
// Example: sorted sa would be {2,5,6,8,14,17,43,55,75,92}, so s1 = 2,
// s3 = 6, s7 = 43. Note each call partially reorders sa in place, which is
// fine here because the multiset of values is unchanged.
int sa[] = { 5, 17, 92, 43, 55, 14, 2, 8, 75, 6 };
int s1 = select(sa, 0, 9, 1);
int s3 = select(sa, 0, 9, 3);
int s7 = select(sa, 0, 9, 7);
Iterative solution:
// Singly-linked list node with manual ownership (callers new/delete nodes).
class ListNode {
public:
ListNode(int _data) : data(_data), next(nullptr) { };
ListNode* next;
int data;
};
// Merges two sorted singly-linked lists in place and returns the merged
// head. Iterative, O(|a| + |b|) time, O(1) extra space.
// Fixes vs. original: when both heads held equal data, the first loop
// iteration executed `pd->next = s` with pd still nullptr (null
// dereference). A stack-allocated dummy head removes the special case.
ListNode* merge_ll(ListNode* a, ListNode* b)
{
    if (nullptr == a) return b;
    if (nullptr == b) return a;
    ListNode dummy(0);        // sentinel; its `next` becomes the result head
    ListNode* tail = &dummy;  // last node of the merged prefix
    while (nullptr != a && nullptr != b)
    {
        if (a->data <= b->data)
        {
            tail->next = a;
            a = a->next;
        }
        else
        {
            tail->next = b;
            b = b->next;
        }
        tail = tail->next;
    }
    // Append whichever list still has nodes.
    tail->next = (nullptr != a) ? a : b;
    return dummy.next;
}
Recursive and DP solution
// Counts the distinct subsequences of s that equal t (classic "distinct
// subsequences" DP). O(|s| * |t|) time and space.
// Fixes vs. original: the raw new[]/delete[] matrix (leak-prone, verbose)
// is replaced with std::vector (RAII), and the loop indices use size_t to
// match the string lengths instead of mixing int with size_t.
int num_dc_matches_dp(const std::string& s, const std::string& t)
{
    const auto sn = s.length();
    const auto tn = t.length();
    // d[i][j] = number of distinct ways the first i chars of s can produce
    // the first j chars of t.
    std::vector<std::vector<int>> d(sn + 1, std::vector<int>(tn + 1, 0));
    for (size_t i = 0; i <= sn; ++i) d[i][0] = 1;  // empty t matches exactly once
    for (size_t i = 1; i <= sn; ++i)
    {
        for (size_t j = 1; j <= tn; ++j)
        {
            d[i][j] = d[i - 1][j];  // ways that skip s[i-1]
            if (s[i - 1] == t[j - 1])
            {
                d[i][j] += d[i - 1][j - 1];  // ways that match s[i-1] to t[j-1]
            }
        }
    }
    return d[sn][tn];
}
// Recursive count of the distinct subsequences of s (length sn) that equal
// t (length tn). Exponential in the worst case — see num_dc_matches_dp for
// the memoized O(sn * tn) version.
int num_dc_matches(char* s, char* t, int sn, int tn)
{
    if (0 == s || 0 == t) return 0;
    if (tn > sn) return 0;                              // t cannot fit in s
    if (tn == sn) return (0 == strcmp(s, t)) ? 1 : 0;   // must match exactly
    if (0 == tn) return 1;                              // empty t matches once
    auto count = num_dc_matches(s + 1, t, sn - 1, tn);  // ways that skip s[0]
    if (*s == *t)
    {
        count += num_dc_matches(s + 1, t + 1, sn - 1, tn - 1);  // match s[0] to t[0]
    }
    return count;
}
// Returns the highest bit index i (0-based, scanning from the LSB) such
// that bits i and i-1 of n are both set, or -1 when n has no two adjacent
// set bits (i.e. n is already "sparse").
int find_max_double_one_index(int n)
{
    auto highest = -1;
    auto previous_set = (n & 1) != 0;
    auto bits = n >> 1;
    for (auto pos = 1; bits != 0; ++pos, bits >>= 1)
    {
        const auto current_set = (bits & 1) != 0;
        if (current_set && previous_set)
        {
            highest = pos;  // keep overwriting: the last hit is the highest
        }
        previous_set = current_set;
    }
    return highest;
}
// Returns the smallest sparse number (no two adjacent set bits) strictly
// greater than n. Repeatedly clears everything at and below the highest
// adjacent-ones position and carries a 1 into the next bit.
int next_sparse(int n)
{
    auto idx = find_max_double_one_index(n);
    if (-1 == idx)
    {
        ++n;  // n itself is sparse; start looking past it
        idx = find_max_double_one_index(n);
    }
    while (-1 != idx)
    {
        n = (n & (~0 << (idx + 1))) | (1 << (idx + 1));
        idx = find_max_double_one_index(n);
    }
    return n;
}
template <typename K, typename V>
using map = boost::unordered_map<K, V>;
// Sliding-window length of the longest substring of s containing at most m
// distinct characters. `counts` tracks per-character occurrences inside the
// current window [start, end].
// NOTE(review): `counts.size() > m` compares size_t with int m; a negative
// m would be converted to a huge unsigned value and the window would never
// shrink — callers are assumed to pass m >= 0.
int longest_unique_substr(const std::string& s, const int m)
{
if (s.empty()) return 0;
if (0 == m) return 0;
auto max = 1;
auto start = 0;
auto end = 0;
map<char, int> counts;
counts[s[0]] = 1;
for (auto i = 1; i < s.length(); ++i)
{
end = i;
if (counts.find(s[i]) != counts.end())
{
// Repeated character: distinct count unchanged, window just grows.
counts[s[i]] += 1;
}
else
{
counts[s[i]] = 1;
if (counts.size() > m)
{
// New character pushed us past m distinct: advance start until one
// character's count drops to zero and evict it.
while (start < end)
{
counts[s[start]] -= 1;
if (0 == counts[s[start]])
{
counts.erase(s[start]);
++start;
break;
}
++start;
}
}
}
if (end - start + 1 > max)
{
max = end - start + 1;
}
}
return max;
}
Assuming valid input:
// Minimal binary tree node with manual ownership.
class BstNode
{
public:
    BstNode(int _data) : data(_data), left(nullptr), right(nullptr) {};
    int data;
    BstNode* left;
    BstNode* right;
};
// Rebuilds a binary tree from its preorder (pre) and inorder (in)
// traversals of length n, assuming the input describes a valid tree.
// pre[0] is the root; its position in the inorder sequence splits the
// remaining elements into the left and right subtrees.
BstNode* rebuild_bst(int* pre, int* in, int n)
{
    if (0 == n) return nullptr;
    const auto root_value = pre[0];
    auto root = new BstNode(root_value);
    // Size of the left subtree = index of the root value in the inorder array.
    auto split = 0;
    while (split < n && in[split] != root_value) ++split;
    root->left = rebuild_bst(pre + 1, in, split);
    root->right = rebuild_bst(pre + split + 1, in + split + 1, n - split - 1);
    return root;
}
// Backtracking search for an arithmetic expression over all values in v
// (each used exactly once, combined with +, -, *, / and parentheses) that
// evaluates to target t. `e` holds the textual expression for each value in
// v. On success, r receives the winning expression and true is returned.
// Division is integer (truncating) and is skipped when the divisor is 0.
// Improvements vs. original: the six copy-pasted push/recurse/pop segments
// are collapsed into one backtracking helper lambda, and the loop indices
// use size_t to match v.size() instead of mixing int with size_t. The
// operator order (a+b, a*b, a-b, b-a, a/b, b/a) is unchanged, so the
// expression found for a given input is the same as before.
bool find_exp_aux(std::vector<int>& v, std::vector<std::string>& e, const int t, std::string& r)
{
    if (v.empty()) return false;
    const auto n = v.size();
    if (1 == n)
    {
        if (t == v[0])
        {
            r = e[0];
            return true;
        }
        return false;
    }
    for (size_t i = 0; i < n; ++i)
    {
        const auto v1 = v[i];
        const auto e1 = e[i];
        for (size_t j = i + 1; j < n; ++j)
        {
            const auto v2 = v[j];
            const auto e2 = e[j];
            // Operands left after removing v[i] and v[j] (erase j first, j > i).
            auto vc(v);
            auto ec(e);
            vc.erase(vc.begin() + j);
            vc.erase(vc.begin() + i);
            ec.erase(ec.begin() + j);
            ec.erase(ec.begin() + i);
            // Pushes one combined value/expression, recurses, then backtracks.
            auto attempt = [&](int value, const std::string& expr)
            {
                vc.push_back(value);
                ec.push_back(expr);
                const auto found = find_exp_aux(vc, ec, t, r);
                ec.pop_back();
                vc.pop_back();
                return found;
            };
            if (attempt(v1 + v2, "(" + e1 + ")" + "+" + "(" + e2 + ")")) return true;
            if (attempt(v1 * v2, "(" + e1 + ")" + "*" + "(" + e2 + ")")) return true;
            if (attempt(v1 - v2, "(" + e1 + ")" + "-" + "(" + e2 + ")")) return true;
            if (attempt(v2 - v1, "(" + e2 + ")" + "-" + "(" + e1 + ")")) return true;
            if (0 != v2 && attempt(v1 / v2, "(" + e1 + ")" + "/" + "(" + e2 + ")")) return true;
            if (0 != v1 && attempt(v2 / v1, "(" + e2 + ")" + "/" + "(" + e1 + ")")) return true;
        }
    }
    return false;
}
// Convenience wrapper: seeds the textual expressions with the decimal form
// of each value and delegates to the backtracking search.
bool find_exp(const std::vector<int>& v, const int t, std::string& r)
{
    std::vector<int> values(v);
    std::vector<std::string> expressions;
    expressions.reserve(values.size());
    for (const auto value : values)
    {
        expressions.push_back(std::to_string(value));
    }
    return find_exp_aux(values, expressions, t, r);
}
Following are three solutions:
1. A simple recursive solution for the box stacking problem in 3 dimensions.
2. A dynamic programming solution for the box stacking problem in 3 dimensions.
3. A dynamic programming solution for the box stacking problem in K dimensions.
Solution 1:
// Axis-aligned box; any of the three dimensions may serve as the height
// when the box is rotated (see msha).
class Box {
public:
int h, w, l;
Box(int _h, int _w, int _l) : h(_h), w(_w), l(_l) {};
};
// Recursive box stacking: `ch` is the accumulated height so far, (bh, bw)
// is the base of the last box placed. For every box, each of the three
// orientations is tried (the dimension excluded from the base becomes the
// added height); a box may be placed when its base fits strictly inside
// the current base in either rotation. Boxes can be reused because the base
// strictly shrinks at every step, which also guarantees termination.
int msha(const std::vector<Box>& boxes, int ch, int bh, int bw)
{
auto max = ch;
for (const auto& b : boxes)
{
// Base (h, w), height contribution l.
if ((b.h < bh && b.w < bw) || (b.w < bh && b.h < bw))
{
max = std::max(max, msha(boxes, ch + b.l, b.h, b.w));
}
// Base (h, l), height contribution w.
if ((b.h < bh && b.l < bw) || (b.l < bh && b.h < bw))
{
max = std::max(max, msha(boxes, ch + b.w, b.h, b.l));
}
// Base (l, w), height contribution h.
if ((b.l < bh && b.w < bw) || (b.w < bh && b.l < bw))
{
max = std::max(max, msha(boxes, ch + b.h, b.l, b.w));
}
}
return max;
}
// Entry point: tallest achievable stack starting from an unbounded base,
// so any box may sit at the bottom.
int msh(const std::vector<Box>& boxes)
{
    const auto unbounded = std::numeric_limits<int>::max();
    return msha(boxes, 0, unbounded, unbounded);
}
Solution 2:
// Axis-aligned box; any of the three dimensions may serve as the height
// when the box is rotated (see get_dims).
class Box {
public:
    int h, w, l;
    Box(int _h, int _w, int _l) : h(_h), w(_w), l(_l) {};
};
// Extracts the dimensions of b for one of its three distinct orientations
// (type 0..2): (h, w) form the base, l is the stacking-height contribution.
// Unknown types leave the outputs untouched.
void get_dims(const Box& b, int type, int& h, int &w, int& l)
{
    if (0 == type)
    {
        h = b.h; w = b.w; l = b.l;
    }
    else if (1 == type)
    {
        h = b.l; w = b.w; l = b.h;
    }
    else if (2 == type)
    {
        h = b.h; w = b.l; l = b.w;
    }
}
// DP box stacking over the 3n box orientations: d[i] is the tallest stack
// with orientation i on top. Orientation i may sit on j when i's base fits
// strictly inside j's base (in either rotation).
// Improvements vs. original: the raw new[]/delete[] buffer is replaced with
// std::vector (RAII — no manual delete, exception safe), and the loop
// indices use size_t to match boxes.size() instead of mixing int/size_t.
int msh_dp(const std::vector<Box>& boxes)
{
    const auto n = boxes.size() * 3;
    std::vector<int> d(n);
    for (size_t i = 0; i < n; ++i)
    {
        auto h = 0, w = 0, l = 0;
        get_dims(boxes[i / 3], i % 3, h, w, l);
        d[i] = l;  // a lone orientation contributes its own height
    }
    // Relax repeatedly; the author's note: each box may appear at most twice
    // in an optimal stack, so a bounded number of passes suffices.
    for (auto k = 0; k < 4; ++k)
    {
        for (size_t i = 0; i < n; ++i)
        {
            auto hi = 0, wi = 0, li = 0;
            get_dims(boxes[i / 3], i % 3, hi, wi, li);
            for (size_t j = 0; j < n; ++j)
            {
                auto hj = 0, wj = 0, lj = 0;
                get_dims(boxes[j / 3], j % 3, hj, wj, lj);
                if ((hi < hj && wi < wj) || (hi < wj && wi < hj))
                {
                    d[i] = std::max(d[i], d[j] + li);
                }
            }
        }
    }
    auto max = 0;
    for (size_t i = 0; i < n; ++i)
    {
        max = std::max(max, d[i]);
    }
    return max;
}
Solution 3:
namespace BoxStacking {
// K-dimensional box: exactly K dimensions, validated at construction.
// NOTE: depends on boost::format for the error message.
template <size_t K = 3>
class Box
{
public:
std::vector<int> dims;
Box(std::initializer_list<int> _dims)
{
if (_dims.size() != K) throw std::invalid_argument((boost::format("Box<%1%>() invalid number of dimensions") % K).str());
dims = _dims;
}
};
// Swap-based permutation generator: fixes one element per slot k, recurses
// on the rest, and restores v before trying the next candidate. Appends
// every full permutation (k == n) to p.
void get_perms_aux(std::vector<int>& v, std::list<std::vector<int>>& p, int k, int n)
{
    if (k == n)
    {
        p.push_back(v);
        return;
    }
    for (auto pos = k; pos < n; ++pos)
    {
        std::swap(v[k], v[pos]);       // choose element pos for slot k
        get_perms_aux(v, p, k + 1, n);
        std::swap(v[k], v[pos]);       // backtrack
    }
}
// Appends all n! permutations of v to p, leaving v itself untouched (the
// recursion works on a private copy).
void get_perms(const std::vector<int>& v, std::list<std::vector<int>>& p)
{
    auto scratch = v;
    get_perms_aux(scratch, p, 0, static_cast<int>(scratch.size()));
}
// Generalized box-stacking DP over the n*K orientations of K-dimensional
// boxes: d[i] is the tallest stack with orientation i on top. Orientation i
// fits on j when SOME permutation of i's K-1 base dims is strictly smaller,
// elementwise, than j's K-1 base dims. O(n^2 * K!) time (see the note that
// follows this namespace).
template <size_t K>
int msh_dp(const std::vector<Box<K>>& boxes)
{
auto n = boxes.size() * K;
auto d = new int[n];
for (auto i = 0; i < n; ++i)
{
// Height contribution of orientation i is the dim chosen as "height".
d[i] = boxes[i / K].dims[i % K];
}
// repeat 4 times because every box could be selected at most twice so the number of possible stackings is (2n)^2 = 4*n^2
for (auto l = 0; l < 4; ++l)
{
for (auto i = 0; i < n; ++i)
{
auto idims = boxes[i / K].dims;
auto height_value = idims[i % K];
// Remaining K-1 dims form the base; try every ordering of them.
idims.erase(std::begin(idims) + (i % K));
std::list<std::vector<int>> perms;
get_perms(idims, perms);
for (auto j = 0; j < n; ++j)
{
auto jdims = boxes[j / K].dims;
jdims.erase(std::begin(jdims) + (j % K));
for (const auto& p : perms)
{
auto fits = true;
auto m = jdims.begin();
// NOTE(review): this inner `n` shadows the outer orientation count n;
// it works, but renaming the iterator would avoid confusion.
auto n = p.begin();
for (; n != p.end(); ++m, ++n)
{
if (*m <= *n)
{
fits = false;
break;
}
}
if (fits)
{
d[i] = std::max(d[i], d[j] + height_value);
}
}
}
}
}
auto max = 0;
for (auto i = 0; i < n; ++i) max = std::max(max, d[i]);
delete[] d;
return max;
}
// Smoke test for the 4-dimensional instantiation.
// NOTE(review): the results are computed but never asserted — this only
// verifies that msh_dp<4> compiles and runs without crashing.
void test_msh_dp()
{
std::vector<Box<4>> boxes;
boxes.push_back({ 2, 2, 2, 2 });
boxes.push_back({ 4, 4, 4, 4 });
boxes.push_back({ 8, 6, 4, 2 });
auto msh = msh_dp(boxes);
boxes.push_back({ 7, 5, 3, 1 });
msh = msh_dp(boxes);
}
};
Note that the generalized solution for K dimensions has a run time complexity of O(n^2*K!) and a space complexity of O(n*K + K!). This is because all permutations of the dimensions have to be checked for a fit (in 3 dimensions only 6 permutations are tested for each box). Also note that a box may be selected in the stack at most twice, which is the reason for the 4 repetitions in the outer loop in both dynamic programming solutions.
- Omri.Bashari May 24, 2015

Recursive solution
#include <string>
#include <vector>
#include <boost/tokenizer.hpp>
int num_true(const std::vector<std::string>& exp, int s, int e);
int num_false(const std::vector<std::string>& exp, int s, int e);
// Counts the parenthesizations of a boolean expression that evaluate to
// true. The input is space-separated, e.g. "true ^ false | true"; boost's
// tokenizer splits it into operand/operator tokens before the recursive
// count runs on the token vector.
int num_ways_evaluate_true(const std::string& exp)
{
boost::char_separator<char> sep(" ");
boost::tokenizer<boost::char_separator<char>> tokens(exp, sep);
std::vector<std::string> exp_v;
for (const auto& token : tokens)
{
exp_v.push_back(token);
}
return num_true(exp_v, 0, exp_v.size() - 1);
}
// Number of parenthesizations of exp[s..e] (alternating operand/operator
// tokens) that evaluate to true. Splits at every operator position and
// combines the left/right sub-counts according to the operator's truth
// table. Exponential without memoization.
int num_true(const std::vector<std::string>& exp, int s, int e)
{
    if (s == e) return ("true" == exp[s]) ? 1 : 0;
    auto ways = 0;
    for (auto op = s + 1; op < e; op += 2)
    {
        const auto lt = num_true(exp, s, op - 1);
        const auto lf = num_false(exp, s, op - 1);
        const auto rt = num_true(exp, op + 1, e);
        const auto rf = num_false(exp, op + 1, e);
        if ("&" == exp[op])
        {
            ways += lt * rt;                      // both sides true
        }
        else if ("|" == exp[op])
        {
            ways += lt * rt + lf * rt + lt * rf;  // at least one side true
        }
        else if ("^" == exp[op])
        {
            ways += lf * rt + lt * rf;            // exactly one side true
        }
    }
    return ways;
}
// Mirror of num_true: parenthesizations of exp[s..e] that evaluate to false.
int num_false(const std::vector<std::string>& exp, int s, int e)
{
    if (s == e) return ("false" == exp[s]) ? 1 : 0;
    auto ways = 0;
    for (auto op = s + 1; op < e; op += 2)
    {
        const auto lt = num_true(exp, s, op - 1);
        const auto lf = num_false(exp, s, op - 1);
        const auto rt = num_true(exp, op + 1, e);
        const auto rf = num_false(exp, op + 1, e);
        if ("&" == exp[op])
        {
            ways += lf * rf + lf * rt + lt * rf;  // at least one side false
        }
        else if ("|" == exp[op])
        {
            ways += lf * rf;                      // both sides false
        }
        else if ("^" == exp[op])
        {
            ways += lf * rf + lt * rt;            // sides agree
        }
    }
    return ways;
}
Here's a generalized regex substring matcher. Supports things like a+ (1 or more 'a'), ?* and .* (zero or more wildcards), as well as .+ and ?+.
// Backtracking pattern matcher. Despite the name, the match is anchored at
// the start of s and succeeds as soon as the pattern p is exhausted (so p
// matches a *prefix* of s). Supported pattern syntax:
//   '?'  matches any single character
//   'x*' zero or more of x ('.*' and '?*' act as zero-or-more wildcards)
//   'x+' one or more of x ('.+' and '?+' act as one-or-more wildcards)
bool is_regex_substr(const char* s, const char* p)
{
if (0 == s || 0 == p) return false;
if (0 == *p) return true;
// s exhausted but p still demands a concrete (non-starred) element.
if (0 == *s && 0 != *p && '*' != *(p + 1)) return false;
if ('*' == *(p + 1))
{
// Zero occurrences first, then greedily consume matching characters.
if (is_regex_substr(s, p + 2)) return true;
while (*s != 0 && (*s == *p || *p == '.' || *p == '?'))
{
if (is_regex_substr(s + 1, p + 2)) return true;
++s;
}
return false;
}
if ('+' == *(p + 1))
{
// Like '*' but at least one occurrence must be consumed.
while (*s != 0 && (*s == *p || *p == '.' || *p == '?'))
{
if (is_regex_substr(s + 1, p + 2)) return true;
++s;
}
return false;
}
// Plain single-character match ('?' is a single-char wildcard).
if ('?' == *p || *s == *p) return is_regex_substr(s + 1, p + 1);
return false;
}
// usage
// Pattern reads: 'c', any char, 'r', one-or-more 'e', any run, one-or-more
// 'r', one-or-more of anything, zero-or-more 'd', 'p' — evaluates to true.
auto irs = is_regex_substr("careercup", "c?re+.*r+?+d*p");
void gen_i18n(const std::string& s)
{
auto len = s.length();
for (auto i = len - 2; i >= 1; --i)
{
for (auto p = 1; p + i <= len - 1; ++p)
{
std::ostringstream oss;
oss << s.substr(0, p);
oss << i;
oss << s.substr(p + i, len - (p + i));
std::cout << oss.str() << std::endl;
}
}
std::cout << s << std::endl;
}
// Counts the decodings of a digit string where "1" -> 'A' ... "26" -> 'Z',
// with O(1) extra space (rolling pair of Fibonacci-style counters).
// Fixes vs. original: (1) `num < 26` wrongly excluded "26" ('Z'); now
// num <= 26. (2) a '0' digit was always counted as a standalone decoding,
// so "10" returned 2 instead of 1 and invalid inputs like "100" returned a
// positive count; '0' can now only be consumed as part of "10" or "20".
int num_strings_o1space(const std::string& s)
{
    if (s.empty()) return 1;    // one (empty) decoding
    if ('0' == s[0]) return 0;  // a leading zero cannot be decoded
    const int len = s.length();
    int prev = 1;  // decodings of s[0..i-2]
    int curr = 1;  // decodings of s[0..i-1]
    for (int i = 1; i < len; ++i)
    {
        int ways = 0;
        if (s[i] != '0')
        {
            ways += curr;  // decode s[i] as a single letter
        }
        const int num = (s[i - 1] - '0') * 10 + (s[i] - '0');
        if (s[i - 1] != '0' && num <= 26)
        {
            ways += prev;  // decode s[i-1..i] as one letter (10..26)
        }
        prev = curr;
        curr = ways;
    }
    return curr;  // zero when the string has no valid decoding
}
Here's an implementation
#include <list>
#include <boost/unordered_map.hpp>
#include <boost/unordered_set.hpp>
template <typename K, typename V>
using map = boost::unordered_map<K,V>;
template <typename T>
using set = boost::unordered_set < T > ;
// Dependency-graph node for topological sorting: `incoming` counts down as
// prerequisites are emitted, `outgoing` lists dependents. Raw pointers are
// owned and freed by get_order.
class Node
{
public:
Node(char _value) : value(_value) {};
char value;
set<Node*> outgoing;
set<Node*> incoming;
};
// topological sort
// Kahn's algorithm over a char dependency map (dep[x] = set of chars that
// must come after x). Appends a valid ordering to `out`.
// NOTE(review): if the input contains a cycle, its nodes never reach zero
// incoming edges and are silently omitted from `out` — no error is raised.
// NOTE(review): the Node allocations are freed at the end, but would leak
// if anything threw mid-way; smart pointers would make this exception safe.
void get_order(const map<char, set<char>>& dep, std::list<char>& out)
{
// Build Node objects lazily for every char seen on either side of an edge.
map<char, Node*> cn;
for (auto i : dep)
{
if (cn.find(i.first) == std::end(cn))
{
cn[i.first] = new Node(i.first);
}
for (auto j : i.second)
{
if (cn.find(j) == std::end(cn))
{
cn[j] = new Node(j);
}
cn[i.first]->outgoing.insert(cn[j]);
cn[j]->incoming.insert(cn[i.first]);
}
}
// Seed the work queue with all source nodes (no prerequisites).
std::list<Node*> q;
for (auto i : cn)
{
if (i.second->incoming.empty()) q.push_back(i.second);
}
while (!q.empty())
{
auto n = q.front();
q.pop_front();
out.push_back(n->value);
// Emitting n satisfies one prerequisite of each dependent.
for (auto i : n->outgoing)
{
i->incoming.erase(n);
if (i->incoming.empty()) q.push_back(i);
}
}
for (auto i : cn)
{
delete i.second;
}
}
// usage
// Builds the dependency graph A->{B,C}, B->{D,E,F}, D->F, F->E and asks for
// a topological order (A precedes B and C; B precedes D, E, F; D precedes
// F; F precedes E). Exact output order among independent nodes depends on
// the unordered containers' iteration order.
map<char, set<char>> dep;
std::list<char> order;
dep['A'].insert('B');
dep['A'].insert('C');
dep['B'].insert('D');
dep['B'].insert('E');
dep['B'].insert('F');
dep['D'].insert('F');
dep['F'].insert('E');
get_order(dep, order);
using date_t = int;  // or use any comparable date representation
// A closed [start, end] interval of dates.
class Interval
{
public:
    Interval(date_t _start, date_t _end): start(_start), end(_end) {};
    date_t start;
    date_t end;
};
// Splits a set of (possibly overlapping) intervals into consecutive,
// non-overlapping ranges whose boundaries are the distinct start/end dates
// of the inputs: each emitted range runs from the previous boundary + 1 up
// to the next boundary. Assumes `in` is non-empty.
void split(const std::vector<Interval>& in, std::vector<Interval>& out)
{
    std::vector<date_t> boundaries;
    boundaries.reserve(in.size() * 2);
    for (const auto& interval : in)
    {
        boundaries.push_back(interval.start);
        boundaries.push_back(interval.end);
    }
    std::sort(std::begin(boundaries), std::end(boundaries));
    auto range_start = boundaries.front();
    for (const auto date : boundaries)
    {
        if (date == range_start) continue;  // skip the opening boundary
        out.push_back(Interval(range_start, date));
        range_start = date + 1;             // next range begins just after
    }
}
// usage
std::vector<Interval> in, out;
in.push_back(Interval(6, 25));
in.push_back(Interval(3, 20));
split(in, out);
#define MAX 3
// N-ary tree node with at most MAX children; unused child slots are null
// and num_child tracks how many are populated (see get_tree).
struct node {
int data;
struct node *child[MAX];
int num_child;
};
typedef struct node node_t;
// create tree from post order and pre order
node_t* get_tree(int pre[], int post[], int pre_start, int pre_end, int post_start, int post_end)
{
if (pre_start > pre_end || post_start > post_end || pre[pre_start] != post[post_end] || pre_end - pre_start != post_end - post_start) throw std::exception("Invalid input");
auto rv = new node_t;
rv->data = pre[pre_start];
rv->num_child = 0;
auto next_pre_start = pre_start + 1;
auto next_pre_end = next_pre_start; // will find this;
auto next_post_end = post_end - 1; // will find this;
auto next_post_start = post_start;
for (auto i = 0; i < MAX; ++i)
{
if (next_pre_start > pre_end)
{
// no more children, fill the rest with nulls
for (auto j = i; j < MAX; ++j)
{
rv->child[j] = nullptr;
}
return rv;
}
for (; next_post_end >= next_post_start; --next_post_end)
{
if (post[next_post_end] == pre[next_pre_start]) break;
}
if (next_post_end < next_post_start) throw std::exception("Invalid input");
auto range = next_post_end - next_post_start;
next_pre_end += range;
rv->child[i] = get_tree(pre, post, next_pre_start, next_pre_end, next_post_start, next_post_end);
++rv->num_child;
next_pre_start = ++next_pre_end;
next_post_start = next_post_end + 1;
next_post_end = post_end - 1;
}
return rv;
}
// usage: rebuild the tree below from its pre-order and post-order traversals.
/*
3
/ \
5 4
/ \
6 7
*/
int pre[] = { 3, 5, 6, 7, 4 };   // pre-order traversal
int post[] = { 6, 7, 5, 4, 3 };  // post-order traversal
auto n = get_tree(pre, post, 0, 4, 0, 4); // index ranges are inclusive
Generate the BFS queue of both subtrees.
The trick is that the BFS queue for the right subtree is generated right-to-left, i.e., adding the right child before the left child and hence processing it first.
Add null children to the queue as well, so that structure — not just values — is compared.
Finally, compare the two queues element by element.
// Checks whether a binary tree is "foldable": the left and right subtrees
// are mirror images of each other (the original also compares node values).
// NOTE(review): this snippet assumes a binary node_t with left/right
// members, unlike the MAX-ary node defined earlier — confirm intended type.
//
// Fixes over the original:
//  * `nullptr = *ri` was assignment to a literal (does not compile);
//    both occurrences changed to `==`;
//  * li/ri are iterators over node_t*, so the node is reached via
//    (*li)->data, not li->data.
bool is_foldable(node_t* n)
{
    if (nullptr == n) return true;
    std::list<node_t*> s_left, s_right;
    get_bfs(n->left, s_left, true);    // left subtree, left-to-right order
    get_bfs(n->right, s_right, false); // right subtree, right-to-left order
    if (s_left.size() != s_right.size()) return false;
    auto li = s_left.begin();
    auto ri = s_right.begin();
    for (; li != s_left.end(); ++li, ++ri)
    {
        if (nullptr == *li && nullptr == *ri) continue; // both slots empty: still mirrored
        if (nullptr == *li || nullptr == *ri) return false; // one empty, one not
        if ((*li)->data != (*ri)->data) return false;
    }
    return true;
}
// Breadth-first traversal of the subtree rooted at n, appending every
// visited slot — including null children — to s_out. When ltr is true the
// children are enqueued left-then-right; otherwise right-then-left, so the
// resulting sequence mirrors the left-to-right one.
void get_bfs(node_t* n, std::list<node_t*>& s_out, bool ltr)
{
    std::list<node_t*> pending;
    pending.push_back(n);
    while (!pending.empty())
    {
        node_t* current = pending.front();
        pending.pop_front();
        s_out.push_back(current);
        if (nullptr == current) continue; // null slots are recorded but not expanded
        node_t* first  = ltr ? current->left  : current->right;
        node_t* second = ltr ? current->right : current->left;
        pending.push_back(first);
        pending.push_back(second);
    }
}
This is the standard solution.
There is even a more efficient way - map all characters in the alphabet to the first 26 prime numbers and calculate the hash value of a word as the multiplication of those values. By the fundamental theorem of arithmetic, the hash is guaranteed to be unique for every different anagram set, and the same for all anagrams within a set. This reduces the complexity from O(nlgn) of sorting to O(n) in the calculation of the hash value. (Caveat: the product overflows a fixed-width integer for longer words, so in practice you need a big-integer type or an overflow-aware scheme.)
Your answer is correct if a tile is allowed to be visited more than twice. The provided example isn't making this very clear. If you think about a traditional domino game, tiles can't be connected to more than two tiles so "inner loops" aren't allowed. This then turns into the Hamiltonian cycle problem, which is NP-complete. It's possible that this specific question solution is not NP-complete as some graphs (e.g. a complete graph) can be determined to have a Hamiltonian cycle without exhaustive search.
- Omri.Bashari May 09, 2015
I should clarify - I call the "size" of the layer to be the size of the rectangle side, not the total number of elements in the layer.
- Omri.Bashari May 08, 2015
- Omri.Bashari May 10, 2021