Lazy White Falcon
PyPy 3 solution
Here is a HackerRank Lazy White Falcon solution.
Hello. Can anyone please help me with this problem? I'm getting a timeout on the test case with 100000 queries. I suspect the problem is my algorithm for computing the sum. The code is below:
Is there a better way to implement it?
#include <cstdio>
#include <iostream>
#include <vector>
#include <algorithm>
#include <cstring>
using namespace std;
const int N = 100010; const int LG_N = 20;
vector<int> adj[N]; int n, q;
int tree[2*N];                              // Fenwick (BIT) over the Euler tour
vector<int> euler; int first[N], last[N];   // first/last occurrence of each node in the tour
int H[N], P[N][LG_N]; int val[N];           // depth, binary-lifting ancestors, node values

// Euler tour + binary-lifting table
void dfs(int u, int p, int h) {
    H[u] = h; P[u][0] = p;
    for (int i = 1; i < LG_N; i++) P[u][i] = P[P[u][i-1]][i-1];
    first[u] = euler.size(); euler.push_back(u);
    for (int v : adj[u]) { if (v == p) continue; dfs(v, u, h + 1); }
    last[u] = euler.size(); euler.push_back(u);
}

int lca(int u, int v) {
    if (H[u] < H[v]) swap(u, v);
    for (int i = LG_N - 1; i >= 0; i--) if (H[P[u][i]] >= H[v]) u = P[u][i];
    if (u == v) return u;
    for (int i = LG_N - 1; i >= 0; i--) if (P[u][i] != P[v][i]) { u = P[u][i]; v = P[v][i]; }
    return P[u][0];
}

// point update / prefix query on the BIT
void update(int idx, int val) { while (idx < (int)euler.size()) { tree[idx] += val; idx += idx & (-idx); } }
int query(int idx) { int ans = 0; while (idx > 0) { ans += tree[idx]; idx -= idx & (-idx); } return ans; }

int main() {
}
Finally AC with HLD and segment trees ;)
Can anyone tell why this code gives a runtime error on a few test cases?
import java.util.*; import java.io.*;
class Solution {
    static PrintWriter out = new PrintWriter(System.out);
    static int N = 60000, LOGN = 20;
    static int n, q, ptr, chainNo;
    static int subSize[] = new int[N], depth[] = new int[N], st[] = new int[4*N];
    static int chainInd[] = new int[N], chainHead[] = new int[N], base[] = new int[N], posInBase[] = new int[N], node[] = new int[N];
    static int P[][] = new int[LOGN][N];
    static LinkedList<Integer> g[] = new LinkedList[N];

    static int LCA(int x, int y) {
        if (depth[x] < depth[y]) { int t = x; x = y; y = t; }
        int lg = 1; for (; (1 << lg) <= depth[x]; lg++); lg--;
        for (int i = lg; i >= 0; i--) if (depth[x] - (1 << i) >= depth[y]) x = P[i][x];
        if (x == y) return x;
        for (int i = lg; i >= 0; i--) if (P[i][x] != -1 && P[i][x] != P[i][y]) { x = P[i][x]; y = P[i][y]; }
        return P[0][x];
    }

    // iterative segment tree (sums) over the HLD base array
    static void build() { for (int i = n - 1; i > 0; --i) st[i] = st[i<<1] + st[i<<1|1]; }
    static void modify(int p, int value) { for (st[p += n] = value; p > 1; p >>= 1) st[p>>1] = st[p] + st[p^1]; }
    static int query(int l, int r) { // sum on interval [l, r)
        int res = 0;
        for (l += n, r += n; l < r; l >>= 1, r >>= 1) { if ((l & 1) == 1) res += st[l++]; if ((r & 1) == 1) res += st[--r]; }
        return res;
    }

    static void dfs(int a, int p, int level) {
        depth[a] = level; subSize[a] = 1; P[0][a] = p;
        for (Integer I : g[a]) if (I != p) { dfs(I, a, level + 1); subSize[a] += subSize[I]; }
    }

    static int query_up(int u, int v) {
        int uchain, vchain = chainInd[v], ret = 0;
        while (true) {
            uchain = chainInd[u];
            if (uchain == vchain) { ret += query(posInBase[v], posInBase[u] + 1); break; }
            ret += query(posInBase[chainHead[uchain]], posInBase[u] + 1);
            u = P[0][chainHead[uchain]];
        }
        return ret;
    }

    static void update(int node, int val) { int pos = posInBase[node]; modify(pos, val); }

    static void hld(int curNode, int p) {
        if (chainHead[chainNo] == -1) chainHead[chainNo] = curNode;
    }

    public static void main(String[] args) {
        Arrays.fill(chainHead, -1);
        n = ni(); q = ni(); int value[] = new int[n]; // st = new int[2*n];
        for (int i = 0; i
        for (int i = 1; i <= q; i++) {
            int type = ni();
            if (type == 1) { int u = ni(); int x = ni(); value[u] = x; update(u, x); }
            else { int u = ni(); int v = ni(); int lca = LCA(u, v); int ans = query_up(u, lca) + query_up(v, lca) - value[lca]; System.out.println(ans); }
        }
        // out.flush();
    }

    static FastReader sc = new FastReader();
    static class FastReader { BufferedReader br; StringTokenizer st; }
}
Quite a tedious problem. The catch is that the tree may be very unbalanced, so you cannot directly “walk” the tree to aggregate.
I don't know if there is an official name for the summarized tree representation below, but I'm reasonably proud that I invented it independently:
The first modification is that the node "set" operation has to be expressed in terms of a node "increment" operation (just remember each node's last value separately).
Then we come to the concept of an aggregate tree representation, in which every node stores a summary of all of its ancestors up to the root. With S(x) denoting that root-to-x aggregate, the answer to a path query between L and R is S(L) + S(R) - 2*S(LCA(L,R)) + value(LCA(L,R)).
We obtain such an aggregate tree if every node increment is performed as an increment of the node's entire subtree.
Here is how to perform the whole-subtree increment efficiently:
The strategy lies in the DFS ordering of the tree, where every subtree occupies a contiguous range. Updates then turn into interval-increment commands, which we can perform with a Fenwick or segment tree in O(log N) per update and O(log N) per point query (see the sketch below).
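A minimal C++ sketch of that interval-increment idea, assuming a 1-based Fenwick tree and hypothetical tin[u]/tout[u] arrays holding the entry/exit positions of node u in the DFS order (these names and the setNode helper are illustrative, not taken from any post in this thread):

#include <vector>
using namespace std;

struct Fenwick {
    vector<long long> bit;
    Fenwick(int n) : bit(n + 1, 0) {}
    void add(int i, long long d) {               // point add at index i
        for (; i < (int)bit.size(); i += i & -i) bit[i] += d;
    }
    long long prefix(int i) const {              // sum of indices 1..i
        long long s = 0;
        for (; i > 0; i -= i & -i) s += bit[i];
        return s;
    }
};

// Interval increment on [l, r] with point query: the standard "difference" trick.
void rangeAdd(Fenwick& f, int l, int r, long long d) { f.add(l, d); f.add(r + 1, -d); }
long long pointQuery(const Fenwick& f, int i) { return f.prefix(i); }

// Setting node u from its old value to x increments u's whole subtree,
// which is the contiguous DFS-order interval [tin[u], tout[u]].
// Afterwards pointQuery(f, tin[v]) equals the root-to-v aggregate S(v).
void setNode(Fenwick& f, const vector<int>& tin, const vector<int>& tout,
             int u, long long x, long long oldValue) {
    rangeAdd(f, tin[u], tout[u], x - oldValue);
}

With that, a path query reduces to two point queries at tin[L] and tin[R] plus the LCA correction above.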
The lowest common ancestor (LCA) routine has to be accelerated too. That part is relatively straightforward: store pointers to the 1st, 2nd, 4th, 8th, 16th, etc. ancestor in each node so that the ascend operation takes O(log N), then use a binary search to find the LCA in O(log N * log N).
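A small C++ sketch of that ancestor-pointer idea, assuming hypothetical up[j][v] (the 2^j-th ancestor of v, with the root as its own ancestor) and dep[v] tables filled by a prior DFS; the names are illustrative, not from the post, and the search below is the O(log N * log N) binary search described above:

#include <vector>
#include <algorithm>
using namespace std;

const int LOG = 20;
vector<int> up[LOG];   // up[j][v] = 2^j-th ancestor of v; up[j][root] = root
vector<int> dep;       // dep[v] = distance of v from the root

int kthAncestor(int v, int k) {            // ascend k levels in O(log N)
    for (int j = 0; j < LOG; ++j)
        if ((k >> j) & 1) v = up[j][v];
    return v;
}

int lca(int u, int v) {
    if (dep[u] < dep[v]) swap(u, v);
    u = kthAncestor(u, dep[u] - dep[v]);   // bring both nodes to the same depth
    if (u == v) return u;
    // binary search for the smallest k whose k-th ancestors coincide
    int lo = 1, hi = dep[u];
    while (lo < hi) {
        int mid = (lo + hi) / 2;
        if (kthAncestor(u, mid) == kthAncestor(v, mid)) hi = mid;
        else lo = mid + 1;
    }
    return kthAncestor(u, lo);             // O(log N) checks, O(log N) each
}

The same up[] table also supports the usual two-pointer lifting in O(log N) per query; the binary-search form above simply matches the description in the comment.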
Here is the problem solution in Python, Java, C++ and C: https://programs.programmingoneonone.com/2021/05/hackerrank-lazy-white-falcon-problem-solution.html
Hey @d_kokarev, thanks for the input. I've looked at Fenwick trees and they seem like the way to go. But I still have a hard time with the mapping between the tree/graph nodes and the Fenwick tree indexes.
I assume you refer here to the so-called "edgeTo" array, which encodes, after a DFS or BFS of a tree/graph, the connections between the nodes. I'm still at a loss, as I mentioned, about the mapping. Do you have any pointers on that? I would appreciate any help. Thanks in advance.