import java.io.*;
import java.util.*;
import java.text.*;
import java.math.*;
import java.util.regex.*;

public class Solution {
    final static long mod = 1000000007;

    static void DFS(long[][] dp, int[] pp, int pos, int chunksize){
        //assume it isn't ever called on pos == n?
        if(dp[pos][chunksize] != -1) return; //already solved.
        int newpos = pos+chunksize;
        int n = dp.length-1;
        int newmaxchunk = newpos == n ? 0 : pp[newpos];
        if(newpos == n){
            dp[pos][chunksize] = 1;
        } else {
            long res = 0;
            long mult = 1;
            for(int i=1; i<=newmaxchunk; i++){
                mult = (mult * (chunksize - (i-1))) % mod; //i think this is right... 50%
                DFS(dp, pp, newpos, i);
                res = (res + (dp[newpos][i] * mult)) % mod;
                //System.out.println(res);
            }
            dp[pos][chunksize] = res;
        }
    }

    // vv don't need this. use the n!/(n-k)! factor, because that is the answer.
    //@@@ ^ do not permute the subproblem! (don't permute on the current chunk size), because ^ that takes care of it!
    //static int nchoosek(int n, int k, long[] permutation){
    //@@@@@@@@@@@ I don't know what to do except to hope that none of the permutations
    //return permutation[n]/(permutation[n-k]*permutation[k]);
    //}
    //^ tabulation might be a lot faster than recursion. but let's see.

    public static void main(String[] args) {
        Scanner in = new Scanner(System.in);
        int n = in.nextInt();
        long[] a = new long[n];
        for(int a_i=0; a_i < n; a_i++){
            a[a_i] = in.nextLong();
        }
        int[] pp = new int[n];
        pp[n-1] = 1;
        //int streak = 1;
        for(int i=n-2; i>=0; i--){
            //they are never equal.
            pp[i] = a[i] < a[i+1] ? pp[i+1] + 1 : 1;
        }
        long[][] dp = new long[n+1][n+1]; //dp[pos][chunk.size], pos == n implies termination.
        for(int i=0; i<=n; i++){
            Arrays.fill(dp[i], -1); //-1 == not solved yet, for the memoization check in DFS.
        }
        //(driver: assuming the answer sums dp[0][c] over every feasible first-chunk size c;
        // checked by hand against the 1 2 3 4 enumeration at the bottom of this file.)
        long ans = 0;
        for(int c=1; c<=pp[0]; c++){
            DFS(dp, pp, 0, c);
            ans = (ans + dp[0][c]) % mod;
        }
        System.out.println(ans);
        //the chunk boundaries: is it just whether a[i] > a[i+1] ?
        //^ this is the only thing that matters i think
        //^ i think 50% i have the right intuition about this...
        //^
        //since thickness can only decrease/stay the same,
        //^ each time you eat a chunk, starting from a given position, the chunk.size == min(thickness, length of the longest increasing streak starting from here (might be as low as 1))
        //^ you update the thickness to be this chunk size (chunk size NEVER gets larger)
        //
        //^ i suspect that you must process starting from the left side (front), since that affects what kind of chunks you can produce later on?
        //^ pretty much...
        //^ well... it might not be true, i didn't think about it much, but let's assume it is true first. (must process from a[0]...)
        //^ what kind of dp[i] can you do?
        //dp[pos][chunk.size] == res... == how many different arrangements of V can satisfy this subproblem?
        //^ is pretty deterministic...
        //^ you could do memoization even? if recursion seems to be cleanest?... dunno yet.
        //pp[pos] == length of the longest increasing streak starting from here (might be 1)
        //^ i would say that dp[pos][chunk.size]'s solution discretely specifies what the first wave of elements looks like. (the first column when you look at V)
        //^ like... that first column MUST contain a[pos], ..., a[pos+chunk.size-1]
        //^^ so... when you are setting the next column (column 2), the number of ways you can arrange those elements to make a unique V == ((col.1.size) choose (col.2.size))
        //make a unique V == ((col.i-1.size) choose (col.i.size))
        //what is the base case?
        //dp[end][chunk.size == 0] = 1; //only one way to set up V for an empty list. (0-length V)
        //
        //^ how to merge into a supercase?
        //
        //btw... how much computation does this seem to be?
        //
        //the recursion case for dp[pos][chunk.size] means that you have accepted this chunk (a[pos..pos+chunk.size-1]) into the associated V.
        //^ it will call subcases dp[pos+chunk.size][1..min(pp[pos+chunk.size], chunk.size)]
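        //a tiny hand-worked trace of the recurrence above (worth re-checking, but it seems to line up):
        //  a = [1, 3, 2]  -->  pp = [2, 1, 1]
        //  dp[2][1] = 1                        (the chunk {2} reaches the end)
        //  dp[1][1] = dp[2][1] * P(1,1) = 1    (columns {3}, {2} single file)
        //  dp[0][1] = dp[1][1] * P(1,1) = 1    (columns {1}, {3}, {2})
        //  dp[0][2] = dp[2][1] * P(2,1) = 2    (first column {1,3}; the lone {2} can sit under either element)
        //  summing over the first column's size (what the driver above assumes): dp[0][1] + dp[0][2] = 3
        //here P(c,i) = c*(c-1)*...*(c-i+1) is exactly what the running mult in DFS computes.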
        //^ but... how much computation is this??
        //what if the whole array is just one long increasing run [1,2,...,n] ??
        //^ dude... O(n^3) is too much.
        //^ how many times? dp[pos][] will be computed for essentially all the chunk sizes.
        //^ and each one costs O(n) to tally up...
        //^ maybe that's what they wanted. but geez. isn't that a bit much to ask?
        //
        //1200*1200*11 == 10^7 ish.
        //what is the relationship between the dp[][] cells?
        //what if i compute dp[][] with a tabulation approach, and go from the end to the front? would that work? (see the bottom-up sketch a bit further down)
        //
        //each position has a BIT tree to do range prefix sum maintenance? that seems a bit overkill tho?
        //therefore you can query BITTREE[pos] for a particular [chunk.size], to get the prefix-sum tally of how many
        //
        //^ should i do recursion or tabulation?
        //^
        //^ OR, what if i process in a greedy order?
        //^ eg. start with the min chunk size first?
        //^ tally[pos] == how many V enumerations are possible starting at this pos, with the current running "chunk.size permitted" (should be monotonically increasing? since we are being greedy about which order to process chunk sizes?),
        //
        //^ can i prove that correct? or prove monotonicity about updating tally[pos]? *this is really important to do, if i can*
        //^ could i prove further that each time it is updated, the new chunk size it represents increments by +1?
        //eg. what about a very small array?
        //^ proof by contradiction.
        //^ if you can reach a certain cell maintaining a chunk.size of x
        //can you reach tally[pos] with the same chunk size several times?
        //oh yes. eg 2 2 (1), 2 1 1 (1)
        //tallyDegree[pos] == the current chunk size that the corresponding tally[pos] handles.
        //^ it might increase beyond the chunk size that is actually permitted originating from [pos], but that is fine!
        //^ simply DON'T CALL dp[pos][larger.chunk.size] if this larger.chunk.size isn't doable from this pos.
        //^ remember, any call to SOLVE dp[pos][..] will increase tallyDegree[pos], as well as tally[pos]
        //
        //so. you are solving for a given dp[pos][chunk.size]
        //^ you will query tallyDegree[pos+chunk.size]
        //^ if it is < your chunk.size, then (if dp[pos+chunk.size][chunk.size] is solvable) solve dp[pos+chunk.size][chunk.size], as this will increment the tallyDegree and update tally[pos+chunk.size] to what you want it to be.
        //^ *DEBUG STRAT*: you should probably check throughout that your assumed ordering of incrementing tallyDegree is actually true.
        //^ ^ altho, i'm like 80% sure it is, just by reasoning about it.
        //^ ermmm... i have a very big issue with the "n choose k placement of the subproblems..."
        //the prefix permutation sum can be preprocessed in O(n^2), so that isn't an issue.
        //but n choose k...
        //how to go from (n choose k) to (n+1 choose k)?
        //
        //C(n,k) = n! / (k!(n-k)!)
        //
        //multiply by (n+1) and divide by (n+1-k)
        //^ yeah. that's basically it.
        //^... if it is dependent on k, i don't think this is doable...
        //since there might be many k...
        //
        //but what if there are many k?
        //n!/(3!(n-3)!) + n!/(4!(n-4)!)
        //1 2 3 1, 1 2 3 4
        //1 2 3 1 2, 1 2 3 4 1
        //
        //^ for a fixed n, you can increment on k in a clean way... not that this helps, because a subcase of width 3 doesn't represent width-4 arrangements...
        //
        //you can debug on your own code (pass in like 1...800 or something) and see if it times out. (a rough harness for this is sketched after the class)
        //^ should give you an idea of whether or not implementing in C++ will let you pass with an O(n^3) algorithm...
        //
        //i guess.. just write a correct O(n^3) algorithm first....
        //thickness 3 to thickness 2 --> 3 * 2 --> 3!/(3-2)!
        //^ holy shit, that might be it.
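        //(re: the tabulation question above: a rough bottom-up sketch of the same recurrence,
        // untested and left commented out. it would replace the DFS calls; the answer would still be
        // the same sum over dp[0][c]. capping i at Math.min(pp[np], c) is equivalent to letting
        // mult hit 0 the way DFS does.)
        //for(int pos = n-1; pos >= 0; pos--){
        //    for(int c = 1; c <= pp[pos]; c++){            //c = size of the chunk starting at pos
        //        int np = pos + c;
        //        if(np == n){ dp[pos][c] = 1; continue; }  //chunk reaches the end: exactly one way
        //        long res = 0, mult = 1;
        //        for(int i = 1; i <= Math.min(pp[np], c); i++){
        //            mult = (mult * (c - (i-1))) % mod;    //falling factorial c*(c-1)*...*(c-i+1)
        //            res = (res + dp[np][i] * mult) % mod;
        //        }
        //        dp[pos][c] = res;
        //    }
        //}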
        //^ can i merge combinations like this together?
        //3 to 1 --> 3 --> 3!/(3-1)!
        //how to increase n to n+1?
        // /3 * 4
        // /2 * 4 ... no not really.
        //eg, enumerate all of them for 1 2 3 4:
        //1 2 3 4
        //1 3 4
        //2
        //1
        //2 3 4
        //1 3
        //2 4
        //1 4
        //2 3
        //1 4
        //2
        //3
        //1
        //2 4
        //3
        //1
        //2
        //3 4
        //1
        //2
        //3
        //4
        //just accept it, whatever it is.
    }
}
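//per the timing note inside main: a rough local harness (the class name and the n below are just
//placeholders) that builds the worst case, a strictly increasing array, feeds it through stdin,
//and runs the solution, to see how the roughly O(n^3) cost behaves.
class TimingSketch {
    public static void main(String[] args) {
        int n = 800;                                  //placeholder size; push it toward the real limit
        StringBuilder sb = new StringBuilder();
        sb.append(n).append('\n');
        for(int i = 1; i <= n; i++){
            sb.append(i).append(i == n ? "\n" : " "); //worst case: one long increasing run 1..n
        }
        System.setIn(new ByteArrayInputStream(sb.toString().getBytes()));
        long start = System.currentTimeMillis();
        Solution.main(new String[0]);                 //runs the real solver on the generated input
        System.err.println("worst case n=" + n + " took " + (System.currentTimeMillis() - start) + " ms");
    }
}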