Clipped sequence (monotonic queue optimized DP)

Link to the original problem:
Cropping sequence

DP analysis

The data range of this problem is n up to 1e5.
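Define f[i] as the minimum cost of cutting the first i numbers into consecutive segments, where every segment's sum must be at most m and the cost of a split is the sum of the maxima of its segments (this is exactly the state the code below uses). The transition, read off the brute-force code:

    f[0] = 0
    f[i] = min( f[j-1] + max(a[j..i]) )  over all j with sum(a[j..i]) <= m

Each transition scans its segment for the maximum, which is what makes the naive version O(n^3).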

A simple version of the code, based on the most direct understanding
(it only passes 6 test cases; the complexity is O(n^3))

#include<iostream>
#include<algorithm>
#include<cstring>
#include<cstdio>
#include<queue>

using namespace std;
typedef long long ll;

const int N = 1e5 + 10;

int n;
int a[N];
ll m;
ll f[N], sum[N];    // f[i]: minimum cost for the first i numbers; sum: prefix sums

int main(){

    cin >> n >> m;
    for(int i = 1; i <= n; i ++){
        scanf("%d", &a[i]);
        sum[i] = sum[i-1] + a[i];
    }

    memset(f, 0x3f, sizeof(f));
    f[0] = 0;

    for(int i = 1; i <= n; i ++){
        for(int j = 1; j <= i; j ++)
        {
            if(sum[i] - sum[j - 1] > m) continue;   // the segment a[j..i] must have sum <= m
            int maxx = 0;
            // every transition needs the maximum of a subinterval;
            // this lookup can be made O(1) with an ST (sparse) table
            for(int k = j; k <= i; k ++)
            {
                maxx = max(maxx, a[k]);
            }
            f[i] = min(f[i], f[j-1] + maxx);
        }
    }

    if(f[n] >= 0x3f3f3f3f3f3f3f3fLL) puts("-1");   // f[n] never updated: no valid split
    else printf("%lld\n", f[n]);

    return 0;
}

Adding fast input and an ST (sparse) table for the interval maximum gets past 11 test cases, but the time complexity is still O(n^2), which is not enough for AC.
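For reference, the O(1) sparse-table query used below works because max is idempotent: p[i][k] stores the maximum of the 2^k numbers starting at i, and for a segment a[j..i] of length len with k = floor(log2(len)),

    max(a[j..i]) = max( p[j][k], p[i - (1 << k) + 1][k] )

The two blocks of length 2^k may overlap; overlapping does not change a maximum.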

#include<iostream>
#include<algorithm>
#include<cstring>
#include<cstdio>
#include<queue>
#include<cmath>

using namespace std;
typedef long long ll;

const int N = 1e5 + 10;

int n;
int a[N], p[N][30];   // p: sparse table, p[i][k] = max of the 2^k numbers starting at i
ll m;
ll f[N], sum[N];

inline int read()
{
    int x = 0, f = 1;
    char ch = getchar();
    while(ch < '0' || ch > '9')
    {
        if(ch == '-')
        {
            f = -1;
        }
        ch = getchar();
    }
    while(ch >= '0' && ch <= '9')
    {
        x = x*10 + ch - 48;
        ch = getchar();
    }
    return x * f;
}

int main(){

    cin >> n >> m;
    for(int i = 1; i <= n; i ++){
        a[i] = read();
        p[i][0] = a[i];
        sum[i] = sum[i-1] + a[i];
    }

    memset(f, 0x3f, sizeof(f));
    f[0] = 0;

    // build the ST (sparse) table
    for(int j = 1; j <= log2(n); j ++)
    {
        for(int i = 1; i + (1 << j) - 1 <= n; i++)
        {
            p[i][j] = max(p[i][j-1], p[i+(1 << (j-1))][j-1]);
        }
    }

    for(int i = 1; i <= n; i ++){
        for(int j = 1; j <= i; j ++)
        {
            if(sum[i] - sum[j - 1] > m) continue;
            int len = i - j + 1;
            int lo = log2(len);
            // O(1) query: two (possibly overlapping) blocks covering [j, i]
            int maxx = max(p[j][lo], p[i - (1 << lo) + 1][lo]);

            f[i] = min(f[i], f[j-1] + maxx);
        }
    }

    if(f[n] >= 0x3f3f3f3f3f3f3f3fLL) puts("-1");   // no valid split
    else printf("%lld\n", f[n]);

    return 0;
}
  • The next step is the hardest part. We have to analyze what is special about this particular problem and change the way we think about the transitions, because the approach above is stuck at O(n^2) and a further optimization is still needed.

The monotonic queue maintains every candidate transition that can still update f[i].
The worst-case time complexity is still O(n^2), reached for example when the whole sequence is monotonically decreasing and its total sum does not exceed m, so the queue never shrinks and the enumeration scans it in full.
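A sketch of the candidate set, using the same names as the code below (j is the left end of the window [j, i] whose sum is at most m, and q[hh..tt] holds the indices of the values inside that window that are larger than everything after them):

    f[i] = min of:
        f[j-1]  + a[q[hh]]      (the segment starts at j; its max is the queue head)
        f[q[k]] + a[q[k+1]]     (for each k in [hh, tt): the segment starts at q[k]+1)

Any other split point p strictly between two queue indices q[k] and q[k+1] is dominated by q[k]: the segment maximum is a[q[k+1]] either way, and f is non-decreasing, so f[q[k]] <= f[p]. The queue therefore keeps every candidate that can still matter, but it cannot say which one is smallest, hence the enumeration over the queue inside the loop below.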

#include<iostream>
#include<algorithm>
#include<cstring>
#include<cstdio>
#include<queue>
#include<cmath>

using namespace std;
typedef long long ll;

const int N = 1e5 + 10;

int n;
int a[N], q[N];   // q: the monotonic queue, storing indices into a
ll m;
ll f[N];

inline int read()
{
    int x = 0, f = 1;
    char ch = getchar();
    while(ch < '0' || ch > '9')
    {
        if(ch == '-')
        {
            f = -1;
        }
        ch = getchar();
    }
    while(ch >= '0' && ch <= '9')
    {
        x = x*10 + ch - 48;
        ch = getchar();
    }
    return x * f;
}

int main(){

    cin >> n >> m;
    for(int i = 1; i <= n; i ++){
        a[i] = read();
    }

    memset(f, 0x3f, sizeof(f));
    f[0] = 0;

    // monotonic queue implementation
    int hh = 0, tt = -1;
    ll cnt = 0;
    // a is 1-indexed, so push index 0 first as a sentinel
    q[++tt] = 0;
    for(int i = 1, j = 1; i <= n; i ++)
    {
        // i: we are computing f[i]; j: left end of the window of valid split points
        // transition: f[i] = min(f[i], f[j] + max(a[j+1..i]))
        // hh, tt: head/tail positions of the monotonic queue; its entries are indices into a
        cnt += a[i];            // cnt = sum of a[j..i]; keep it <= m
        while(cnt > m) {
            cnt -= a[j];
            j ++;
        }
        // j is now the leftmost position with cnt <= m

        // pop the head while it has fallen out of the window,
        // keeping only queue entries inside [j, i]
        while(hh <= tt && q[hh] < j)
        {
            hh ++;
        }

        // keep the queue values decreasing from head to tail, then push i
        while(hh <= tt && a[q[tt]] <= a[i]) tt--;
        q[++tt] = i;

        f[i] = f[j-1] + max(a[q[hh]], a[i]);
        //... 2 ... (j)1 ..."8" .1.3. "7" ..1. "6" ..4. "5" ... (i)2
        // q always holds the decreasing subsequence over the current window whose sum is <= m.
        // A split right after the "3" is dominated by a split right after the "8": both segments
        // have maximum 7, and f is non-decreasing, so f[index of "8"] + 7 <= f[index of "3"] + 7.
        // But among the surviving candidates (one per queue element) we cannot tell which is
        // smallest, so we still have to enumerate the queue.
        for(int k = hh; k < tt; k ++)   // candidate: segment starts at q[k]+1, its max is a[q[k+1]]
        {
            f[i] = min(f[i], f[q[k]] + max(a[q[k+1]], a[i]));
        }

    }

    if(f[n] >= 0x3f3f3f3f3f3f3f3fLL) puts("-1");
    else printf("%lld\n", f[n]);

    return 0;
}

Final optimization:
Over the set of candidate values we only ever need to insert a value, delete a specific value, and query the minimum; a balanced tree does each of these in O(log n), so the total time becomes O(n log n), which is enough for AC. C++'s set is such a balanced tree,
but the candidate values may repeat, so multiset is used instead.
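As a quick standalone illustration of those three operations (just a demo of the container, not part of the solution): multiset::erase(value) would remove every copy of that value, so one erases through the iterator returned by find to delete exactly one occurrence, which is what del() in the code below relies on.

#include<iostream>
#include<set>
using namespace std;

int main(){
    multiset<long long> se;

    // insert values (duplicates are kept)
    se.insert(5); se.insert(3); se.insert(3); se.insert(7);

    // delete one specific value: erase via the iterator from find, not by value
    auto it = se.find(3);
    if(it != se.end()) se.erase(it);   // one 3 removed, the other 3 stays

    // query the minimum: the first element of the ordered multiset
    cout << *se.begin() << '\n';       // prints 3

    return 0;
}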

#include<iostream>
#include<algorithm>
#include<cstring>
#include<cstdio>
#include<queue>
#include<cmath>
#include<set>

using namespace std;
typedef long long ll;

const int N = 1e5 + 10;

int n;
int a[N], q[N];
ll m;
ll f[N];
multiset<ll> se;   // one candidate value per adjacent pair of queue elements

// candidate contributed by queue position pos:
// the segment starts right after q[pos], and its maximum inside the window is a[q[pos + 1]]
inline ll get(int pos)
{
    return f[q[pos]] + a[q[pos + 1]];
}

// remove that candidate from the multiset (erase via find so only one copy goes)
inline void del(int pos)
{
    auto t = se.find(get(pos));
    se.erase(t);
}

inline int read()
{
    int x = 0, f = 1;
    char ch = getchar();
    while(ch < '0' || ch > '9')
    {
        if(ch == '-')
        {
            f = -1;
        }
        ch = getchar();
    }
    while(ch >= '0' && ch <= '9')
    {
        x = x*10 + ch - 48;
        ch = getchar();
    }
    return x * f;
}

int main(){

    cin >> n >> m;
    for(int i = 1; i <= n; i ++){
        a[i] = read();
    }

    memset(f, 0x3f, sizeof(f));
    f[0] = 0;

    int hh = 0, tt = -1;
    ll cnt = 0;

    q[++tt] = 0;
    for(int i = 1, j = 1; i <= n; i ++)
    {
        cnt += a[i];               // keep the window sum a[j..i] <= m
        while(cnt > m) {
            cnt -= a[j];
            j ++;
        }

        // pop the head while it is out of the window, removing its candidate first
        while(hh <= tt && q[hh] < j)
        {
            if(tt > hh)
            {
                del(hh);
            }
            hh ++;
        }

        // pop the tail while its value is <= a[i], removing the candidate of the pair (tt-1, tt)
        while(hh <= tt && a[q[tt]] <= a[i]) {
            if(tt > hh)
            {
                del(tt - 1);
            }
            tt--;
        }
        q[++tt] = i;

        // the newly pushed i forms a new pair with the previous tail
        if(tt > hh)
        {
            se.insert(get(tt-1));
        }
        f[i] = f[j-1] + a[q[hh]];           // candidate: segment starts at j, its max is the head
        if(se.size())
            f[i] = min(f[i], *se.begin());  // best of the remaining candidates

    }

    if(f[n] >= 0x3f3f3f3f3f3f3f3fLL) puts("-1");
    else printf("%lld\n", f[n]);

    return 0;
}

Origin blog.csdn.net/qq_63092029/article/details/129578153