0 The traditional form of the Visitor pattern is no longer needed

What the Visitor pattern is for:
Core idea: it decouples the operations on the data from the data itself, so adding a new operation requires no change at all to the classes that hold the data.

1 A Visitor pattern example

import java.util.List;

public class VisitorDemo {
public static void main(final String[] args) {
Car car = new Car();
car.accept(new CarElementPrintVisitor());
}
}
// supertype of all objects in the structure
interface CarElement {
void accept(CarElementVisitor visitor);
}
// supertype of all operations
interface CarElementVisitor {
void visit(Body body);
void visit(Car car);
void visit(Engine engine);
}
class Body implements CarElement {
@Override
public void accept(CarElementVisitor visitor) {
visitor.visit(this);
}
}
class Engine implements CarElement {
@Override
public void accept(CarElementVisitor visitor) {
visitor.visit(this);
}
}
class Car implements CarElement {
private final List<CarElement> elements;
public Car() {
this.elements = List.of(new Body(), new Engine());
}
@Override
public void accept(CarElementVisitor visitor) {
for (CarElement element : elements) {
element.accept(visitor);
}
visitor.visit(this);
}
}
class CarElementPrintVisitor implements CarElementVisitor {
@Override
public void visit(Body body) {
System.out.println("Visiting body");
}
@Override
public void visit(Car car) {
System.out.println("Visiting car");
}
@Override
public void visit(Engine engine) {
System.out.println("Visiting engine");
}
}

2 Using sealed interfaces and switch pattern matching (Java 17) to reach the same goal quickly

The code is about half as long, yet the effect is exactly the same.

import java.util.List;
import java.util.stream.Stream;

public class VisitorDemo {
public static void main(final String[] args) {
Car car = new Car();
print(car);
}
// this is the visitor
private static void print(Car car) {
car.elements()
.map(element -> switch (element) {
case Body body -> "Visiting body";
case Car car_ -> "Visiting car";
case Engine engine -> "Visiting engine";
})
.forEach(System.out::println);
}
}
// supertype of all objects in the structure
sealed interface CarElement
permits Body, Engine, Car { }
final class Body implements CarElement { }
final class Engine implements CarElement { }
final class Car implements CarElement {
private final List<CarElement> elements;
public Car() {
this.elements = List.of(new Body(), new Engine());
}
public Stream<CarElement> elements() {
return Stream.concat(elements.stream(), Stream.of(this));
}
}

Usage notes:
Make the interface over the structure types sealed (you should not modify the sealed interface; if you do need to change it, see 3).
For the operations, use a pattern switch to pick a code path per concrete type (switch from the interface down to the concrete classes).
Avoid a default branch, so that when you do change the sealed interface every switch produces a compile error, which guides you to the places that need updating.

1 Consider this piece of code

flatMap(queryResponseAfterAsr ->
instanceService.getSaaSByPaasEnvAndRegionWithAlibaba(instance)
.publishOn(Schedulers.boundedElastic())
.flatMap(saas -> { // this fans out into three streams: saas1, saas2, saas3
queryResponseAfterAsr.saasInstance = saas; // 1
return saasLogGetter.dealInputRequest(queryResponseAfterAsr) // 2
.flatMap(saasLogGetter::buildAndExecQuerySql); // 3

Note: the code above looks as if it guarantees that, during steps 2 and 3, the saasInstance written in step 1 is the instance that steps 2 and 3 operate on.

In reality, though, some part of step 3 in the saas1 stream may block (for example an asynchronous database query on another thread),
and the flatMap branches for saas2 and saas3 may then modify saasInstance inside queryResponseAfterAsr,
so the saas1 stream no longer sees the original saas1 in queryResponseAfterAsr.saasInstance.

2 Summary

A good mental model: decoupling between streams is necessary; if different streams depend on the same piece of state, make sure each stream owns its own copy of it.

1 Running pointnet2

https://github.com/yanx27/Pointnet_Pointnet2_pytorch#part-segmentation-shapenet

Download the S3DIS data:
https://docs.google.com/forms/d/e/1FAIpQLScDimvNMCGhy_rmBA2gHfDu3naktRm6A8BPwAWWDv-Uhm6Shw/viewform?c=0&w=1

2 Preparing your own test data

Idea: wrap one of your own .obj models into the same format as S3DIS.

The steps are as follows:

# 1 Prepare the raw data:
The directory layout is shown below; each line of smallRoom.txt is x y z r g b, and floor_1.txt is a copy of smallRoom.txt
nash5@gas:~/prjs/Pointnet_Pointnet2_pytorch/data/s3dis$ tree ./Stanford3dDataset_v1.2_Aligned_Version
./Stanford3dDataset_v1.2_Aligned_Version
└── Area_5
└── office_1
├── Annotations
│   └── floor_1.txt
└── smallRoom.txt

# 2 Generate the test data with the S3DIS script (you need to edit the corresponding meta file so that only one line remains):
nash5@gas:~/prjs/Pointnet_Pointnet2_pytorch/data/s3dis$ cat ../../data_utils/meta/anno_paths.txt
Area_5/office_1/Annotations
nash5@gas:~/prjs/Pointnet_Pointnet2_pytorch/data/s3dis$

Then:
cd data_utils
python collect_indoor3d_data.py

This generates a stanford_indoor3d directory under data; move it under
s3dis:
nash5@gas:~/prjs/Pointnet_Pointnet2_pytorch/data/s3dis$ ls ..
modelnet40_normal_resampled s3dis shapenetcore_partanno_segmentation_benchmark_v0_normal stanford_indoor3d_ori
myData s3dis_ori
nash5@gas:~/prjs/Pointnet_Pointnet2_pytorch/data/s3dis$ ls
Stanford3dDataset_v1.2_Aligned_Version stanford_indoor3d

# 3 Run the test script (you need to adjust batch_size; a 1080 can use 16). You will then find the following a few levels deep inside the log:
nash5@gas:~/prjs/Pointnet_Pointnet2_pytorch$ python test_semseg.py --log_dir pointnet2_sem_seg --test_area 5 --visual --batch_size 16
ay(total_correct_class_tmp) / (np.array(total_iou_deno_class_tmp, dtype=np.float) + 1e-6)
[0. 0.03883974 0. 0. 0. 0.
0. 0. 0. 0. 0. 0.
0. ]
Mean IoU of Area_5_office_1: 0.0388
----------------------------
test_semseg.py:185: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.
Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations
IoU = np.array(total_correct_class) / (np.array(total_iou_deno_class, dtype=np.float) + 1e-6)
test_semseg.py:190: RuntimeWarning: invalid value encountered in true_divide
total_correct_class[l] / float(total_iou_deno_class[l]))
------- IoU --------
class ceiling , IoU: 0.000
class floor , IoU: 0.039
class wall , IoU: 0.000
class beam , IoU: 0.000
class column , IoU: 0.000
class window , IoU: 0.000
class door , IoU: 0.000
class table , IoU: nan
class chair , IoU: 0.000
class sofa , IoU: 0.000
class bookcase , IoU: 0.000
class board , IoU: 0.000
class clutter , IoU: 0.000

eval point avg class IoU: 0.002988
test_semseg.py:194: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.
Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations
np.mean(np.array(total_correct_class) / (np.array(total_seen_class, dtype=np.float) + 1e-6))))
eval whole scene point avg class acc: 0.002988
eval whole scene petails and guidance: https://numpy.org/devdocs/release

# 4 View the result: open the pred.obj with MeshLab
nash5@gas:~/prjs/Pointnet_Pointnet2_pytorch$ ls log/sem_seg/pointnet2_sem_seg/visual/Area_5_office_1
Area_5_office_1_gt.obj Area_5_office_1_pred.obj Area_5_office_1.txt

1 Sliding window

A priority_queue is used a lot (typically with lazy deletion), or the two pointers st and ed; a small warm-up sketch follows.
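
As a warm-up for the heap-with-lazy-deletion pattern used in the problems below, here is a small self-contained sketch (my own illustrative example, not taken from any particular problem): it computes the maximum of every length-k window with a priority_queue, popping entries whose index has left the window.

#include <iostream>
#include <queue>
#include <vector>
using namespace std;

// maximum of every length-k window, via a max-heap with lazy deletion
vector<int> windowMax(const vector<int>& nums, int k) {
    priority_queue<pair<int, int>> pq; // <value, index>
    vector<int> res;
    for (int i = 0; i < (int)nums.size(); ++i) {
        pq.push({nums[i], i});
        if (i >= k - 1) {
            // entries whose index has slid out of the window can never be useful again
            while (pq.top().second <= i - k) pq.pop();
            res.push_back(pq.top().first);
        }
    }
    return res;
}

int main() {
    for (int v : windowMax({1, 3, -1, -3, 5, 3, 6, 7}, 3)) cout << v << " "; // 3 3 5 5 6 7
    cout << "\n";
}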

2 Example problems:

1425constrainedSubsetSum (Constrained Subsequence Sum)

1 Problem

https://leetcode.cn/problems/constrained-subsequence-sum/

2 Approach

  • 1 Approach:
    • 1.1 dp[i] is the maximum constrained subsequence sum over subsequences ending at index i.
    • 1.2 dp[i] = nums[i] + max(0, dp[i-k], dp[i-k+1], …, dp[i-1])
    • 1.3 How do we quickly find, among indices i-1 down to i-k, the largest dp value whose index still satisfies the constraint? A heap is enough: take the first entry whose index is in range, and pop any entry whose index is out of range (if it is out of range now, it will be out of range for every later i as well). A worked example follows this list.
class Solution {
public:
using pii = pair<int, int>;


int constrainedSubsetSum(vector<int>& nums, int k) {
// Let dp[i] be the solution for the prefix of the array that ends at index i,
// if the element at index i is in the subsequence.
// dp[i] = nums[i] + max(0, dp[i-k], dp[i-k+1], ..., dp[i-1])

int n = nums.size();
vector<int> dp(n, 0);
dp[0] = nums[0];
int curRes = dp[0];

// <idx, value>
auto cmp = [](const pii& a, const pii& b) {
return a.second < b.second;
};
priority_queue<pii, vector<pii>, decltype(cmp)> maxHeap(cmp);

maxHeap.push({0, dp[0]});

auto findInWindow = [](int minIdx, decltype(maxHeap)& window) {
bool foundInWin = false;
while(!window.empty()) {
auto node = window.top();
if(minIdx <= node.first) {
// window.pop();
return max(0, node.second);
} else {
window.pop();
}
}
return 0;
};

for(int i = 1; i < n; ++i) {
dp[i] = nums[i] + findInWindow(max(0, i - k), maxHeap);
curRes = max(curRes, dp[i]);
// cout << i << "-> " << dp[i] << endl;
maxHeap.push({i, dp[i]});
}
return curRes;
}
};

1499findMaxOfEquationInVec (Max Value of Equation)

1 Problem

https://leetcode.cn/problems/max-value-of-equation/

2 Approach

  • 1 Approach:
    • 1.1 The first idea: for point[i] = (x, y), maintain a window of all (xi, yi) with |xi - x| <= k and scan the whole window for the answer. That is O(nk), a bit too slow.
    • 1.2 Do we really need to consider points within distance k on both sides of point[i]? No, because that clearly double-counts pairs. Take points with x = 1, 3, 5 and k = 100: at x=1 we consider 3 and 5, at x=3 we consider 1 and 5, yet the pair (1, 3) was already handled at x=1. So it suffices to look at one side only; take the left side as the example.
    • 1.3 Scanning left to right, call the current point xj; the candidates xi are all points to its left. How do we get the result quickly?
      • res = max over i < j with xj - xi <= k of (yi + yj + |xi - xj|) = max(yj + xj + yi - xi); the absolute value is gone.
      • So for a given j we simply need the maximum yi - xi inside the window (entries must satisfy xj - xi <= k), and a priority_queue holding those values is enough.
      • One note: when the top of the heap has xj - xi > k it can be popped with no risk, because if it is already too far from the current j, it will only be farther from every later point.
class Solution {
public:

using pii = pair<int, int>;

static constexpr auto cmp = [](const pii& a, const pii& b) {
return a.second < b.second;
};

int findMaxValueOfEquation(vector<vector<int>>& points, int k) {
// first: yi + yj + |xi - xj| == yj + xj + yi - xi
// so: for each j, we shall found the biggest yi - xi and |xj - xi| <= k
// using a priority_queue to store those yi - xi

int n = points.size();
// priority_queue<pii, vector<pii>, decltype(cmp)> maxHeapOri(cmp);
priority_queue<pii, vector<pii>, decltype(cmp)> maxHeap(cmp);
maxHeap.push({points[0][0], points[0][1] - points[0][0]}); // init it

int curRes = INT_MIN;
for(int j = 1; j < n; ++j) {
// |xj - xi| <= k, we should consider xj - xi <= k or xi - xj <= k
// but we can only consider those xj > xi, cause, when we try to consider xj < xi,
// the bigger J after j will be like xi, so all cases coverd
int xj = points[j][0];
int yj = points[j][1];
// cout << "dealing: " << xj << " " << yj << endl;
// auto maxHeap = maxHeapOri;
while(1) {
if(maxHeap.empty()) {
break;
}

auto t = maxHeap.top();
int xi = t.first;
int delatI = t.second;
if(xi != xj ) {
if(xj - xi > k) {
// cout << "pop: " << xi << endl;
maxHeap.pop();
} else {
curRes = max(curRes, yj + xj + delatI);
break;
}
} else {
break;
}
}
maxHeap.push({points[j][0], points[j][1] - points[j][0]});
}
return curRes;

}
};

1610visiblePoints (Maximum Number of Visible Points)

1 Problem

https://leetcode.cn/problems/maximum-number-of-visible-points/

2 Approach

  • 1 Approach:
    • 1.1 Compute the polar angle of every point, sort the angles, then slide a window of width angle from the smallest angle to the largest.
    • 1.2 Mind the wrap-around: angles such as -179 and 179 are actually very close, so append a copy of every sorted angle plus 360 to the array and then run the sliding window (see the example after this list).
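
For example, with angle = 4 and two points at polar angles -179 and 179: after appending the +360 copies, -179 also appears as 181, so a window such as [179, 183] covers both points, which the un-duplicated array would miss.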
class Solution {
public:
#define PI 3.14159265

using Point = pair<pair<int, int>, double>;

int visiblePoints(vector<vector<int>>& points, int angle, vector<int>& location) {

vector<Point> calPoints;
int n = points.size();
int dupWithOriCnt = 0;
// start form the negx: from -179.999 to 180
auto calDegreeForPoints = [&](vector<int>& xy, vector<Point>& calPoints) {
int x = xy[0] - location[0];
int y = xy[1] - location[1];
if(0 == x && 0 == y) {
dupWithOriCnt += 1;
return;
}
double result;
result = atan2 (y, x) * 180 / PI;
calPoints.emplace_back(Point{{x, y}, result});
};

for(auto& p : points) {
calDegreeForPoints(p, calPoints);
}

// all duplicated
if(0 == calPoints.size()) {
return dupWithOriCnt;
}

// sort by the angle
sort(calPoints.begin(), calPoints.end(), [](const Point& a, const Point& b) {
return a.second < b.second;
});
int oriSz = calPoints.size();
for(int i = 0; i < oriSz; ++i) { // solve the circle problem (use calPoints.size(), not n: points coinciding with the observer were skipped above)
auto p = calPoints[i];
p.second += 360.0;
calPoints.push_back(p);
}

// init window
int st = 0;
int ed = 0;

double dAngle = angle;

while(ed < calPoints.size() && calPoints[ed].second - calPoints[st].second <= dAngle) {
++ed;
}
--ed;
int finRes = ed - st + 1;

// mv window
while(ed < calPoints.size()) {
// mv st
++st;
while(ed < calPoints.size() && calPoints[ed].second - calPoints[st].second <= dAngle) {
++ed;
}
if(ed == calPoints.size()) {
finRes = max(finRes, static_cast<int>(calPoints.size()) - st);
} else {
--ed;
finRes = max(finRes, ed - st + 1);
}

}

return finRes + dupWithOriCnt;
}
};

2302countSubarraysScoreLessThanK (Count Subarrays With Score Less Than K)

1 Problem

https://leetcode.cn/problems/count-subarrays-with-score-less-than-k/

2 Approach

  • 1 Approach:
    • 1.1 Use st, ed to track the current window and extend it to the largest window whose score stays below k.
    • 1.2 Then ++st and find the next window.
    • 1.3 How to count for each window:
      • Count the subarrays that start at st (a worked example follows this list).
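
A quick check against the problem's first sample, nums = [2, 1, 4, 3, 5] with k = 10: at st=0 the window grows to [2, 1] (score 3*2 = 6 < 10), contributing 2 subarrays ([2] and [2, 1]); st=1 gives window [1] (1 subarray); st=2 gives [4] (1); st=3 gives [3] (1); st=4 gives [5] (1). The total is 2+1+1+1+1 = 6, the expected answer.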
class Solution {
public:
long long countSubarrays(vector<int>& nums, long long k) {
// alway keep st,ed as the max status:
// score(st, ed) <= k, ed cannot be bigger for each st
// then we statistic for each st, how many ways start with st

// moving window score = getScore(nums[st:ed])
long long st = 0;
long long ed = 0;
long long n = nums.size();
long long curScore = 0;
long long cnt = 0;
while(st < n) {
bool edMoved = false;
while(ed < n && (curScore + nums[ed])*(ed - st + 1) < k) {
curScore += nums[ed];
++ed;
edMoved = true;
}

// add cnt start with cur st
if(curScore*(ed - st) < k) {
cnt += ed - st;
}

curScore -= nums[st];
++st;
}
return cnt;
}
};

0 Backtracking

The essence of backtracking lies in the word "back"; take the classic eight-queens problem as the example:

backTrack(current row, board):
for: each position of the current row
place a queen at this position
backTrack(next row, board) // go one level deeper
remove the queen from this position // backtrack, so the next position of this row can be tried

Backtracking also carries the state of the partial solution explored so far (for the chessboard, all positions already occupied), and memoized search can be applied to that state;
problem 0698 below does exactly this. A runnable N-queens sketch follows.
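
A minimal runnable N-queens counter following the skeleton above (my own sketch; the three boolean arrays for columns and diagonals are an implementation choice of this illustration, not part of the original note):

#include <iostream>
#include <vector>
using namespace std;

int n, total = 0;
vector<bool> col, diag1, diag2; // occupied columns and the two diagonal directions

void backTrack(int row) {
    if (row == n) { ++total; return; }            // every row holds a queen: one solution found
    for (int c = 0; c < n; ++c) {                 // each position of the current row
        if (col[c] || diag1[row + c] || diag2[row - c + n - 1]) continue;
        col[c] = diag1[row + c] = diag2[row - c + n - 1] = true;   // place a queen here
        backTrack(row + 1);                       // go one level deeper
        col[c] = diag1[row + c] = diag2[row - c + n - 1] = false;  // undo it: backtrack
    }
}

int main() {
    n = 8;
    col.assign(n, false);
    diag1.assign(2 * n - 1, false);
    diag2.assign(2 * n - 1, false);
    backTrack(0);
    cout << total << "\n"; // prints 92 for the eight-queens board
}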

1 Example problems:

0698canPartitionKSubsets (Partition to K Equal Sum Subsets)

1 Problem

https://leetcode.cn/problems/partition-to-k-equal-sum-subsets/

2 Approach

  • 1 Approach:

    • 1.1 Our earlier backtracking problems had at most one search target: for eight queens, one board; for splitting into 2 equal-sum subsets, one subset whose value is sum/2. How do we deal with several targets? Think of it this way: with k equal-sum subsets, every number of the array simply goes into one of the buckets 0..k-1, and that choice is what we backtrack over.
    • 1.2 Do we have to write it as "every number tried in every bucket" with two nested loops? No. For the i-th number we already try every bucket; a second loop would only add redundant work:
    • 1.3 When may we go one level deeper? When the current bucket can still hold the smallest unused number. Do unused numbers need an explicit mark for this? No, because in this backtracking each number is one level, and each level merely has k choices.
    • 1.4 Use memoization to speed things up?
      • For example, once putting nums[i] into bucket[buc] has failed, any later bucket whose value equals bucket[buc] will fail too, so it need not be searched.
      • Careful: this memo only holds while nums[0 : i-1] are placed the same way. Once every placement of number i has failed, clear memo[i] before returning so later searches are unaffected.
  • 2 A concrete case of the redundancy in 1.2, the classic superfluous for loop: bucket 1 takes number 1 and bucket 2 takes number 2; when the outer loop later lets bucket 2 take number 2 and bucket 1 take number 1 again, exactly the same assignment is computed twice.

    // for every number, try bucket from 0 to k-1
    void backTrack(vector<int>& nums, vector<int>& buckets, int tar) {
    if(fin) {
    return ;
    }

    bool tmp = true;
    for(auto& buc : buckets) {
    tmp = tmp && (buc == tar);
    }
    if(tmp) {
    fin = true;
    }

    for(int i = 0; i < nums.size(); ++i) {
    if(!used[i]) {
    for(int buc = 0; buc < buckets.size(); ++buc) {
    buckets[buc] += nums[i]; // put it into buc
    used[i] = true;
    if(buckets[buc] <= tar) {
    backTrack(nums, buckets, tar);
    }
    buckets[buc] -= nums[i];
    used[i] = false;
    }
    }
    }

    }

The standard version with a return value:

class Solution {
public:
vector<int> used;
bool canPartitionKSubsets(vector<int>& nums, int k) {
vector<int> buckets(k, 0);
used.resize(nums.size(), false);

sort(nums.rbegin(), nums.rend());

int sum = accumulate(nums.begin(), nums.end(), 0);
if(sum % k != 0) {
return false;
}

int tar = sum / k;
if(nums[0] > tar) {
return 0;
}

vector<unordered_set<int>> memo(nums.size()); // for node[i], memo[i][j]means: node[i] put into a bucket with value j will failed

return backTrack(nums, buckets, 0, tar, memo);
}

// for every number, try bucket from 0 to k-1
bool backTrack(vector<int>& nums, vector<int>& buckets, int st, int tar, vector<unordered_set<int>>& memo) {

// 所有数字都放进去了,成功了
if(st == nums.size()) {
return true;
}

for(int buc = 0; buc < buckets.size(); ++buc) {
if(0 == memo[st].count(buckets[buc])) {
// if(buckets[i]) {
buckets[buc] += nums[st]; // put it into buc
// used[st] = true;
if(buckets[buc] == tar || (tar - buckets[buc] >= nums.back())) {
// cout << "to buc: " << buc << endl;
if(backTrack(nums, buckets, st + 1, tar, memo)) {
return true; // 提前返回,因为已经找到一个结果
}
}
buckets[buc] -= nums[st];

// 如果nums[st] 放入 buckets[buc]失败了,那么后面值相同的桶都会失败,就不用放了
memo[st].insert(buckets[buc]);
}
}

// 说明放错桶的不是st,而是nums[0 : st-1]中的某个,于是把memo清除掉
memo[st].clear();

return false;
}
};

I prefer writing backtracking without a return value; the version below has no return value and uses a fin flag instead:

class Solution {
public:
vector<int> used;
bool fin = false;
bool canPartitionKSubsets(vector<int>& nums, int k) {
vector<int> buckets(k, 0);
used.resize(nums.size(), false);

sort(nums.rbegin(), nums.rend());

int sum = accumulate(nums.begin(), nums.end(), 0);
if(sum % k != 0) {
return false;
}

int tar = sum / k;
if(nums[0] > tar) {
return 0;
}

int useCnt = 0;

vector<unordered_set<int>> memo(nums.size());
backTrack(nums, buckets, 0, tar, useCnt, memo);

return fin;
}

// for every number, try bucket from 0 to k-1
void backTrack(vector<int>& nums, vector<int>& buckets, int st, int tar, int& useCnt, vector<unordered_set<int>>& memo) {
if(fin) {
return ;
}

if(useCnt == nums.size()) {
bool tmp = true;
for(auto& buc : buckets) {
tmp = tmp && (buc == tar);
}
if(tmp) {
fin = true;
return;
}
}

if(useCnt == nums.size()) {
return; // return true就行
}


for(int buc = 0; buc < buckets.size(); ++buc) {
if(0 == memo[st].count(buckets[buc])) {
buckets[buc] += nums[st]; // put it into buc
++useCnt;
if(buckets[buc] == tar || (tar - buckets[buc] >= nums.back())) {
// cout << "to buc: " << buc << endl;
backTrack(nums, buckets, st + 1, tar, useCnt, memo);
}

buckets[buc] -= nums[st];

// 如果nums[st] 放入 buckets[buc]失败了,那么后面值相同的桶都会失败,就不用放了
memo[st].insert(buckets[buc]);

--useCnt;
}
}

// 说明放错桶的不是st,而是nums[0 : st-1]中的某个,于是把memo清除掉
memo[st].clear();
}
};

0980differentPath3 (Unique Paths III)

1 Problem

https://leetcode.cn/problems/unique-paths-iii/

2 Approach

  • 1 Approach:
    • Backtracking; plain brute-force search is enough. Only two questions matter:
    • When may we step into the next cell? When it is inside the grid, has not been visited, and is not an obstacle.
    • When do we return? On reaching (edX, edY): if every 0 cell has been visited, this counts as one valid path and we return 1; if not every 0 cell was visited, we return 0. The final answer is the sum of all returned values.
class Solution {
public:
int m,n;
int dx[4] = {1, 0, -1, 0};
int dy[4] = {0, 1, 0, -1};
int tarSum = 0;
int edX, edY;

int backTrack(vector<vector<int>>& grid, vector<vector<bool>>& vis, int x, int y, int curSum) {
if(x == edX && y == edY) {
// cout << "yes: " << curSum << endl;
if(curSum == tarSum) {
return 1;
} else {
return 0;
}
}

int ways = 0;
for(int mv = 0; mv < 4; ++mv) {
int nextX = x + dx[mv];
int nextY = y + dy[mv];
if(0 <= nextX && nextX < m && 0 <= nextY && nextY < n && \
!vis[nextX][nextY] && \
-1 != grid[nextX][nextY]
) {
vis[nextX][nextY] = true;
ways += backTrack(grid, vis, nextX, nextY, curSum + 1);
vis[nextX][nextY] = false;
}
}

return ways;
}


int uniquePathsIII(vector<vector<int>>& grid) {
m = grid.size();
n = grid[0].size();

// just backTracking all
vector<vector<bool>> vis(m, vector<bool>(n, false));

int x, y;
for(int i = 0; i < m; ++i) {
for(int j = 0; j < n; ++j) {
if(1 == grid[i][j]) {
x = i; y = j;
} else if(2 == grid[i][j]) {
edX = i; edY = j;
++tarSum;
} else {
tarSum += (grid[i][j] == 0);
}
}
}
// cout << tarSum << endl;

int curSum = 0;
vis[x][y] = true;
return backTrack(grid, vis, x, y, curSum);
}
};

0996numSquarefulPermutations (Number of Squareful Arrays)

1 Problem

https://leetcode.cn/problems/number-of-squareful-arrays/

2 Approach

  • 1 Approach:
    • Backtrack to generate all permutations. backTrack carries a parameter st meaning we are choosing the number for position st; at that moment nums[:st-1] is the already fixed prefix, and we only need to try each element of the suffix nums[st:] at position st, i.e. swap it into place.
    • Pruning: if nums[st-1] + nums[st] is not a perfect square, do not descend to the next st.
    • Memoized search: consider an input such as 2 2 2 2 2 2 2. memo[st][num] records whether the value num has already been tried at position st under the current prefix nums[:st-1]; if it has, skip it.
      • Note: memo[st][num] describes the search state only for the current prefix nums[:st-1], so once every value at position st has been tried and we are about to return to a different prefix, remember to clear it; this is very similar to problem 0698.
class Solution {
public:
void swap(int& a, int& b) {
int c = a;
a = b;
b = c;
}

bool judge(double tar) {
double x = sqrt(tar);
if(floor(x) == x) {
return true;
}
return false;
}

void print(vector<int>& vec) {
for(auto i : vec) {
cout << i <<" ";
}cout <<"\n";
}

unordered_set<string> rmDup;

// memo thah: cal[st][num] in pos st, whethter num has been visited
unordered_map<int, unordered_map<int, bool>> cal;

// st means : we are choosing a num for nums[st]
// and we should be sure that: nums[st] and nums[st-1] is able to sqrt
int permutationCheck(vector<int>& nums, int st) {
if(st == nums.size()) {
string tmp = "";
for(auto& i : nums) {
tmp += to_string(i);
}
if(rmDup.insert(tmp).second) {
return 1;
}
return 0;
}

int res = 0;
for(int i = st; i < nums.size(); ++i) {
swap(nums[i], nums[st]);
// cout << "level: " << st << " ";
// print(nums);
if(st == 0) {
if(!cal[st][nums[st]]) {
res += permutationCheck(nums, st + 1);
cal[st][nums[st]] = true;
}
} else {
if(judge(nums[st-1] + nums[st])) {
if(!cal[st][nums[st]]) {
res += permutationCheck(nums, st + 1);
cal[st][nums[st]] = true;
}
}
}
swap(nums[i], nums[st]);
}

// when return, the permutated prefix: nums[:st-1] will change, so we clear the memo
cal[st].clear();
return res;
}

int numSquarefulPerms(vector<int>& nums) {
int n = nums.size();
if(1 == n) {
return judge(nums.front());
}

// all sequence(permutation) and check each
return permutationCheck(nums, 0);
}
};

1240tilingRect (Tiling a Rectangle with the Fewest Squares)

1 Problem

https://leetcode.cn/problems/tiling-a-rectangle-with-the-fewest-squares/

2 Approach

  • 1 Approach:
    • Each time, find the top-left uncovered cell and the largest square that can be placed there; place it and recurse one level deeper. When that recursion returns, we no longer use the largest square at this position but one whose side is smaller by 1, and so on down to side length 1. So for every starting position we backtrack from the largest side length downwards.
    • Pruning: if the number of tiles placed so far already reaches the best answer found, return immediately. Why does this pruning matter? For a 10x10 board the very first attempt covers it with a single square; without the bound, later branches end up tiling with 1x1 squares, which is extremely slow. Using the result already obtained (1) to return early from branches that cannot do better greatly speeds up the backtracking. The same trick of returning early when no better result is possible appears in interview problem 17.25.
class Solution {
public:
vector<vector<bool>> board; // top left is 0, 0
int n, m; // n colums, m rows

int finRes = INT_MAX;

// start from left top
pair<int, pair<int, int>> findPos(vector<vector<bool>>& board) {
int x, y;
for(int i = 0; i < m; ++i) {
for(int j = 0; j < n; ++j) {
if(!board[i][j]) {
x = i; y = j;
// find len:
int u = x;
while(u < m && !board[u][y]) {
++u;
}
int len = u - x;

int v = y;
while(v < n && !board[x][v]) {
++v;
}
len = min(len, v - y);

return {len, {x, y}};
}
}
}

return {-1, {-1, -1}};
}

void fillSquareWith(vector<vector<bool>>& board, int x, int y, int len, bool content) {
for(int i = x; i < x + len; ++i) {
for(int j = y; j < y + len; ++j) {
board[i][j] = content;
}
}
}

void backTrack(vector<vector<bool>>& board, int& leftCnt, int curRes) {
if(finRes <= curRes) {
return ;
}
if(0 == leftCnt) {
finRes = min(finRes, curRes);
return ;
}

// find first zero pos along the boarder, or the next 1
auto p = findPos(board);
int len = p.first;
int x = p.second.first, y = p.second.second;
// cout << "found max len: " << len << " x/y " << x << " " << y << endl;

for(int l = len; l > 0; --l) {
fillSquareWith(board, x, y, l, true);
leftCnt -= l*l;

// cout <<"ready to fill: x/y/l: " << x << "/" << y << "/" << l << "/" << "left: " << leftCnt << endl;
// print(board);
backTrack(board, leftCnt, curRes + 1);

leftCnt += l*l;
fillSquareWith(board, x, y, l, false);
}

}

int tilingRectangle(int n, int m) {
this->n = m;
this->m = n;
// cout << "this: " << this->n << " " << this->m << endl;
board.resize(n, vector<bool>(m, 0));
int leftCnt = n*m;
int curRes = 0;
backTrack(board, leftCnt, curRes);
return finRes;
}

void print(vector<vector<bool>>& board) {
for(auto v : board) {
for(auto i : v) {
cout << i << " ";
}cout << "\n";
}cout << "\n ---------- ";

}
};

1255maxScoreWords (Maximum Score Words Formed by Letters)

1 Problem

https://leetcode.cn/problems/maximum-score-words-formed-by-letters/

2 Approach

  • 1 Approach:
    • 1.1 Clearly we can enumerate every subset of the words (each word is used or not); bitmask state compression works, and so does backtracking.
    • 1.2 Here we backtrack: first use the word and obtain one score, then skip it and obtain another score, and keep the larger of the two. Remember to undo the use afterwards, i.e. add the letters of the used word back to the pool.
    • 1.3 Termination: when every word has been considered, return 0. Each recursion level increments st, so return once st equals the number of words.
class Solution {
public:
vector<int> wordScore;

void printPool(unordered_map<char, int>& leftCharPool) {
for(auto& [c, cnt] : leftCharPool) {
cout << c << " " << cnt << endl;
}
cout << "---\n";
}

bool addOrRemove(string& word, unordered_map<char, int>& leftCharPool, bool add) {
// cout << "add/rm: " << word << endl;
if(add) {
for(auto& c : word) {
++leftCharPool[c];
}
} else {
unordered_map<char, int> curWord;
for(auto& c : word) {
++curWord[c];
}

for(auto& [c, cnt] : curWord) {
if(leftCharPool[c] < cnt) {
return false;
}
}

// printPool(leftCharPool);
// remove the word from the pool
for(auto& [c, cnt] : curWord) {
// cout << "rm " << c << " : " << cnt << endl;
leftCharPool[c] -= cnt;
}
// printPool(leftCharPool);

}
return true;
}

int backTrack(vector<string>& words, int st, unordered_map<char, int>& leftCharPool) {
if(words.size() == st) {
return 0;
}

int curScore = INT_MIN;
// use it
if(addOrRemove(words[st], leftCharPool, false)) {
// cout << "using " << words[st] << endl;
// printPool(leftCharPool);
curScore = max(wordScore[st] + backTrack(words, st + 1, leftCharPool), curScore);
// add it back
addOrRemove(words[st], leftCharPool, true);
}
// not use it
// cout << "not using" << words[st] << endl;
// printPool(leftCharPool);
curScore = max(backTrack(words, st + 1, leftCharPool), curScore);
return curScore;
}

int maxScoreWords(vector<string>& words, vector<char>& letters, vector<int>& score) {
// check all subsets of words using status compression
// you may use status compression, but here we use backTrack
unordered_map<char, int> leftCharPool;
wordScore.resize(words.size(), 0);
int wIdx = 0;
for(auto& w : words) {
for(auto& c : w) {
wordScore[wIdx] += score[c - 'a'];
}
++wIdx;
}

for(auto& c : letters) {
++leftCharPool[c];
}

int st = 0;
return backTrack(words, 0, leftCharPool);
}
};

0000_interview_1725wordMaxRectangle (Word Rectangle, LCCI 17.25)

1 Problem

https://leetcode.cn/problems/word-rectangle-lcci/

2 Approach

  • 1 Approach:
    • 1.1 Straightforward backtracking: over all words of the same length, generate every arrangement by backtracking; hori records the placed horizontal words and vert the vertical strings. As soon as some prefix formed by vert is not in the trie, the current word has to be re-chosen. The recursion depth needs no explicit bound: a failing vert makes us return early, and for vert to keep succeeding the depth can never exceed the length of the longest word.
    • 1.2 Start from the longest words, so that a rectangle with a large area is found early, and use that area to skip word lengths that cannot beat the current best. For example, if the best area found so far is 12 and we are about to search words of length 2 while the longest word has length 6, we can skip them outright: words of length 2 can yield at most an area of 2 * 6 = 12.
class Solution {
public:
class Trie {
public:
vector<Trie*> curLevel;
bool isEnd = false;
Trie() : curLevel(26, nullptr) {
}

void buildTrie(vector<string>& words) {
for(auto& w : words) {
auto curNode = this;
for(auto& c : w) {
if(nullptr == curNode->curLevel[c - 'a']) {
curNode->curLevel[c - 'a'] = new Trie();
}
curNode = curNode->curLevel[c - 'a'];
}
curNode->isEnd = true;
}
}

bool preInTrie(string& w) {
auto curNode = this;
for(auto& c : w) {
auto nextLevel = curNode->curLevel[c - 'a'];
if(nullptr == nextLevel) {
return false;
}
curNode = nextLevel;
}
return true;
}

bool wordInTrie(string& w) {
auto curNode = this;
for(auto& c : w) {
auto nextLevel = curNode->curLevel[c - 'a'];
if(nullptr == nextLevel) {
return false;
}
curNode = nextLevel;
}
return curNode->isEnd;
}

};

void ableToMakeRes(vector<string>& hori, vector<string>& vert) {
// cout << "> hori.size() " << hori.size() << endl;
if(0 == vert[0].size()) {
// cout << "> vert empty: " << endl;
return;
}

for(auto& v : vert) {
if(!root->wordInTrie(v)) {
// cout << "> word failed: " << v;
return ;
}
}

int curSquare = vert[0].size() * vert.size();
// cout << "> curSquare: " << vert[0].size() * vert.size() << endl;
// cout << "> finSquare: " << finSquare << endl;
if(curSquare > finSquare) {
// cout << "updated!\n";
finSquare = curSquare;
resVec = hori;
}
}

// try all possible permutation of rect with width = w
void backTrack(
vector<string>& curWords,
vector<string>& hori,
vector<string>& vert,
vector<string>& resVec
) {
int width = curWords[0].size();
// cur found square bigger then all possilbe res from curWords
if(finSquare > width * maxWordLen) return;

// check if we obtain a res
ableToMakeRes(hori, vert);

for(auto& word : curWords) {
hori.push_back(word);
int vertCnt = 0;
bool failed = false;
for(int i = 0; i < width; ++i) {
vert[i].push_back(word[i]);
++vertCnt;
if(!root->preInTrie(vert[i])) {
failed = true;
break;
}
}
// cout << "trying: " << word << " | with hori = " << hori.size() << endl;

if(!failed) {
// cout << "choosing: " << word << endl;
// next level
backTrack(curWords, hori, vert, resVec);
}

// backTrack
// cout << "failed on: " << word << endl;
hori.pop_back();
for(int i = 0; i < vertCnt; ++i) {
vert[i].pop_back();
}
}

}
unique_ptr<Trie> root;
int finSquare = INT_MIN;
vector<string> resVec;

int maxWordLen = -1;

vector<string> maxRectangle(vector<string>& words) {
root = make_unique<Trie>();
root->buildTrie(words);

map<int, vector<string>, std::greater<int>> lenWords;
for(auto& w : words) {
lenWords[w.size()].push_back(w);
maxWordLen = max(maxWordLen, static_cast<int>(w.size()));
}

for(auto& [w, curWords] : lenWords) {
vector<string> hori;
vector<string> vert(w, "");
// cout << "w is: " << w <<endl;
backTrack(curWords, hori, vert, resVec);
}
return resVec;
}
};

1 Octree implementation idea: (ref:)

  • 1 Initialize the whole point cloud as a single-node octree.
  • 2 Partition recursively with a partition function; it is passed the number of levels still allowed:
    • 2.0 If no levels are left || a child node contains no data || a child contains too few points to be worth splitting again, return.
    • 2.1 Partition the current node into 8 children and put the index of each point into the child node it falls into.
    • 2.2 Decrease the level count and call the partition function again.

2 Concrete implementation:

  • 1 Constructor:

    BoundaryVolumeHierarchy(const PointCloud<DIMENSION> *pointCloud)
    : Partitioner<DIMENSION>(pointCloud)
    , mRoot(this)
    , mParent(this)
    , mLeaf(true)
    , mLevel(0)
    {
    Rect<DIMENSION> extension = pointCloud->extension();
    mCenter = extension.center(); // 点云中心
    mSize = extension.maxSize() / 2; // 点云最长边长的一半
    mIndices = std::vector<size_t>(pointCloud->size());
    std::iota(mIndices.begin(), mIndices.end(), 0);
    mLeafTable = std::vector<BoundaryVolumeHierarchy<DIMENSION>*>(pointCloud->size(), this); // 表示点云中每个顶点位于8叉树的叶子节点的地址
    }
  • 2 Partition function


    // 建立octree,直到每个叶子节点最多包含minNumPoints
    void partition(size_t levels = 1, size_t minNumPoints = 1, float minSize = 0.0f) override
    {
    if (!isLeaf())
    {
    for (size_t i = 0; i < NUM_CHILDREN; i++)
    {
    if (mChildren[i] != NULL)
    mChildren[i]->partition(levels - 1, minNumPoints, minSize);
    }
    }
    else
    {
    // 递归层次用完了 || 叶子内部点数达到最小要求 || 最多只有一个点了 || 节点的边长小于最小限度
    if (levels <= 0 || mIndices.size() <= minNumPoints || mIndices.size() <= 1 || mSize < minSize) return;
    // create new centers
    Vector newCenters[NUM_CHILDREN];
    calculateNewCenters(newCenters);

    mLeaf = false;

    // create children
    float newSize = mSize / 2;
    for (size_t i = 0; i < NUM_CHILDREN; i++)
    {
    mChildren[i] = NULL;
    }

    // split points
    for (const size_t &index : mIndices)
    {
    // calculate child index comparing position to child center
    // 对于每个点,计算它位于当前节点的哪个子节点?
    size_t childIndex = calculateChildIndex(this->pointCloud()->at(index).position());
    if (mChildren[childIndex] == NULL)
    {
    mChildren[childIndex] = new BoundaryVolumeHierarchy<DIMENSION>(this, newCenters[childIndex], newSize);
    mChildren[childIndex]->mIndices.reserve(mIndices.size());
    }
    // 将这个点压入对应的子节点中
    mChildren[childIndex]->mIndices.push_back(index);
    // update current leaf where point is stored
    mRoot->mLeafTable[index] = mChildren[childIndex];
    }

    mIndices.clear();

    // partition recursively, 将可用层数减少1,将每一个子节点进入下一次细分
    for (size_t i = 0; i < NUM_CHILDREN; i++)
    {
    if (mChildren[i] != NULL) {
    mChildren[i]->partition(levels - 1, minNumPoints, minSize);
    }
    }
    }
    }

3 Full implementation - https://github.com/abnerrjo/PlaneDetection

#ifndef BOUNDARYVOLUMEHIERARCHY_H
#define BOUNDARYVOLUMEHIERARCHY_H

#include <algorithm>
#include <numeric>
#include <queue>
#include <set>

#include "partitioner.h"

template <size_t DIMENSION>
class BoundaryVolumeHierarchy : public Partitioner<DIMENSION>
{
public:
typedef typename Point<DIMENSION>::Vector Vector;
static const size_t NUM_CHILDREN = 1 << DIMENSION;

BoundaryVolumeHierarchy(const PointCloud<DIMENSION> *pointCloud)
: Partitioner<DIMENSION>(pointCloud)
, mRoot(this)
, mParent(this)
, mLeaf(true)
, mLevel(0)
{
Rect<DIMENSION> extension = pointCloud->extension();
mCenter = extension.center();
mSize = extension.maxSize() / 2; // 点云最长边长的一半
mIndices = std::vector<size_t>(pointCloud->size());
std::iota(mIndices.begin(), mIndices.end(), 0);
mLeafTable = std::vector<BoundaryVolumeHierarchy<DIMENSION>*>(pointCloud->size(), this);
}

BoundaryVolumeHierarchy(const BoundaryVolumeHierarchy &bvh) = delete;

~BoundaryVolumeHierarchy()
{
if (!isLeaf())
{
for (size_t i = 0; i < NUM_CHILDREN; i++)
{
if (mChildren[i] != NULL)
{
delete mChildren[i];
}
}
}
}

// 建立octree,直到每个叶子节点最多包含minNumPoints
void partition(size_t levels = 1, size_t minNumPoints = 1, float minSize = 0.0f) override
{
if (!isLeaf())
{
for (size_t i = 0; i < NUM_CHILDREN; i++)
{
if (mChildren[i] != NULL)
mChildren[i]->partition(levels - 1, minNumPoints, minSize);
}
}
else
{
// 递归层次用完了 || 叶子内部点数达到最小要求 || 最多只有一个点了 || 节点的边长小于最小限度
if (levels <= 0 || mIndices.size() <= minNumPoints || mIndices.size() <= 1 || mSize < minSize) return;
// create new centers
Vector newCenters[NUM_CHILDREN];
calculateNewCenters(newCenters);

mLeaf = false;

// create children
float newSize = mSize / 2;
for (size_t i = 0; i < NUM_CHILDREN; i++)
{
mChildren[i] = NULL;
}

// split points
for (const size_t &index : mIndices)
{
// calculate child index comparing position to child center
// 对于每个点,计算它位于当前节点的哪个子节点?
size_t childIndex = calculateChildIndex(this->pointCloud()->at(index).position());
if (mChildren[childIndex] == NULL)
{
mChildren[childIndex] = new BoundaryVolumeHierarchy<DIMENSION>(this, newCenters[childIndex], newSize);
mChildren[childIndex]->mIndices.reserve(mIndices.size());
}
// 将这个点压入对应的子节点中
mChildren[childIndex]->mIndices.push_back(index);
// update current leaf where point is stored
mRoot->mLeafTable[index] = mChildren[childIndex];
}

mIndices.clear();

// partition recursively, 将可用层数减少1,将每一个子节点进入下一次细分
for (size_t i = 0; i < NUM_CHILDREN; i++)
{
if (mChildren[i] != NULL) {
mChildren[i]->partition(levels - 1, minNumPoints, minSize);
}
}
}
}

const Partitioner<DIMENSION>* getContainingLeaf(size_t index) const override
{
return mRoot->mLeafTable[index];
}

BoundaryVolumeHierarchy<DIMENSION>* child(size_t index)
{
return mChildren[index];
}

std::vector<const Partitioner<DIMENSION>*> children() const override
{
if (isLeaf()) return std::vector<const Partitioner<DIMENSION>*>();
std::vector<const Partitioner<DIMENSION>*> children;
for (size_t i = 0; i < NUM_CHILDREN; i++)
{
if (mChildren[i] != NULL)
children.push_back(mChildren[i]);
}
return children;
}

const Partitioner<DIMENSION>* parent() const override
{
return mParent;
}

const Vector& center() const
{
return mCenter;
}

float cellSize() const
{
return mSize;
}

bool isRoot() const override
{
return this == mRoot;
}

bool isLeaf() const override
{
return mLeaf;
}

size_t octreeLevel() const
{
return mLevel;
}

size_t numPoints() const override
{
if (isLeaf())
{
return mIndices.size();
}
else
{
size_t numPoints = 0;
for (size_t i = 0; i < NUM_CHILDREN; i++)
{
if (mChildren[i] != NULL)
numPoints += mChildren[i]->numPoints();
}
return numPoints;
}
}

Rect<DIMENSION> extension() const override
{
return Rect<DIMENSION>(mCenter - Vector::Constant(mSize),
mCenter + Vector::Constant(mSize));
}

std::vector<size_t> points() const override
{
if (isLeaf())
{
return mIndices;
}
else
{
std::vector<size_t> points;
for (size_t i = 0; i < NUM_CHILDREN; i++)
{
if (mChildren[i] != NULL)
{
std::vector<size_t> childPoints = mChildren[i]->points();
points.insert(points.end(), childPoints.begin(), childPoints.end());
}
}
return points;
}
}

void getNeighborCells(std::vector<BoundaryVolumeHierarchy<DIMENSION>*> &neighbors)
{
BoundaryVolumeHierarchy<DIMENSION>* neighbor;
for (size_t i = 0; i < DIMENSION; i++)
{
neighbor = getNeighborCellsGreaterOrEqual(i, false);
getNeighborCellsSmaller(i, false, neighbor, neighbors);
neighbor = getNeighborCellsGreaterOrEqual(i, true);
getNeighborCellsSmaller(i, true, neighbor, neighbors);
}
}

private:
BoundaryVolumeHierarchy *mRoot;
BoundaryVolumeHierarchy *mParent;
BoundaryVolumeHierarchy *mChildren[NUM_CHILDREN];
Vector mCenter;
float mSize;
bool mLeaf;
size_t mLevel;
std::vector<size_t> mIndices;
std::vector<BoundaryVolumeHierarchy<DIMENSION>*> mLeafTable;

BoundaryVolumeHierarchy(BoundaryVolumeHierarchy *parent, const Vector &center, float size)
: Partitioner<DIMENSION>(parent->pointCloud())
, mRoot(parent->mRoot)
, mParent(parent)
, mCenter(center)
, mSize(size)
, mLeaf(true)
, mLevel(parent->mLevel + 1)
{
for (size_t i = 0; i < NUM_CHILDREN; i++)
{
mChildren[i] = NULL;
}
}

void calculateNewCenters(Vector centers[NUM_CHILDREN])
{
float newSize = mSize / 2;
for (size_t dim = 0; dim < DIMENSION; dim++)
{
for (size_t i = 0; i < NUM_CHILDREN; i++)
{
int signal = (((i & (1 << (DIMENSION - dim - 1))) >> (DIMENSION - dim - 1)) << 1) - 1;
centers[i](dim) = mCenter(dim) + newSize * signal;
}
}
}

size_t calculateChildIndex(const Vector &position)
{
size_t childIndex = 0;
for (size_t dim = 0; dim < DIMENSION; dim++)
{
childIndex |= (position(dim) > mCenter(dim)) << (DIMENSION - dim - 1);
}
return childIndex;
}

size_t getIndexOnParent()
{
for (size_t i = 0; i < NUM_CHILDREN; i++)
{
if (mParent->mChildren[i] == this)
{
return i;
}
}
throw "Could not find node on parent";
}

size_t getIncrement(size_t direction, bool greaterThan)
{
int increment = 1 << (DIMENSION - direction - 1);
if (!greaterThan)
{
increment = NUM_CHILDREN - increment;
}
return increment;
}

BoundaryVolumeHierarchy<DIMENSION>* getNeighborCellsGreaterOrEqual(size_t direction, bool greaterThan)
{
if (isRoot())
{
return NULL;
}
else if (greaterThan && mParent->mCenter(direction) > mCenter(direction) ||
!greaterThan && mParent->mCenter(direction) < mCenter(direction))
{
size_t index = getIndexOnParent();
size_t increment = getIncrement(direction, greaterThan);
return mParent->mChildren[(index + increment) % NUM_CHILDREN];
}
BoundaryVolumeHierarchy<DIMENSION>* node = mParent->getNeighborCellsGreaterOrEqual(direction, greaterThan);
if (node == NULL || node->isLeaf())
{
return node;
}
size_t index = getIndexOnParent();
size_t increment = getIncrement(direction, !greaterThan);
return node->mChildren[(index + increment) % NUM_CHILDREN];
}

void getNeighborCellsSmaller(size_t direction, bool greaterThan, BoundaryVolumeHierarchy<DIMENSION>* neighbor,
std::vector<BoundaryVolumeHierarchy<DIMENSION>*> &neighbors)
{
std::queue<BoundaryVolumeHierarchy<DIMENSION>*> candidates;
if (neighbor != NULL) candidates.push(neighbor);
while (candidates.size() > 0)
{
BoundaryVolumeHierarchy<DIMENSION>* front = candidates.front();
candidates.pop();
if (front->isLeaf())
{
neighbors.push_back(front);
}
else
{
for (size_t i = 0; i < NUM_CHILDREN; i++)
{
if (front->mChildren[i] == NULL) continue;
if ((greaterThan && front->mCenter(direction) > front->mChildren[i]->mCenter(direction)) ||
(!greaterThan && front->mCenter(direction) < front->mChildren[i]->mCenter(direction)))
{
candidates.push(front->mChildren[i]);
}
}
}
}
}

};

template class BoundaryVolumeHierarchy<2>;
template class BoundaryVolumeHierarchy<3>;

typedef BoundaryVolumeHierarchy<2> Quadtree;
typedef BoundaryVolumeHierarchy<3> Octree;

#endif // BOUNDARYVOLUMEHIERARCHY_H

1 Mainstream plane-detection algorithms:

1.1 The Hough transform

  • 1 For every point (x, y, z), write the planes through it in point-normal form: n·p + d = 0. The normal n can be expressed in polar coordinates as a (θ, φ) pair, so a plane is determined by three parameters.
  • 2 For a point (x, y, z), sweep a discretized grid of θ and φ and compute the corresponding d. Taking θ and φ as the x/y axes and d as the z axis, this traces out a surface (each point of the surface is one plane passing through (x, y, z)), i.e. all planes that can pass through the point.
  • 3 Every other point yields such a surface as well; the intersection shared by the most surfaces, i.e. the accumulator cell with the most votes, is taken as the detected plane (see the sketch after this list).
  • 4 Complexity: O(n * num(θ) * num(φ)), where the last two factors are the numbers of discretized θ and φ values.
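
A rough accumulator sketch of this idea (illustrative only; the resolution, the parameter ranges and the tiny point type are assumptions of this example, not part of the paper or of any library):

#include <cmath>
#include <map>
#include <tuple>
#include <vector>
using namespace std;

struct P3 { double x, y, z; };

// Vote in a discretized (theta, phi, d) accumulator; the cell with the most votes is the plane.
tuple<double, double, double> houghPlane(const vector<P3>& pts,
                                         int nTheta = 90, int nPhi = 180, double dStep = 0.05) {
    const double PI = 3.14159265358979;
    map<tuple<int, int, int>, int> acc;            // (thetaIdx, phiIdx, dIdx) -> votes
    tuple<int, int, int> best; int bestVotes = -1;
    for (const P3& p : pts) {
        for (int it = 0; it < nTheta; ++it) {
            double theta = PI * it / nTheta;       // polar angle of the normal
            for (int ip = 0; ip < nPhi; ++ip) {
                double phi = 2 * PI * ip / nPhi;   // azimuth of the normal
                double nx = sin(theta) * cos(phi), ny = sin(theta) * sin(phi), nz = cos(theta);
                double d = -(nx * p.x + ny * p.y + nz * p.z);   // from n.p + d = 0
                auto key = make_tuple(it, ip, (int)round(d / dStep));
                int votes = ++acc[key];
                if (votes > bestVotes) { bestVotes = votes; best = key; }
            }
        }
    }
    return { PI * get<0>(best) / nTheta, 2 * PI * get<1>(best) / nPhi, get<2>(best) * dStep };
}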

1.2 RANSAC (RAndom SAmple Consensus)

  • 1 Pick a small subset (for example three random points) and estimate a plane from it.
  • 2 Evaluate how many points that plane fits, i.e. the number of inliers.
  • 3 Among all candidate planes produced by steps 1 and 2, the one with the largest inlier count is the final result (a sketch follows this list).
  • 4 Complexity: O(I) * O(n) = O(In), where I is the number of sampled initial subsets and n the number of points, since counting the inliers of one fitted plane simply means scanning every point.
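
A corresponding RANSAC sketch (again only an illustration; the iteration count, the inlier distance threshold and the cross-product plane fit are assumptions of this example):

#include <cmath>
#include <cstdlib>
#include <vector>
using namespace std;

struct Pt { double x, y, z; };
struct Plane { double a, b, c, d; };   // a*x + b*y + c*z + d = 0, with (a, b, c) normalized

// Fit a plane through three points via the cross product of two edge vectors.
static bool planeFrom3(const Pt& p, const Pt& q, const Pt& r, Plane& pl) {
    double ux = q.x - p.x, uy = q.y - p.y, uz = q.z - p.z;
    double vx = r.x - p.x, vy = r.y - p.y, vz = r.z - p.z;
    double a = uy * vz - uz * vy, b = uz * vx - ux * vz, c = ux * vy - uy * vx;
    double len = sqrt(a * a + b * b + c * c);
    if (len < 1e-12) return false;     // the three points are (nearly) collinear
    pl = { a / len, b / len, c / len, -(a * p.x + b * p.y + c * p.z) / len };
    return true;
}

Plane ransacPlane(const vector<Pt>& pts, int iters = 200, double inlierDist = 0.02) {
    Plane best{0, 0, 1, 0}; int bestInliers = -1;
    for (int it = 0; it < iters; ++it) {
        // 1. sample a minimal set: three random points
        const Pt& p = pts[rand() % pts.size()];
        const Pt& q = pts[rand() % pts.size()];
        const Pt& r = pts[rand() % pts.size()];
        Plane pl;
        if (!planeFrom3(p, q, r, pl)) continue;
        // 2. count the inliers of this candidate plane: O(n) per candidate
        int inliers = 0;
        for (const Pt& x : pts)
            if (fabs(pl.a * x.x + pl.b * x.y + pl.c * x.z + pl.d) < inlierDist) ++inliers;
        // 3. keep the candidate with the most inliers
        if (inliers > bestInliers) { bestInliers = inliers; best = pl; }
    }
    return best;
}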

1.3 Region growing

Detailed walkthrough of the algorithm from "A Robust Statistics Approach for Plane Detection in Unorganized Point Clouds":

2 The algorithm of "A Robust Statistics Approach for Plane Detection in Unorganized Point Clouds" in detail

2.1 Plane detection

2.1.1 Splitting the point cloud (split phase)

Roughly the following steps:

  • 1 Recursively split the point cloud into an octree.
  • 2 For sufficiently small nodes (say at least octree level 3), run planar patch detection.
    • 2.1 If the current node can form a plane, filter the outliers out of the planar patch and then add it to the results.
      The concrete implementation is the following code:
      bool PlaneDetector::detectPlanarPatches(Octree *node, StatisticsUtils *statistics, size_t minNumPoints, std::vector<PlanarPatch*> &patches)
      {
      if (node->numPoints() < minNumPoints) return false;
      node->partition(1, minNumPoints); // 将当前节点划分为更细节点
      bool hasPlanarPatch = false;
      for (size_t i = 0; i < 8; i++)
      {
      if (node->child(i) != NULL && detectPlanarPatches(node->child(i), statistics, minNumPoints, patches)) // 对每各子节点进行patch探测
      {
      hasPlanarPatch = true;
      }
      }
      if (!hasPlanarPatch && node->octreeLevel() > 2) // 足够小的子节点并且没有patch
      {
      PlanarPatch *patch = new PlanarPatch(pointCloud(), statistics, node->points(), mMinNormalDiff, mMaxDist, mOutlierRatio);
      if (patch->isPlanar()) // 如果patch构成平面,则滤除外点
      {
      patches.push_back(patch); // 将当前patch加入到探测出来的所有patch
      hasPlanarPatch = true;
      }
      else
      {
      delete patch;
      }
      }
      return hasPlanarPatch;
      }

2.1.2 The robust planarity test

So how is the planarity check done for these sufficiently small nodes (the robust planarity test)?
PCA is affected by outliers, and robust PCA is too slow, so:

  • 1
  • 2
  • 3

0 Common graph algorithms

Union-find needs no introduction; it is for connectivity.

Dijkstra: single-source shortest paths.

Tarjan's algorithm:

Tarjan finds cut vertices (articulation points, the critical nodes) and cut edges.

1192criticalConnections (Critical Connections in a Network)

1 Problem

https://leetcode.cn/problems/critical-connections-in-a-network/

2 Approach

  • 1 Approach: use Tarjan's algorithm for cut vertices and cut edges.
    • Implementation reference: https://www.cnblogs.com/collectionne/p/6847240.html
    • The algorithm in one sentence: dfsNo[i] is the DFS order of node i, and low[i] is the smallest DFS number that node i or any node in its DFS subtree can reach using only back edges (a back edge is an edge not on the DFS tree). For a tree edge u->v, if dfsNo[u] < low[v], then v cannot reach any node at or before u through back edges, so (u, v) is a cut edge. The inequality must be strict: if a back edge can reach u itself, then dfsNo[u] == low[v] and (u, v) is not a cut edge!
    • Likewise, dfsNo[u] <= low[v] makes u a cut vertex; equality is allowed here because u is exactly the vertex being cut away.
class Solution {
public:

void Tarjan(vector<vector<int>>& g, int st, vector<int>& parent, vector<int>& low, vector<int>& dfsNo, int no, set<int>& cutPoints, vector<vector<int>>& cutEdges) {
int rootChildNum = 0;
low[st] = dfsNo[st] = no;
for(auto neighbor : g[st]) {
// if(parent[neighbor] == st) {
// continue;
// }
if(-1 != dfsNo[neighbor]) { // st -> neighbor 是一条回边
if(parent[st] != neighbor) { // 且不在搜索子树内
low[st] = min(low[st], dfsNo[neighbor]); // 更新low[st]为所有能通过回边到达的最小的dfsNo
}
} else {
parent[neighbor] = st;
++rootChildNum;

++no;
// cout << "u/v " << st<<"/"<<neighbor << " " << no << endl;
Tarjan(g, neighbor, parent, low, dfsNo, no, cutPoints, cutEdges);
low[st] = min(low[st], low[neighbor]); // 更新为所有搜索子树节点中能够通过回边到达的最小的dfsNo

if(-1 != parent[st] && dfsNo[st] <= low[neighbor]) {
cutPoints.insert(st);
}
if(dfsNo[st] < low[neighbor]) {
cutEdges.emplace_back(vector<int>{st, neighbor});
}
}
}
if(parent[st] == -1 && rootChildNum >= 2) {
cutPoints.insert(st);
}
}

vector<vector<int>> criticalConnections(int n, vector<vector<int>>& connections) {
// build graph
vector<vector<int>> g(n);
for(auto e : connections) {
g[e[0]].push_back(e[1]);
g[e[1]].push_back(e[0]);
}

vector<int> dfsNo(n, -1);
vector<int> low(n, -1); // low[i]:i号节点以及其子树中所有的搜索节点仅通过 回边 能够到达的节点的dfs序号(回边:非搜索树上的边)
vector<int> parent(n, -1); // 记录搜索树
int no = 0;

set<int> cutPoints;
vector<vector<int>> cutEdges;
dfsNo[0] = 0;
Tarjan(g, 0, parent, low, dfsNo, no, cutPoints, cutEdges);
for(auto i : cutPoints) {
cout << i << " ";
}
cout << "low: ";
for(auto i : low) {
cout << i << " ";
}
cout << "\ndfsNo: ";
for(auto no : dfsNo) {
cout << no << " ";
}
return cutEdges;
}
};

Tarjan for strongly connected components

https://byvoid.com/zhs/blog/scc-tarjan/
When DFN(u) == Low(u), all nodes of the DFS subtree rooted at u form one strongly connected component; a minimal sketch follows.
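
A minimal Tarjan SCC sketch of that statement (my own illustration; the global arrays, their initialization to -1, and the explicit stack are assumptions of this example, not tied to any specific problem):

#include <algorithm>
#include <stack>
#include <vector>
using namespace std;

vector<vector<int>> g;                 // adjacency list of a directed graph
vector<int> dfn, low, sccId;           // caller resizes dfn/low to n filled with -1, sccId to n
vector<bool> onStk;                    // caller resizes to n filled with false
stack<int> stk;
int dfsClock = 0, sccCnt = 0;

void tarjanScc(int u) {
    dfn[u] = low[u] = dfsClock++;
    stk.push(u); onStk[u] = true;
    for (int v : g[u]) {
        if (dfn[v] == -1) { tarjanScc(v); low[u] = min(low[u], low[v]); }
        else if (onStk[v]) low[u] = min(low[u], dfn[v]);
    }
    if (dfn[u] == low[u]) {            // u is the root of one strongly connected component
        for (int v = -1; v != u; ) {
            v = stk.top(); stk.pop(); onStk[v] = false;
            sccId[v] = sccCnt;         // everything popped down to and including u is this SCC
        }
        ++sccCnt;
    }
}
// usage: for every node u with dfn[u] == -1, call tarjanScc(u)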

1 Problem set

0882reachableSubNodes (Reachable Nodes in Subdivided Graph, Dijkstra)

1 Problem

https://leetcode.cn/problems/reachable-nodes-in-subdivided-graph/

2 Approach

  • 1 Approach: a similar problem: https://leetcode.cn/problems/partition-array-into-two-arrays-to-minimize-sum-difference/
    • 1.1 Run Dijkstra to get the distance to every original node.
    • 1.2 The reachable nodes consist of two parts:
      • a: original nodes, namely those whose distance is at most maxMoves
      • b: subdivision nodes: simple as well; for every edge (u, v), starting from u and from v we can still advance maxMoves - dis[u] and maxMoves - dis[v] steps respectively, and that is the count of reachable subdivision nodes
        • Two things to watch: 1) maxMoves is not necessarily larger than dis[u]; 2) if the counts from u and from v add up to more than the total number of subdivision nodes on (u, v), remember to clamp to that total
  • 2 A review of Dijkstra:
    unordered_map<int, unordered_map<int, int>> g;

    auto cmp = [](const pair<int, int>& a, const pair<int, int>& b) {
    return a.first > b.first;
    };
    priority_queue<pair<int, int>, vector<pair<int, int>>, decltype(cmp)> pq(cmp);

    for(auto& e : edges) {
    int u = e[0];
    int v = e[1];
    int w = e[2];
    g[u][v] = w+1;
    g[v][u] = w+1;
    }

    // init dis
    vector<int> disRes(n, INT_MAX);
    map<int, bool> visited;
    visited[0] = true;
    disRes[0] = 0;
    pq.push({0, 0});

    // contain 0 itSelf!

    int res = 0;


    // dijstra
    while(pq.size() > 0) {
    auto node = pq.top();
    pq.pop();
    int u = node.second;
    int dis = node.first;

    // for disRes[u], every relax will push a new pair, so we need
    // to pass all old relaxed value
    /**
    *
    * eg: [[2,3,4],[1,3,9],[0,2,4],[0,1,1]]
* The relaxation of the Dijkstra distance table on this graph goes:
0 2 2147483647 2147483647
0 2 5 2147483647
0 2 5 12 // here the first relaxation of node 3 comes from node 1,
0 2 5 10 // the second comes from node 2; both results are pushed into pq. So that the later
// pq entry "0 to 3 with distance 12" can be skipped, we compare 12 against the stored 10
// and thus know that 12 is a stale first relaxation and 10 is the final one; otherwise,
// if there were further nodes, 12 would go on relaxing them after 10 already had, which
// would be wrong.
// So this continue is necessary.
    */
    if(disRes[u] < dis) {
    continue;
    }
    visited[u] = true;
    disRes[u] = dis;

    res += (disRes[u] <= maxMoves);

    // relax nodes by u
    for(auto& neighbor : g[u]) {
    int v = neighbor.first;
    int w = neighbor.second;
    // v has been proceed!
    if(!visited[v] && disRes[v] > disRes[u] + g[u][v]) {
    disRes[v] = disRes[u] + g[u][v];
    pq.push({disRes[v], v});
    // print(disRes);
    }
    }
    };

class Solution {
public:
int reachableNodes(vector<vector<int>>& edges, int maxMoves, int n) {
unordered_map<int, unordered_map<int, int>> g;

auto cmp = [](const pair<int, int>& a, const pair<int, int>& b) {
return a.first > b.first;
};
priority_queue<pair<int, int>, vector<pair<int, int>>, decltype(cmp)> pq(cmp);

for(auto& e : edges) {
int u = e[0];
int v = e[1];
int w = e[2];
g[u][v] = w+1;
g[v][u] = w+1;
}

// init dis
vector<int> disRes(n, INT_MAX);
map<int, bool> visited;
visited[0] = true;
disRes[0] = 0;
pq.push({0, 0});
// disRes[0] = 0;
// for(auto& neighbor : g[0]) {
// int to = neighbor.first;
// int w = neighbor.second
// disRes[to] = w;
// pq.push({w, to});
// }

// contain 0 itSelf!
int res = 0;


// dijstra
while(pq.size() > 0) {
auto node = pq.top();
pq.pop();
int u = node.second;
int dis = node.first;

// for disRes[u], every relax will push a new pair, so we need
// to pass all old relaxed value
if(disRes[u] < dis) {
continue;
}
visited[u] = true;
disRes[u] = dis;

res += (disRes[u] <= maxMoves);

// relax nodes by u
for(auto& neighbor : g[u]) {
int v = neighbor.first;
int w = neighbor.second;
// v has been proceed!
if(!visited[v] && disRes[v] > disRes[u] + g[u][v]) {
disRes[v] = disRes[u] + g[u][v];
pq.push({disRes[v], v});
// print(disRes);
}
}
}

// count sub nodes
for(auto& e : edges) {
int u = e[0];
int v = e[1];
int w = e[2];
int curSubNodes = 0;
if(visited[u]) {
curSubNodes += max(maxMoves - disRes[u], 0);
}
if(visited[v]) {
curSubNodes += max(maxMoves - disRes[v], 0);
}
curSubNodes = min(curSubNodes, w);
// cout << "curSub/edge: " << curSubNodes << " " << "e: " << u << "->" << v << endl;
res += curSubNodes;
}


return res;
}
void print(vector<int>& dis) {
for(int i = 0; i < dis.size(); ++i) {
cout <<dis[i] << " ";
}cout << "\n";
}
};

1368minCostToHaveAPath (Minimum Cost to Make at Least One Valid Path in a Grid)

1 Problem

https://leetcode.cn/problems/minimum-cost-to-make-at-least-one-valid-path-in-a-grid/

2 Approach

  • 1 Approach:
    • 1.1 The idea: build a graph in which moving from u to v costs 0 if it follows the arrow stored in the cell and 1 otherwise.
      • Then it is simply the shortest path from (0, 0) to (m-1, n-1), which Dijkstra solves.
    • 1.2 But if you insist on BFS:
      • The fix is simple: whether a node v enters the next BFS round is no longer decided by a vis array. With dis[u] the distance from the source to u, a neighbor v enters the next round if dis[u] + g[u][v].weight < dis[v], i.e. if v was just relaxed. This is essentially the Dijkstra idea, though it may repeat a lot of work.
    • 1.3 This is known as 0-1 BFS: every node in the queue is at distance dis or dis+1 from the source. Properly you maintain a deque, pushing v to the front when g[u][v].weight is 0 and to the back otherwise; not doing so and using a plain queue also happens to work here. A deque-based sketch is given after the code below.
class Solution {
public:
int dx[4] = {1,-1, 0, 0};
int dy[4] = {0, 0, 1, -1};
int m, n;

int bfs(vector<vector<pair<int, int>>>& g) {
// start from 0, search first 0 wighted edges
deque<pair<int, int>> curLevel; // <node, dis from 0,0>
curLevel.push_front({0, 0});
vector<bool> vis(m*n);
vector<int> disV(m*n, INT_MAX);
disV[0] = 0;
int tar = m * n - 1;


int res = INT_MAX;
while(0 != curLevel.size()) {
// cout << "---" << endl;
int curLevelSize = curLevel.size();
while(curLevelSize-- > 0) {
auto curNode = curLevel.front();
curLevel.pop_front();
// cout << curNode.first << "-> dis: " << curNode.second << endl;

int curDis = disV[curNode.first];
for(auto& [neighbor, dis] : g[curNode.first]) {
// cout << "from/to: " << curNode.first << "/ " << neighbor << endl;
int newDis = curDis + dis;
if(newDis < disV[neighbor]) {
curLevel.push_back({neighbor, dis});
// if(dis == 0) {
// curLevel.push_front({neighbor, 0});
// } else {
// curLevel.push_back({neighbor, 1});
// }
disV[neighbor] = newDis;
}
if(neighbor == tar) {
res = min(res, disV[tar]);
}
}
}
}

return res == INT_MAX ? 0 : res;
}

int minCost(vector<vector<int>>& grid) {
m = grid.size();
n = grid[0].size();
vector<vector<pair<int, int>>> g(m * n);

for(int i = 0; i < m; ++i) {
for(int j = 0; j < n; ++j) {
int curNode = j + i * n;
// cout << "i/j" << i << " " << j << endl;
for(int mvIdx = 0; mvIdx < 4; ++mvIdx) {
int nextI = i + dy[mvIdx];
int nextJ = j + dx[mvIdx];
if(0 <= nextI && nextI < m && 0 <= nextJ && nextJ < n) {
int nextNode = nextI * n + nextJ;
// cout << mvIdx << endl;
if(grid[i][j] == mvIdx + 1) {
g[curNode].push_back({nextNode, 0});
// cout << "to " << nextNode << " = " << 0 << " ";
} else {
g[curNode].push_back({nextNode, 1});
// cout << "to " << nextNode << " = " << 1 << " ";
}
// cout << "\n";
}
}
sort(g[curNode].begin(), g[curNode].end(),
[](const pair<int, int>& a, const pair<int, int>& b) {
return a.second < b.second;
});
}
}

// bfs to get dis
return bfs(g);
}
};
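
For reference, a compact deque-based 0-1 BFS over the same kind of 0/1-weighted adjacency list (a generic sketch; only the adjacency-list shape vector<vector<pair<int,int>>> mirrors the code above, the rest is assumed for illustration):

#include <climits>
#include <deque>
#include <vector>
using namespace std;

// g[u] = list of {v, w} with w in {0, 1}; returns the distance from src to every node
vector<int> zeroOneBfs(const vector<vector<pair<int, int>>>& g, int src) {
    vector<int> dist(g.size(), INT_MAX);
    deque<int> dq;
    dist[src] = 0;
    dq.push_front(src);
    while (!dq.empty()) {
        int u = dq.front(); dq.pop_front();
        for (auto [v, w] : g[u]) {
            if (dist[u] + w < dist[v]) {
                dist[v] = dist[u] + w;
                // 0-weight edges stay in the current distance layer (front),
                // 1-weight edges go to the next layer (back)
                if (w == 0) dq.push_front(v);
                else dq.push_back(v);
            }
        }
    }
    return dist;
}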

1579maxNumEdgeToRemove (Remove Max Number of Edges to Keep Graph Fully Traversable)

1 Problem

https://leetcode.cn/problems/remove-max-number-of-edges-to-keep-graph-fully-traversable/

2 Approach

  • 1 Approach (one could say union-find is the canonical case of thinking in reverse):
    • 1.1 Do not reason forward about which edges to delete; that is 2^n and hopeless. Reverse the problem and add edges one by one instead.
    • 1.2 Add the common (type 3) edges first. An edge that merges two components is useful; otherwise it is useless and ++res. Then check connectivity: if the graph is already fully connected, Alice's and Bob's exclusive edges are not needed at all and we can return the result directly.
    • 1.3 Otherwise handle Alice and Bob separately. For Alice, start from the union-find obtained in 1.2 and try to add every type 1 edge, incrementing res on each failed merge; do the same for Bob with the type 2 edges, and finally return res. The failure case, of course, is that at least one of Alice's and Bob's union-finds still has more than one root.
class Solution {
public:

class UF {
public:
int rootNums;
vector<int> parent;

UF(int size) : rootNums(size - 1) {
for(int i = 0; i < size; ++i) {
parent.push_back(i);
}
}

int find(int x) {
int oldX = x;
while(x != parent[x]) {
// parent[x] = parent[parent[x]]; // compression path
x = parent[x];
}
parent[oldX] = x; // the same as upwards
return x;
}

bool merge(int x, int y) {
int rootX = find(x);
int rootY = find(y);
if(rootX != rootY) {
parent[rootX] = rootY;
--rootNums;
return true;
}
return false;
}
};

int maxNumEdgesToRemove(int n, vector<vector<int>>& edges) {
vector<vector<int>> aEdges;
vector<vector<int>> bEdges;
vector<vector<int>> abEdges;

for(auto& e : edges) {
if(1 == e[0]) {
aEdges.push_back(e);
} else if(2 == e[0]) {
bEdges.push_back(e);
} else {
abEdges.push_back(e);
}
}

int removeEdgeCnt = 0;

UF uf(n + 1);
int abCnt = 0;
// firstly add the common edges
for(auto& e : abEdges) {
if(!uf.merge(e[1], e[2])) {
// cout << "ab: rm: " << e[1] << "-" << e[2] << endl;
++removeEdgeCnt;
} else {
// cout << "ab: merge: " << e[1] << "-" << e[2] << endl;
++abCnt;
}
}

if(1 == uf.rootNums) {
return edges.size() - abCnt;
}
// cout << "cur rmAB cnt: " << removeEdgeCnt << endl;
UF ufA = uf;
UF ufB = uf;
// try to make alice all ok
int aCnt = 0;
for(auto& e : aEdges) {
if(!ufA.merge(e[1], e[2])) {
++removeEdgeCnt;
// cout << "a: rm: " << e[1] << "-" << e[2] << endl;
}
}
// cout << "cur rmA cnt: " << removeEdgeCnt << endl;

// try to make bob all ok
for(auto& e : bEdges) {
if(!ufB.merge(e[1], e[2])) {
++removeEdgeCnt;
// cout << "b: rm: " << e[1] << "-" << e[2] << endl;
}
}

// cout << "cur rmB cnt: " << removeEdgeCnt << endl;
if(1 == ufB.rootNums && 1 == ufA.rootNums) {
return removeEdgeCnt;
}
return -1;
}
};

1928 minCostToReachInMaxTime: minimum cost to reach the destination within the time limit

1 Problem

https://leetcode.cn/problems/minimum-cost-to-reach-destination-in-time/

2 Solution

  • 1 Approach: dynamic programming:
    • f[t][i]: minimum total fees to reach node i at exactly time t
    • f[t][i] = min over edges (j, i) of (f[t - time(j, i)][j] + fee[i])
class Solution {
public:
using pii = pair<int, int>;
int minCost(int maxTime, vector<vector<int>>& edges, vector<int>& passingFees) {
// build g
int n = passingFees.size();
vector<vector<pii>> g(n);
for(auto&e : edges) {
g[e[0]].push_back({e[1], e[2]});
g[e[1]].push_back({e[0], e[2]});
}


// f[t][i]: fees in exact time t to reach node i
int res = INT_MAX;
vector<vector<int>> f(maxTime+1, vector<int>(n, INT_MAX));
f[0][0] = passingFees[0];
// f[t][i] = min(f[t - time(j, i)][j] + fee[i])
for(int t = 1; t <= maxTime; ++t) {
for(int i = 1; i < n; ++i) {
for(auto& [j, time] : g[i]) {
if(t - time >= 0 && f[t-time][j] != INT_MAX) {
f[t][i] = min(f[t][i], f[t - time][j] + passingFees[i]);
}
}
}
}

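// answer: cheapest way to reach node n-1 over any total time from 1 to maxTime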
for(int t = 1; t <= maxTime; ++t) {
res = min(res, f[t][n-1]);
}

return res == INT_MAX ? -1 : res;
}
};

2065 maxPathValueWithinMaxtime: maximum path quality within the time limit

1 Problem

https://leetcode.cn/problems/maximum-path-quality-of-a-graph/

2 Solution

  • 1 Approach:
    • 1.1 How do we enumerate all paths? That is exactly backtracking: from node u, try a neighbor v and recurse; when the DFS on v returns, mark v unvisited again and try the next neighbor.
    • 1.2 When do we update the answer? Whenever the current node is 0 and the remaining time is >= 0.
    • 1.3 When do we return? As soon as the remaining time drops below 0.
    • 1.4 Each node's value must be counted only once per path. During backtracking, if the neighbor v has already been used, recurse into v without adding its value and without resetting its visited flag afterwards; the flag is set and cleared only at the single place where we enter a previously unvisited v.
class Solution {
public:
long long n;
using pii = pair<long long, long long>;

void dfs(unordered_map<long long, vector<pii>>& g, long long st, long long leftTime, vector<int>& values, vector<bool>& used, long long& res, long long& curValue) {
// even the time cost least node not able to reach
// cout << "at node/lefttime" << st << "/" << leftTime << endl;

if(0 == st && leftTime >= 0) {
res = max(res, curValue);
}

if(leftTime < 0) {
return ;
}

for(auto& [v, w] : g[st]) {

if(!used[v]) {
// cout << "u->v: " << st << "->" << v << endl;
used[v] = true;
curValue += values[v];

dfs(g, v, leftTime - w, values, used, res, curValue);

// cout << "use new node: maxValue is : " << res << endl;
curValue -= values[v];
used[v] = false;
} else {
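// v is already on the current path: walk back through it, but do not add its
// value again and do not touch its visited flag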

dfs(g, v, leftTime - w, values, used, res, curValue);
// cout << "use new node: maxValue is : " << res << endl;
}
}
}

int maximalPathQuality(vector<int>& values, vector<vector<int>>& edges, int maxTime) {
n = values.size();

unordered_map<long long, vector<pii>> g(n);
for(auto& e : edges) {
long long u = e[0];
long long v = e[1];
long long w = e[2];

g[u].push_back({v, w});
g[v].push_back({u, w});
}

// smallest weighted edge first
for(auto& [node, neighbors] : g) {
sort(neighbors.begin(), neighbors.end(), [](const pii& a, const pii& b) {
return a.second < b.second;
});
}
vector<bool> vecBool(n, false);
vecBool[0] = true;

long long res = INT_MIN;
long long curValue = values[0];

if(0 == g[0].size()) {
return values[0];
}

dfs(g, 0, maxTime, values, vecBool, res, curValue);
return res;
}
};

1 Reference project

REST client in C++: https://github.com/jgaa/restc-cpp

2 Download the dependencies

OpenSSL download:
https://github.com/CristiFati/Prebuilt-Binaries/blob/master/OpenSSL/v1.1.1/OpenSSL-1.1.1o-Win-pc064.zip

3 Create a cloud mock

Personal example mock: https://mock.apifox.cn/m1/1153222-0-default/roomlist

nash5@DESKTOP-0DDCG1U MINGW64 /f/myFiles/blogs (main)
$ curl https://mock.apifox.cn/m1/1153222-0-default/roomlist
[{"id":1,"name":"room1"}]

4 Example code and test run

Install Docker, run restc-cpp's create_container script, then modify and run BasicTests.cpp.


#include <iostream>

#include "restc-cpp/logging.h"

#include <boost/lexical_cast.hpp>
#include <boost/fusion/adapted.hpp>

#include "restc-cpp/restc-cpp.h"
#include "restc-cpp/RequestBuilder.h"

#include "restc-cpp/test_helper.h"

using namespace std;
using namespace restc_cpp;


struct Room {
int id = 0;
string name;
};

BOOST_FUSION_ADAPT_STRUCT(
Room,
(int, id)
(string, name)
)

const static string roomListUrl = "https://mock.apifox.cn/m1/1153222-0-default/roomlist";

void tryGetRoom(Context& ctx) {
// Asynchronously fetch the entire data-set, and convert it from json
// to C++ objects as we go.
// We expect a list of Room objects
cout << "getting room now!" << endl;
list<Room> room_list;
SerializeFromJson(room_list, ctx.Get(GetDockerUrl(roomListUrl)));

// Just dump the data.
for (const auto& room: room_list) {
RESTC_CPP_LOG_INFO_("Post id=" << room.id << ", title: " << room.name);
}
}

int main(int argc, char *argv[]) {

RESTC_CPP_TEST_LOGGING_SETUP("debug");

try {


auto rest_client = RestClient::Create();
cout << "trying to get rooms" << endl;
// auto future = rest_client->ProcessWithPromise(DoSomethingInteresting);
auto future = rest_client->ProcessWithPromise(tryGetRoom);

// Hold the main thread to allow the worker to do its job
future.get();
return 0;
} catch (const exception& ex) {
RESTC_CPP_LOG_INFO_("main: Caught exception: " << ex.what());
}

return 0;
}

The run looks like this:

nash5@DESKTOP-0DDCG1U MINGW64 /f/prjs/restc-cpp/build/tests/functional/Release (master)
$ ./basic_tests.exe
trying to get rooms
[2022-06-21 17:24:14.729043] [0x00008d94] [debug] Worker 0 is starting.
getting room now!
[2022-06-21 17:24:14.758042] [0x00008d94] [debug] Connecting to 114.55.47.169:443
[2022-06-21 17:24:14.807048] [0x00008d94] [debug] Sent GET request to 'https://mock.apifox.cn/m1/1153222-0-default/roomlist' {Connection f3f975c7-cbbb-44a4-8d0c-2c95450273e2 {TlsSocket
socket# 624 192.168.5.44:50487 <--> 114.55.47.169:443}}
[2022-06-21 17:24:14.888860] [0x00008d94] [info] Post id=1, title: room1
[2022-06-21 17:24:14.888860] [0x00008d94] [debug] Worker 0 is done.

Tarjan's algorithm: finding cut vertices and cut edges

1192 criticalConnections: find the critical connections in a network

1 Problem

https://leetcode.cn/problems/critical-connections-in-a-network/

2 Solution

  • 1 Approach: use Tarjan's algorithm to find cut vertices and cut edges (a tiny example follows this list)
    • Reference implementation: https://www.cnblogs.com/collectionne/p/6847240.html
    • The algorithm in one sentence: dfsNo[i] is the DFS order of node i; low[i] is the smallest dfsNo reachable from i or any node in its DFS subtree via back edges (a back edge is an edge not on the DFS tree). For a tree edge u->v, if dfsNo[u] < low[v], then v's subtree cannot reach u or anything visited before u through back edges, so (u, v) is a cut edge. The inequality must be strict: if a back edge can reach u itself, then dfsNo[u] == low[v] and (u, v) is not a cut edge.
    • Similarly, dfsNo[u] <= low[v] makes u a cut vertex; equality is allowed here because removing u itself is enough to disconnect v's subtree.
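For intuition: in a graph consisting of a triangle 0-1-2 plus one extra edge 1-3, the triangle contains no critical connection, the edge (1, 3) is the only cut edge, and node 1 is the only cut vertex.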
class Solution {
public:

void Tarjan(vector<vector<int>>& g, int st, vector<int>& parent, vector<int>& low, vector<int>& dfsNo, int no, set<int>& cutPoints, vector<vector<int>>& cutEdges) {
int rootChildNum = 0; // number of DFS-tree children of st, used for the root cut-vertex rule below
low[st] = dfsNo[st] = no;
for(auto neighbor : g[st]) {
// if(parent[neighbor] == st) {
// continue;
// }
if(-1 != dfsNo[neighbor]) { // st -> neighbor is a back edge: neighbor was already visited
if(parent[st] != neighbor) { // and neighbor is not st's parent in the DFS tree
low[st] = min(low[st], dfsNo[neighbor]); // smallest dfsNo reachable from st through a back edge
}
} else {
parent[neighbor] = st;
++rootChildNum;

++no;
// cout << "u/v " << st<<"/"<<neighbor << " " << no << endl;
Tarjan(g, neighbor, parent, low, dfsNo, no, cutPoints, cutEdges);
low[st] = min(low[st], low[neighbor]); // propagate: smallest dfsNo reachable from anywhere in neighbor's DFS subtree

if(-1 != parent[st] && dfsNo[st] <= low[neighbor]) {
cutPoints.insert(st);
}
if(dfsNo[st] < low[neighbor]) {
cutEdges.emplace_back(vector<int>{st, neighbor});
}
}
}
if(parent[st] == -1 && rootChildNum >= 2) {
cutPoints.insert(st);
}
}

vector<vector<int>> criticalConnections(int n, vector<vector<int>>& connections) {
// build graph
vector<vector<int>> g(n);
for(auto e : connections) {
g[e[0]].push_back(e[1]);
g[e[1]].push_back(e[0]);
}

vector<int> dfsNo(n, -1);
vector<int> low(n, -1); // low[i]: smallest dfsNo reachable from node i or its DFS subtree via back edges (edges not on the DFS tree)
vector<int> parent(n, -1); // records the DFS tree
int no = 0;

set<int> cutPoints;
vector<vector<int>> cutEdges;
dfsNo[0] = 0;
Tarjan(g, 0, parent, low, dfsNo, no, cutPoints, cutEdges);
for(auto i : cutPoints) {
cout << i << " ";
}
cout << "low: ";
for(auto i : low) {
cout << i << " ";
}
cout << "\ndfsNo: ";
for(auto no : dfsNo) {
cout << no << " ";
}
return cutEdges;
}
};

Tarjan for strongly connected components

https://byvoid.com/zhs/blog/scc-tarjan/
When DFN(u) == Low(u), all nodes in the DFS subtree rooted at u form one strongly connected component.
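The linked article states the rule, but this post has no SCC code, so here is a minimal sketch for a directed graph stored as adjacency lists (my own illustration; the struct and member names are not from the article): when dfn[u] == low[u] after u's DFS finishes, everything popped from the stack down to u is one strongly connected component.

#include <iostream>
#include <vector>
#include <stack>
#include <algorithm>
using namespace std;

struct TarjanSCC {
    int n, timer = 0;
    vector<vector<int>> g;
    vector<int> dfn, low;
    vector<bool> onStack;
    stack<int> stk;
    vector<vector<int>> sccs;

    TarjanSCC(int n) : n(n), g(n), dfn(n, -1), low(n, -1), onStack(n, false) {}

    void addEdge(int u, int v) { g[u].push_back(v); } // directed edge u -> v

    void dfs(int u) {
        dfn[u] = low[u] = timer++;
        stk.push(u);
        onStack[u] = true;
        for (int v : g[u]) {
            if (dfn[v] == -1) {            // tree edge: recurse, then take the child's low
                dfs(v);
                low[u] = min(low[u], low[v]);
            } else if (onStack[v]) {       // edge back into the SCC currently being built
                low[u] = min(low[u], dfn[v]);
            }
        }
        if (dfn[u] == low[u]) {            // u is the root of an SCC: pop down to u
            vector<int> scc;
            int v;
            do {
                v = stk.top(); stk.pop();
                onStack[v] = false;
                scc.push_back(v);
            } while (v != u);
            sccs.push_back(scc);
        }
    }

    vector<vector<int>> run() {
        for (int u = 0; u < n; ++u)
            if (dfn[u] == -1) dfs(u);
        return sccs;
    }
};

int main() {
    // 0 -> 1 -> 2 -> 0 form one SCC; 3 and 4 are singleton SCCs
    TarjanSCC t(5);
    t.addEdge(0, 1); t.addEdge(1, 2); t.addEdge(2, 0);
    t.addEdge(2, 3); t.addEdge(3, 4);
    for (auto& scc : t.run()) {
        for (int v : scc) cout << v << ' ';
        cout << '\n';
    }
    return 0;
}

Compared with the cut-edge code above, the extra pieces are the explicit stack and the onStack flag: low[u] is only lowered by dfn[v] when v is still on the stack, i.e. still inside the SCC being built.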