版权声明:本文为博主原创文章,未经博主允许不得转载。 https://blog.csdn.net/TH_NUM/article/details/85053207
1 .指针赋值
// Assigning through a pointer-to-pointer: `root` itself is a pointer that
// may still be null, so to write a new value INTO `root` we take its
// address and assign through the resulting node_type**.
typedef tree_node<T> node_type;
node_type* root;
node_type **n =&root; // root is a pointer but may be null; take its address so we can assign a new node into it
*n =new node_type(v,p,0,0);
2 . c++ callback 函数
Using StatusCallback = std::function<void<const Status&>>
使用template构造函数:
// Generic GPU for-loop: every launched thread applies `op` to indices in
// [0, loop_size), each thread stepping by the total number of launched
// threads so the whole range is covered cooperatively.
template <typename SizeT, typename OpT>
__global__
void loop_kernel(
    SizeT loop_size,
    OpT op)
{
    const SizeT total_threads = (SizeT)gridDim.x * blockDim.x;
    for (SizeT idx = (SizeT)blockDim.x * blockIdx.x + threadIdx.x;
         idx < loop_size;
         idx += total_threads)
    {
        op(idx);
    }
}
3 . 其中shared_ptr 指针初始化类似上面的:
// Allocates a persistent buffer of `size` bytes and hands ownership to the
// caller through `tensor`. Always reports success: allocation failures are
// raised as PyTorch exceptions rather than returned as a bad Status.
template <MPIDataType DT, DeviceType Dev, class T, class MASKT >
Status TorchOpContext<DT, Dev, T, MASKT>::AllocatePersistent(
    int64_t size, std::shared_ptr<PersistentBuffer>* tensor) {
  auto buffer = std::make_shared<TorchPersistentBuffer>(device_, size);
  *tensor = std::move(buffer);
  return Status::OK();
}
4 . pair类型vector
// A vector of (name, value) pairs.
std::vector<std::pair<std::string,uint64_t>> layers;
// BUG in original: make_pair(1,1) yields pair<int,int>, which does not
// convert to pair<string, uint64_t> -- the first element must be a string.
layers.push_back(std::make_pair(std::string("layer1"), uint64_t(1)));
5 . tuple类型vector
// A vector of (start, size, byte-offset) chunk descriptors.
std::vector<std::tuple<SizeT, SizeT, size_t> > chunks;
chunks.push_back(std::make_tuple(
chunk_start, chunk_size, chunk_offset_bytes));
// Unpacking one element with std::get<I>. NOTE(review): `chunk` is not
// declared in this snippet -- presumably one entry of `chunks` obtained in
// a loop elsewhere; the lines do not compile standalone.
SizeT chunk_start = std::get<0>(chunk);
SizeT chunk_size = std::get<1>(chunk);
size_t chunk_offset = std::get<2>(chunk);
6 . explicit 关键字:禁止构造函数被用于隐式类型转换
7 . typedef double(*func)(); // `func` names the type: pointer to a function taking no arguments and returning double
8 . realloc : 重新分配动态内存(调整已分配内存块的大小)
9 . c++ lambda 函数:
语法定义:[capture](parameters) mutable -> return-type{ statements }
10 . reinterpret_cast
// Reads the float's raw bit pattern as an unsigned int (type punning).
// NOTE(review): dereferencing a reinterpret_cast between unrelated pointer
// types violates strict aliasing (UB); std::memcpy -- or C++20 std::bit_cast
// -- is the well-defined way to do this.
unsigned int key_bits = *reinterpret_cast<unsigned*>(const_cast<float*>(&key));
11 . operator 相关重载函数
Operator: 相关的重载函数
1. std::ostream & operator <<(std::ostream & out ,const half_t &x){
out<<(float)x;
return out;
}
2. bool operator >=(const half_t & other) const{
return float(*this) >= float(other);
}
3. half_t & operator +=(const half_t &rhs){
*this =half_t(float(*this)+float(rhs));
reuturn &=*this;
}
// Conversion to float.
// NOTE(review): as written this always returns 0.0f -- `f` is initialized
// to 0 and the actual half->float bit-conversion logic appears to have been
// elided from the snippet; confirm against the full source. Also,
// reinterpret_cast of &f violates strict aliasing; std::memcpy (or C++20
// std::bit_cast) is the well-defined form.
operator float() const{
uint32_t f =0;
return *reinterpret_cast<float const *>(&f);
}
/// Conversion to CUDA's native __half: reinterprets the stored bit
/// pattern member `__x` as a __half (both are 16-bit representations).
operator __half()const{
    return reinterpret_cast<const __half &>(__x); // BUG in original: "Return" (capital R) does not compile
}
6. ++ 重载
// Pre-increment: advance the iterator to the in-order successor of the
// current node. A null iterator stays null (no-op).
self_type &operator++(){
    if (!pos) {
        return *this; // empty / past-the-end iterator: nothing to do
    }
    if (pos->right) {
        // Successor is the left-most node of the right subtree.
        pos = pos->right;
        while (pos->left) {
            pos = pos->left;
        }
    } else {
        // Climb until we arrive from a left child; that ancestor is the
        // first node not yet visited. May end at null (end of traversal).
        while (pos->parent && pos == pos->parent->right) {
            pos = pos->parent;
        }
        pos = pos->parent;
    }
    return *this;
}
7. 指针重载
/// Dereference: returns a reference to the value stored at the iterator's
/// node.
/// @throws std::runtime_error when the iterator is null (past-the-end).
// NOTE: the original's dynamic exception specification
// `throw(std::runtime_error)` is deprecated since C++11 and removed in
// C++17; it is dropped here. The throwing behavior itself is unchanged.
reference_type & operator *() const {
    if (pos) {
        return pos->value;
    }
    throw std::runtime_error("dereference null iterator!");
}
8. != 重载
/// Inequality for tree iterators, defined as the negation of operator==
/// so the two comparisons can never disagree.
template <typename N>
bool operator !=(tree_iterator<N>const & left,tree_iterator<N> const & right){
    const bool equal = (left == right);
    return !equal;
}