Detecting sensitive words in article text and in images ----------- from the Heima Toutiao (黑马头条) course material, P76

Sensitive word detection uses the DFA algorithm (backed by a trie-like dictionary structure).
The utility class below can be used as-is.
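To make the structure concrete, this is roughly what dictionaryMap looks like after initMap is called with the three demo words used in the main method below (法轮, 法轮功, 冰毒): each character maps to its child nodes, and every node carries an isEnd flag marking whether a complete word ends there.

    {
      "法": { "isEnd": "0",
              "轮": { "isEnd": "1",
                      "功": { "isEnd": "1" } } },
      "冰": { "isEnd": "0",
              "毒": { "isEnd": "1" } }
    }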

package com.heima.utils.common;


import java.util.*;

public class SensitiveWordUtil {

    public static Map<String, Object> dictionaryMap = new HashMap<>();


    /**
     * Build the keyword dictionary (a trie stored as nested maps)
     * @param words the sensitive words to index
     */
    public static void initMap(Collection<String> words) {
        if (words == null) {
            System.out.println("The sensitive word list must not be null");
            return;
        }
        }

        // Initial map capacity words.size(): the entry level holds the distinct first characters of the dictionary (may be fewer than words.size(), since different words can share the same first character)
        Map<String, Object> map = new HashMap<>(words.size());
        // The map level currently being filled while walking a word
        Map<String, Object> curMap = null;
        Iterator<String> iterator = words.iterator();

        while (iterator.hasNext()) {
            String word = iterator.next();
            curMap = map;
            int len = word.length();
            for (int i =0; i < len; i++) {
                // Walk through each character of the word
                String key = String.valueOf(word.charAt(i));
                // Check whether the character exists at the current level; if not, create a node, then descend to it and continue
                Map<String, Object> wordMap = (Map<String, Object>) curMap.get(key);
                if (wordMap == null) {
                    // Each node holds two kinds of data: its child nodes and an isEnd flag (whether a word ends here)
                    wordMap = new HashMap<>(2);
                    wordMap.put("isEnd", "0");
                    curMap.put(key, wordMap);
                }
                curMap = wordMap;
                // If this is the last character of the word, set the isEnd flag to 1
                if (i == len -1) {
                    curMap.put("isEnd", "1");
                }
            }
        }

        dictionaryMap = map;
    }

    /**
     * Check whether a keyword starts at the given position in the text
     * @param text the text to scan
     * @param beginIndex the position in the text to start matching from
     * @return the length of the matched keyword, or 0 if no keyword starts here
     */
    private static int checkWord(String text, int beginIndex) {
        if (dictionaryMap == null) {
            throw new RuntimeException("The dictionary must not be empty");
        }
        // Length of the longest complete keyword matched so far
        int matchedLength = 0;
        int wordLength = 0;
        Map<String, Object> curMap = dictionaryMap;
        int len = text.length();
        // Start matching at position beginIndex of the text
        for (int i = beginIndex; i < len; i++) {
            String key = String.valueOf(text.charAt(i));
            // Descend to the child node for the current character
            curMap = (Map<String, Object>) curMap.get(key);
            if (curMap == null) {
                break;
            }
            wordLength++;
            // isEnd == "1" marks the end of a complete keyword; remember its length
            // so that a trailing partial match is not counted as part of the hit
            if ("1".equals(curMap.get("isEnd"))) {
                matchedLength = wordLength;
            }
        }
        return matchedLength;
    }

    /**
     * Get the matched keywords and the number of hits for each
     * @param text the text to scan
     * @return a map from each matched keyword to its hit count
     */
    public static Map<String, Integer> matchWords(String text) {
        Map<String, Integer> wordMap = new HashMap<>();
        int len = text.length();
        for (int i = 0; i < len; i++) {
            int wordLength = checkWord(text, i);
            if (wordLength > 0) {
                String word = text.substring(i, i + wordLength);
                // Increment the hit count for this keyword
                if (wordMap.containsKey(word)) {
                    wordMap.put(word, wordMap.get(word) + 1);
                } else {
                    wordMap.put(word, 1);
                }

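                // Skip past the matched word; the loop's i++ then moves on to the next character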
                i += wordLength - 1;
            }
        }
        return wordMap;
    }

    public static void main(String[] args) {
        List<String> list = new ArrayList<>();
        list.add("法轮");
        list.add("法轮功");
        list.add("冰毒");
        initMap(list);
        String content="我是一个好人,并不会卖冰毒,也不操练法轮功,我真的不卖冰毒";
        Map<String, Integer> map = matchWords(content);
        System.out.println(map);
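        // Prints something like {冰毒=2, 法轮功=1} (HashMap iteration order may vary)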
    }
}

Usage example:

    private boolean handleSensitiveScan(String content, WmNews wmNews) {
        boolean flag = true;
        // Load the sensitive words already defined in the database
        List<WmSensitive> wmSensitives = wmSensitiveMapper.selectList(Wrappers.<WmSensitive>lambdaQuery().select(WmSensitive::getSensitives));
        List<String> sensitiveWords = wmSensitives.stream().map(WmSensitive::getSensitives).collect(Collectors.toList());

        // Initialize the utility class's dictionary
        SensitiveWordUtil.initMap(sensitiveWords);

        // Check whether the article content contains any sensitive word
        Map<String, Integer> resultMap = SensitiveWordUtil.matchWords(content);
        if (resultMap.size() > 0) {
            updateWmNews(wmNews, (short) 2, "The article contains prohibited content: " + resultMap);
            flag = false;
        }
        return flag;
    }
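The updateWmNews helper called above is not included in the original snippet. A minimal sketch, assuming the WmNews entity exposes status and reason fields and a MyBatis-Plus wmNewsMapper is available (these names are assumptions, not the original code):

    // Sketch only; field names and the mapper are assumptions for illustration.
    private void updateWmNews(WmNews wmNews, short status, String reason) {
        wmNews.setStatus(status);          // e.g. (short) 2 = review failed
        wmNews.setReason(reason);          // message recorded for the author/reviewer
        wmNewsMapper.updateById(wmNews);   // persist the new review status
    }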

Image recognition (OCR)
1. Add the dependency

        <dependency>
            <groupId>net.sourceforge.tess4j</groupId>
            <artifactId>tess4j</artifactId>
            <version>4.1.1</version>
        </dependency>
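Note: the chi_sim (Simplified Chinese) traineddata file is not bundled with tess4j; it has to be downloaded separately and placed in the directory that is passed to setDatapath in the next step.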

2. Recognize the text

//Create the OCR engine instance
ITesseract tesseract = new Tesseract();

//Set the path to the tessdata directory (the trained language data, not a font library)
tesseract.setDatapath("D:\\workspace\\tessdata");

//Set the language --> chi_sim, the Simplified Chinese trained data (must be downloaded separately)
tesseract.setLanguage("chi_sim");

File file = new File("D:\\143.png");

//Recognize the image; note that doOCR throws TesseractException, so declare or catch it
String result = tesseract.doOCR(file);

System.out.println("OCR result: " + result.replaceAll("\\r|\\n", "-"));

3. Use the utility class to match sensitive words in the OCR output
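The original post ends this step without code. A minimal sketch of wiring the two pieces together, assuming the article's images have already been downloaded to local files, that SensitiveWordUtil.initMap has been called as in handleSensitiveScan, and reusing the hypothetical updateWmNews helper sketched earlier (method name and image handling are assumptions):

    // Sketch only: feeds Tess4J OCR output into the DFA-based matcher.
    private boolean handleImageSensitiveScan(List<File> images, WmNews wmNews) throws TesseractException {
        ITesseract tesseract = new Tesseract();
        tesseract.setDatapath("D:\\workspace\\tessdata");
        tesseract.setLanguage("chi_sim");

        // Concatenate the text recognized from every image
        StringBuilder ocrText = new StringBuilder();
        for (File image : images) {
            ocrText.append(tesseract.doOCR(image));
        }

        // Reuse the same sensitive word matcher on the OCR output
        Map<String, Integer> resultMap = SensitiveWordUtil.matchWords(ocrText.toString());
        if (resultMap.size() > 0) {
            updateWmNews(wmNews, (short) 2, "The article's images contain prohibited content: " + resultMap);
            return false;
        }
        return true;
    }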


Reposted from blog.csdn.net/qq_56533553/article/details/131590573