Setting up a Spark Java 1.8 environment in Eclipse on Win7: a WordCount hello-world example

马克-to-win @ 马克java社区: Create an ordinary Java project in Eclipse Oxygen and add spark-assembly-1.6.1-hadoop2.6.0.jar to the project's build path; no other setup is needed. Once start-dfs has been started, the program below will run. (Strictly speaking, HDFS is only needed for the commented-out hdfs:// input line; the active code reads a local file.)
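Before typing in WordCount, it can be worth verifying that the jar import worked with a minimal smoke test (this sketch is my addition, not part of the original article; the class name SparkSmokeTest is made up). If the assembly jar is on the build path, it prints the Spark version and exits cleanly:

package com;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;

public class SparkSmokeTest {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local").setAppName("smoke");
        // Same memory workaround as in the WordCount listing below.
        conf.set("spark.testing.memory", "2000000000");
        JavaSparkContext sc = new JavaSparkContext(conf);
        System.out.println("Spark version: " + sc.version());
        sc.stop();
    }
}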
package com;

import java.util.Arrays;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.api.java.function.VoidFunction;

import scala.Tuple2;

public class WordCount1 {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local").setAppName("wc");
        /* Without the line below, startup fails with
           java.lang.IllegalArgumentException: System memory 259522560 must be
           at least 4.718592E8 (470M). Please use a larger heap size.
           That is, there is not enough memory to start the SparkContext. */
        conf.set("spark.testing.memory", "2000000000");
        JavaSparkContext sc = new JavaSparkContext(conf);
        /* Reading the input from HDFS works as well: */
        // JavaRDD<String> text = sc.textFile("hdfs://localhost:9000/README.txt");
        /* The input file contains:
           o1abc 45
           o1abc 77
           o1abc o1abc */
        JavaRDD<String> text = sc.textFile("E://temp//input//friend.txt");
        List<String> strList = text.collect();
        /* This prints:
           str:o1abc 45
           str:o1abc 77
           str:o1abc o1abc */
        for (String str : strList) {
            System.out.println("str:" + str);
        }
        /* Interface FlatMapFunction<T, R> declares Iterable<R> call(T t)
           (note that the return type changed in later Spark versions). */
        JavaRDD<String> words = text.flatMap(new FlatMapFunction<String, String>() {
            /* java.lang.Iterable is a super-interface of List, so the word
               list can be returned directly. */
            public Iterable<String> call(String line) throws Exception {
                System.out.println("flatMap once, line is " + line);
                String[] wordsArray = line.split(" ");
                List<String> wordsList = Arrays.asList(wordsArray);
                return wordsList;
            }
        });
        // (The pairing and reduction steps of WordCount continue in the next
        // section; see the link below.)
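The comment above is hedged for a reason: starting with Spark 2.0, FlatMapFunction.call returns an Iterator<R> rather than an Iterable<R>, so the flatMap above no longer compiles against 2.x. A minimal sketch of the same step under the 2.x API, assuming the same text RDD as above (it additionally needs import java.util.Iterator;):

        /* Spark 2.x form of the flatMap above: call returns Iterator<R>. */
        JavaRDD<String> words = text.flatMap(new FlatMapFunction<String, String>() {
            public Iterator<String> call(String line) throws Exception {
                return Arrays.asList(line.split(" ")).iterator();
            }
        });
        /* Equivalently, since the project targets Java 1.8, as a lambda: */
        // JavaRDD<String> words = text.flatMap(line -> Arrays.asList(line.split(" ")).iterator());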
For more, see the next section: https://blog.csdn.net/qq_44596980/article/details/93384785