This repository was archived by the owner on Nov 10, 2025. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy path SQL.Scala
More file actions
52 lines (40 loc) · 1.41 KB
/
SQL.Scala
File metadata and controls
52 lines (40 loc) · 1.41 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import org.apache.spark.sql.{SparkSession, Dataset}
import org.apache.hadoop.conf.Configuration
object SparkS3WordCount {

  /** Entry point: counts words in an S3 text file and writes the counts back to S3.
    *
    * Optional CLI args (defaults preserve the original hard-coded paths):
    *   args(0) = input path, args(1) = output path.
    */
  def main(args: Array[String]): Unit = {
    val spark = createSparkSession()
    try {
      val inputPath  = if (args.length > 0) args(0) else "s3a://your-bucket-name/input/textfile.txt"
      val outputPath = if (args.length > 1) args(1) else "s3a://your-bucket-name/output/wordcount"
      val wordCounts = processWordCount(spark, inputPath)
      saveToS3(wordCounts, outputPath)
    } finally {
      // Release the session even when the job fails; the original leaked it on error.
      spark.stop()
    }
  }

  /** Builds a SparkSession configured for S3A access.
    *
    * SECURITY: credentials are no longer hard-coded in source. They are taken
    * from the standard AWS environment variables when present; otherwise the
    * S3A connector falls back to its default credential provider chain
    * (IAM role, profile, etc.).
    */
  def createSparkSession(): SparkSession = {
    val spark = SparkSession.builder()
      .appName("Spark S3 Word Count")
      .master("local[*]") // Use "yarn" for cluster execution
      .getOrCreate()
    val hadoopConf: Configuration = spark.sparkContext.hadoopConfiguration
    sys.env.get("AWS_ACCESS_KEY_ID").foreach(hadoopConf.set("fs.s3a.access.key", _))
    sys.env.get("AWS_SECRET_ACCESS_KEY").foreach(hadoopConf.set("fs.s3a.secret.key", _))
    hadoopConf.set("fs.s3a.endpoint", "s3.amazonaws.com")
    spark
  }

  /** Reads the text at `inputPath`, lowercases and tokenizes it on non-word
    * characters, and returns a Dataset of (word, count) pairs.
    */
  def processWordCount(spark: SparkSession, inputPath: String): Dataset[(String, Int)] = {
    import spark.implicits._ // Encoder[(String, Int)] required by toDS()
    spark.sparkContext
      .textFile(inputPath)
      .flatMap(_.split("\\W+")) // split on runs of non-word characters
      .map(_.toLowerCase)
      .filter(_.nonEmpty) // splitting can produce empty tokens at line edges
      .map(word => (word, 1))
      .reduceByKey(_ + _)
      .toDS()
  }

  /** Formats each (word, count) pair as "word: count" and writes one line per
    * pair as plain text to `outputPath`.
    */
  def saveToS3(wordCounts: Dataset[(String, Int)], outputPath: String): Unit = {
    // BUG FIX: Dataset.map requires an implicit Encoder[String]; the original
    // imported spark.implicits._ only inside processWordCount, so this method
    // did not compile. Bind the session to a stable val so we can import here.
    val session = wordCounts.sparkSession
    import session.implicits._
    wordCounts
      .map { case (word, count) => s"$word: $count" }
      .write
      .text(outputPath)
  }
}