diff --git a/.gitignore b/.gitignore index 37407a1..5d48d87 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,6 @@ # VSCode .vscode + +# GitHub +.github_access_token diff --git a/docs/2022-06-08-backup.html b/docs/2022-06-08-backup.html index 80e0ddf..46237af 100644 --- a/docs/2022-06-08-backup.html +++ b/docs/2022-06-08-backup.html @@ -1,18 +1,36 @@ -Index

Backup my way

First thing first, I want to list my own devices, which I have through the years:

App/Service I use daily:

The purpose is that I want my data to be safe, secure, and can be easily recovered if I lost some devices;
or in the worst situation, I lost all.
Because you know, it is hard to guess what is waiting for us in the future.

There are 2 sections which I want to share, the first is How to backup, the second is Recover strategy.

How to backup

Before I talk about backup, I want to talk about data.
In specifically, which data should I backup?

I use Arch Linux and macOS, primarily work in the terminal so I have too many dotfiles, for example, ~/.config/nvim/init.lua.
Each time I reinstall Arch Linux (I like it a lot), I need to reconfigure all the settings, and it is time-consuming.

So for the DE and UI settings, I keep it as default as possible, unless it's getting in my way, I leave the default setting there and forget about it.
The others are dotfiles, which I write my own dotfiles tool to backup and reconfigure easily and quickly.
Also, I know that installing Arch Linux is not easy, despite I install it too many times (Like thousand times since I was in high school).
Not because it is hard, but as life goes on, the official install guide keeps getting new update and covering too many cases for my own personal use, so I write my own guide to quickly capture what I need to do.
I back up all my dotfiles in GitHub and GitLab as I trust them both.
Also as I travel the Internet, I discover Codeberg and Treehouse and use them as another backup for git repo.

So that is my dotfiles, for my regular data, like Wallpaper or Books, Images, I use Google Drive (Actually I pay for it).
But the step: open the webpage, click the upload button and choose files seems boring and time-consuming.
So I use Rclone, it supports Google Drive, One Drive and many providers but I only use Google Drive for now.
The commands are simple:

# Sync from local to remote
+Index

Backup my way

First thing first, I want to list my own devices, which I have through the years:

App/Service I use daily:

The purpose is that I want my data to be safe, secure, and can be easily recovered if I lost some devices; +or in the worst situation, I lost all. +Because you know, it is hard to guess what is waiting for us in the future.

There are 2 sections which I want to share, the first is How to backup, the second is Recover strategy.

How to backup

Before I talk about backup, I want to talk about data. +In specifically, which data should I backup?

I use Arch Linux and macOS, primarily work in the terminal so I have too many dotfiles, for example, ~/.config/nvim/init.lua. +Each time I reinstall Arch Linux (I like it a lot), I need to reconfigure all the settings, and it is time-consuming.

So for the DE and UI settings, I keep it as default as possible, unless it's getting in my way, I leave the default setting there and forget about it. +The others are dotfiles, which I write my own dotfiles tool to backup and reconfigure easily and quickly. +Also, I know that installing Arch Linux is not easy, despite I install it too many times (Like thousand times since I was in high school). +Not because it is hard, but as life goes on, the official install guide keeps getting new update and covering too many cases for my own personal use, so I write my own guide to quickly capture what I need to do. +I back up all my dotfiles in GitHub and GitLab as I trust them both. +Also as I travel the Internet, I discover Codeberg and Treehouse and use them as another backup for git repo.

So that is my dotfiles, for my regular data, like Wallpaper or Books, Images, I use Google Drive (Actually I pay for it). +But the step: open the webpage, click the upload button and choose files seems boring and time-consuming. +So I use Rclone, it supports Google Drive, One Drive and many providers but I only use Google Drive for now. +The commands are simple:

# Sync from local to remote
 rclone sync MyBooks remote:MyBooks -P --exclude .DS_Store
 
-# Sync from remote to local
-rclone sync remote:MyBooks MyBooks -P --exclude .DS_Store
-

Before you use Rclone to sync to Google Drive, you should read Google Drive rclone configuration first.

For private data, I use restic which can be used with Rclone:

# Init
+# Sync from remote to local
+rclone sync remote:MyBooks MyBooks -P --exclude .DS_Store

Before you use Rclone to sync to Google Drive, you should read Google Drive rclone configuration first.

For private data, I use restic which can be used with Rclone:

# Init
 restic -r rclone:remote:PrivateData init
 
-# Backup
+# Backup
 restic -r rclone:remote:PrivateData backup PrivateData
 
-# Cleanup old backups
+# Cleanup old backups
 restic -r rclone:remote:PrivateData forget --keep-last 1 --prune
 
-# Restore
-restic -r rclone:remote:PrivateData restore latest --target ~
-

The next data is my passwords and my OTPs.
These are the things which I'm scared to lose the most.
First thing first, I enable 2-Step Verification for all of my important accounts, should use both OTP and phone method.

I use Bitwarden for passwords (That is a long story, coming from Google Password manager to Firefox Lockwise and then settle down with Bitwarden) and Aegis for OTPs.
The reason I choose Aegis, not Authy (I use Authy for so long but Aegis is definitely better) is because Aegis allows me to extract all the OTPs to a single file (Can be encrypted), which I use to transfer or backup easily.

As long as Bitwarden provides free passwords stored, I use all of its apps, extensions so that I can easily sync passwords between laptops and phones.
The thing I need to remember is the master password of Bitwarden in my head.

With Aegis, I export the data, then sync it to Google Drive, also store it locally in my phone.

The main problem here is the OTP, I can not store all of my OTPs in the cloud completely.
Because if I want to access my OTPs in the cloud, I should log in, and then input my OTP, this is a circle, my friends.

Recovery strategy

There are many strategies that I process to react as if something strange is happening to my devices.

If I lost my laptops, single laptop or all, do not panic as long as I have my phones.
The OTPs are in there, the passwords are in Bitwarden cloud, other data is in Google Drive so nothing is lost here.

If I lost my phone, but not my laptops, I use the OTPs which are stored locally in my laptops.

In the worst situation, I lost everything, my laptops, my phone.
The first step is to recover my SIM, then log in to Google account using the password and SMS OTP.
After that, log in to Bitwarden account using the master password and OTP from Gmail, which I open previously.

The end

This guide will be updated regularly I promise.

Feel free to ask me via email +# Restore +restic -r rclone:remote:PrivateData restore latest --target ~

The next data is my passwords and my OTPs. +These are the things which I'm scared to lose the most. +First thing first, I enable 2-Step Verification for all of my important accounts, should use both OTP and phone method.

I use Bitwarden for passwords (That is a long story, coming from Google Password manager to Firefox Lockwise and then settle down with Bitwarden) and Aegis for OTPs. +The reason I choose Aegis, not Authy (I use Authy for so long but Aegis is definitely better) is because Aegis allows me to extract all the OTPs to a single file (Can be encrypted), which I use to transfer or backup easily.

As long as Bitwarden provides free passwords stored, I use all of its apps, extensions so that I can easily sync passwords between laptops and phones. +The thing I need to remember is the master password of Bitwarden in my head.

With Aegis, I export the data, then sync it to Google Drive, also store it locally in my phone.

The main problem here is the OTP, I can not store all of my OTPs in the cloud completely. +Because if I want to access my OTPs in the cloud, I should log in, and then input my OTP, this is a circle, my friends.

Recovery strategy

There are many strategies that I process to react as if something strange is happening to my devices.

If I lost my laptops, single laptop or all, do not panic as long as I have my phones. +The OTPs are in there, the passwords are in Bitwarden cloud, other data is in Google Drive so nothing is lost here.

If I lost my phone, but not my laptops, I use the OTPs which are stored locally in my laptops.

In the worst situation, I lost everything, my laptops, my phone. +The first step is to recover my SIM, then log in to Google account using the password and SMS OTP. +After that, log in to Bitwarden account using the master password and OTP from Gmail, which I open previously.

The end

This guide will be updated regularly I promise.

Feel free to ask me via email Mastodon \ No newline at end of file diff --git a/docs/2022-06-08-dockerfile-go.html b/docs/2022-06-08-dockerfile-go.html index 7a1486a..57c9076 100644 --- a/docs/2022-06-08-dockerfile-go.html +++ b/docs/2022-06-08-dockerfile-go.html @@ -1,36 +1,44 @@ -Index

Dockerfile for Go

Each time I start a new Go project, I repeat many steps.
Like set up .gitignore, CI configs, Dockerfile, ...

So I decide to have a baseline Dockerfile like this:

FROM golang:1.19-bullseye as builder
+Index

Dockerfile for Go

Each time I start a new Go project, I repeat many steps. +Like set up .gitignore, CI configs, Dockerfile, ...

So I decide to have a baseline Dockerfile like this:

FROM golang:1.19-bullseye as builder
 
-RUN go install golang.org/dl/go1.19@latest \
+RUN go install golang.org/dl/go1.19@latest \
     && go1.19 download
 
-WORKDIR /build
+WORKDIR /build
 
-COPY go.mod .
-COPY go.sum .
-COPY vendor .
-COPY . .
+COPY go.mod .
+COPY go.sum .
+COPY vendor .
+COPY . .
 
-RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GOAMD64=v3 go build -o ./app -tags timetzdata -trimpath .
+RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GOAMD64=v3 go build -o ./app -tags timetzdata -trimpath .
 
-FROM gcr.io/distroless/base-debian11
+FROM gcr.io/distroless/base-debian11
 
-COPY --from=builder /build/app /app
+COPY --from=builder /build/app /app
 
-ENTRYPOINT ["/app"]
-

I use multi-stage build to keep my image size small.
First stage is Go official image,
second stage is Distroless.

Before Distroless, I use Alpine official image,
There is a whole discussion on the Internet to choose which is the best base image for Go.
After reading some blogs, I discover Distroless as a small and secure base image.
So I stick with it for a while.

Also, remember to match Distroless Debian version with Go official image Debian version.

FROM golang:1.19-bullseye as builder
-

This is Go image I use as a build stage.
This can be official Go image or custom image is required in some companies.

RUN go install golang.org/dl/go1.19@latest \
-    && go1.19 download
-

This is optional.
In my case, my company is slow to update Go image so I use this trick to install latest Go version.

WORKDIR /build
+ENTRYPOINT ["/app"]

I use multi-stage build to keep my image size small. +First stage is Go official image, +second stage is Distroless.

Before Distroless, I use Alpine official image, +There is a whole discussion on the Internet to choose which is the best base image for Go. +After reading some blogs, I discover Distroless as a small and secure base image. +So I stick with it for a while.

Also, remember to match Distroless Debian version with Go official image Debian version.

FROM golang:1.19-bullseye as builder

This is Go image I use as a build stage. +This can be official Go image or custom image is required in some companies.

RUN go install golang.org/dl/go1.19@latest \
+    && go1.19 download

This is optional. +In my case, my company is slow to update Go image so I use this trick to install latest Go version.

WORKDIR /build
 
-COPY go.mod .
-COPY go.sum .
-COPY vendor .
-COPY . .
-

I use /build to emphasize that I am building something in that directory.

The 4 COPY lines are familiar if you use Go enough.
First is go.mod and go.sum because it defines Go modules.
The second is vendor, this is optional but I use it because I don't want each time I build Dockerfile, I need to redownload Go modules.

RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GOAMD64=v3 go build -o ./app -tags timetzdata -trimpath .
-

This is where I build Go program.

CGO_ENABLED=0 because I don't want to mess with C libraries.
GOOS=linux GOARCH=amd64 is easy to explain, Linux with x86-64.
GOAMD64=v3 is new since Go 1.18,
I use v3 because I read about AMD64 version in Arch Linux rfcs. TLDR's newer computers are already x86-64-v3.

-tags timetzdata to embed timezone database in case base image does not have.
-trimpath to support reproducible builds.

FROM gcr.io/distroless/base-debian11
+COPY go.mod .
+COPY go.sum .
+COPY vendor .
+COPY . .

I use /build to emphasize that I am building something in that directory.

The 4 COPY lines are familiar if you use Go enough. +First is go.mod and go.sum because it defines Go modules. +The second is vendor, this is optional but I use it because I don't want each time I build Dockerfile, I need to redownload Go modules.

RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GOAMD64=v3 go build -o ./app -tags timetzdata -trimpath .

This is where I build Go program.

CGO_ENABLED=0 because I don't want to mess with C libraries. +GOOS=linux GOARCH=amd64 is easy to explain, Linux with x86-64. +GOAMD64=v3 is new since Go 1.18, +I use v3 because I read about AMD64 version in Arch Linux rfcs. TLDR's newer computers are already x86-64-v3.

-tags timetzdata to embed timezone database in case base image does not have. +-trimpath to support reproducible builds.

FROM gcr.io/distroless/base-debian11
 
-COPY --from=builder /build/app /app
+COPY --from=builder /build/app /app
 
-ENTRYPOINT ["/app"]
-

Finally, I copy app to Distroless base image.

Feel free to ask me via email +ENTRYPOINT ["/app"]

Finally, I copy app to Distroless base image.

Feel free to ask me via email Mastodon \ No newline at end of file diff --git a/docs/2022-07-10-bootstrap-go.html b/docs/2022-07-10-bootstrap-go.html index 179d4c6..eeb5de1 100644 --- a/docs/2022-07-10-bootstrap-go.html +++ b/docs/2022-07-10-bootstrap-go.html @@ -1,99 +1,113 @@ -Index

Bootstrap Go

It is hard to write bootstrap tool to quickly create Go service.
So I write this guide instead.
This is a quick checklist for me every damn time I need to write a Go service from scratch.
Also, this is my personal opinion, so feel free to comment.

Structure

main.go
+Index

Bootstrap Go

It is hard to write bootstrap tool to quickly create Go service. +So I write this guide instead. +This is a quick checklist for me every damn time I need to write a Go service from scratch. +Also, this is my personal opinion, so feel free to comment.

Structure

main.go
 internal
-| business
-| | http
-| | | handler.go
-| | | service.go
-| | | models.go
-| | grpc
-| | | handler.go
-| | | models.go
-| | consumer
-| | | handler.go
-| | | service.go
-| | | models.go
-| | service.go
-| | repository.go
-| | models.go
-

All business codes are inside internal.
Each business has a different directory business.

Inside each business, there are 2 handlers: http, grpc:

For each handler, there are usually 3 layers: handler, service, repository:

Location:

Do not repeat!

If we have too many services, some of the logic will be overlapped.

For example, service A and service B both need to make POST call API to service C.
If service A and service B both have libs to call service C to do that API, we need to move the libs to some common pkg libs.
So in the future, service D which needs to call C will not need to copy libs to handle service C api but only need to import from common pkg libs.

Another bad practice is adapter service.
No need to write a new service if what we need is just common pkg libs.

Taste on style guide

Stop using global var

If I see someone using global var, I swear I will shoot them twice in the face.

Why?

Use functional options, but don't overuse it!

For simple struct with 1 or 2 fields, no need to use functional options.

Example:

func main() {
-	s := NewS(WithA(1), WithB("b"))
-	fmt.Printf("%+v\n", s)
+| business
+| | http
+| | | handler.go
+| | | service.go
+| | | models.go
+| | grpc
+| | | handler.go
+| | | models.go
+| | consumer
+| | | handler.go
+| | | service.go
+| | | models.go
+| | service.go
+| | repository.go
+| | models.go

All business codes are inside internal. +Each business has a different directory business.

Inside each business, there are 2 handlers: http, grpc:

For each handler, there are usually 3 layers: handler, service, repository:

Location:

Do not repeat!

If we have too many services, some of the logic will be overlapped.

For example, service A and service B both need to make POST call API to service C. +If service A and service B both have libs to call service C to do that API, we need to move the libs to some common pkg libs. +So in the future, service D which needs to call C will not need to copy libs to handle service C api but only need to import from common pkg libs.

Another bad practice is adapter service. +No need to write a new service if what we need is just common pkg libs.

Taste on style guide

Stop using global var

If I see someone using global var, I swear I will shoot them twice in the face.

Why?

Use functional options, but don't overuse it!

For simple struct with 1 or 2 fields, no need to use functional options.

Example:

func main() {
+	s := NewS(WithA(1), WithB("b"))
+	fmt.Printf("%+v\n", s)
 }
 
-type S struct {
-	fieldA int
-	fieldB string
+type S struct {
+	fieldA int
+	fieldB string
 }
 
-type OptionS func(s *S)
+type OptionS func(s *S)
 
-func WithA(a int) OptionS {
-	return func(s *S) {
-		s.fieldA = a
+func WithA(a int) OptionS {
+	return func(s *S) {
+		s.fieldA = a
 	}
 }
 
-func WithB(b string) OptionS {
-	return func(s *S) {
-		s.fieldB = b
+func WithB(b string) OptionS {
+	return func(s *S) {
+		s.fieldB = b
 	}
 }
 
-func NewS(opts ...OptionS) *S {
-	s := &S{}
-	for _, opt := range opts {
-		opt(s)
+func NewS(opts ...OptionS) *S {
+	s := &S{}
+	for _, opt := range opts {
+		opt(s)
 	}
-	return s
-}
-

In above example, I construct s with WithA and WithB option.
No need to pass direct field inside s.

Use errgroup as much as possible

If business logic involves calling too many APIs, but they are not depend on each other.
We can fire them parallel :)

Personally, I prefer errgroup to WaitGroup (https://pkg.go.dev/sync#WaitGroup).
Because I always need to deal with errors.

Example:

eg, egCtx := errgroup.WithContext(ctx)
+	return s
+}

In above example, I construct s with WithA and WithB option. +No need to pass direct field inside s.

Use errgroup as much as possible

If business logic involves calling too many APIs, but they are not depend on each other. +We can fire them parallel :)

Personally, I prefer errgroup to WaitGroup (https://pkg.go.dev/sync#WaitGroup). +Because I always need to deal with errors.

Example:

eg, egCtx := errgroup.WithContext(ctx)
 
-eg.Go(func() error {
-	// Do some thing
-	return nil
+eg.Go(func() error {
+	// Do some thing
+	return nil
 })
 
-eg.Go(func() error {
-	// Do other thing
-	return nil
+eg.Go(func() error {
+	// Do other thing
+	return nil
 })
 
-if err := eg.Wait(); err != nil {
-	// Handle error
-}
-

Use semaphore when need to implement WorkerPool

Please don't use external libs for WorkerPool, I don't want to deal with dependency hell.

External libs

No need vendor

Only need if you need something from vendor, to generate mock or something else.

Use build.go to include build tools in go.mod

To easily control version of build tools.

For example build.go:

//go:build tools
-// +build tools
+if err := eg.Wait(); err != nil {
+	// Handle error
+}

Use semaphore when need to implement WorkerPool

Please don't use external libs for WorkerPool, I don't want to deal with dependency hell.

External libs

No need vendor

Only need if you need something from vendor, to generate mock or something else.

Use build.go to include build tools in go.mod

To easily control version of build tools.

For example build.go:

//go:build tools
+// +build tools
 
-package main
+package main
 
-import (
-	_ "github.com/golang/protobuf/protoc-gen-go"
-)
-

And then in Makefile:

build:
-    go install github.com/golang/protobuf/protoc-gen-go
-

We always get the version of build tools in go.mod each time we install it.
Future contributors will not cry anymore.

Don't use cli libs (spf13/cobra, urfave/cli) just for Go service

What is the point to pass many params (do-it, --abc, --xyz) when what we only need is start service?

In my case, service starts with only config, and config should be read from file or environment like The Twelve Factors guide.

Don't use grpc-ecosystem/grpc-gateway

Just don't.

Use protocolbuffers/protobuf-go, grpc/grpc-go for gRPC.

Write 1 for both gRPC, REST sounds good, but in the end, it is not worth it.

Don't use uber/prototool, use bufbuild/buf

prototool is deprecated, and buf can generate, lint, format as good as prototool.

Use gin-gonic/gin for REST.

Don't use gin.Context when pass context from handler layer to service layer, use gin.Context.Request.Context() instead.

If you want log, just use uber-go/zap

It is fast!

To read config, use spf13/viper

Only init config in main or cmd layer.
Do not use viper.Get... in business layer or inside business layer.

Why?

Also, be careful if config value is empty.
You should decide to continue or stop the service if there is no config.

Don't overuse ORM libs, no need to handle another layer above SQL.

Each ORM libs has each different syntax.
To learn and use those libs correctly is time consuming.
So just stick to plain SQL.
It is easier to debug when something is wrong.

But database/sql has its own limit.
For example, it is hard to get primary key after insert/update.
So may be you want to use ORM for those cases.
I hear that go-gorm/gorm, ent/ent is good.

If you want test, just use stretchr/testify.

It is easy to write a suite test, thanks to testify.
Also, for mocking, there are many options out there.
Pick 1 then sleep peacefully.

If need to mock, choose matryer/moq or golang/mock

The first is easy to use but not as powerful as the latter.
If you want to make sure mock func is called with correct times, use the latter.

Example with matryer/moq:

// Only gen mock if source code file is newer than mock file
-// https://jonwillia.ms/2019/12/22/conditional-gomock-mockgen
-//go:generate sh -c "test service_mock_generated.go -nt $GOFILE && exit 0; moq -rm -out service_mock_generated.go . Service"
-

Be careful with spf13/cast

Don't cast proto enum:

// Bad
-a := cast.ToInt32(servicev1.ReasonCode_ABC)
+import (
+	_ "github.com/golang/protobuf/protoc-gen-go"
+)

And then in Makefile:

build:
+    go install github.com/golang/protobuf/protoc-gen-go

We always get the version of build tools in go.mod each time we install it. +Future contributors will not cry anymore.

Don't use cli libs (spf13/cobra, urfave/cli) just for Go service

What is the point to pass many params (do-it, --abc, --xyz) when what we only need is start service?

In my case, service starts with only config, and config should be read from file or environment like The Twelve Factors guide.

Don't use grpc-ecosystem/grpc-gateway

Just don't.

Use protocolbuffers/protobuf-go, grpc/grpc-go for gRPC.

Write 1 for both gRPC, REST sounds good, but in the end, it is not worth it.

Don't use uber/prototool, use bufbuild/buf

prototool is deprecated, and buf can generate, lint, format as good as prototool.

Use gin-gonic/gin for REST.

Don't use gin.Context when pass context from handler layer to service layer, use gin.Context.Request.Context() instead.

If you want log, just use uber-go/zap

It is fast!

To read config, use spf13/viper

Only init config in main or cmd layer. +Do not use viper.Get... in business layer or inside business layer.

Why?

Also, be careful if config value is empty. +You should decide to continue or stop the service if there is no config.

Don't overuse ORM libs, no need to handle another layer above SQL.

Each ORM libs has each different syntax. +To learn and use those libs correctly is time consuming. +So just stick to plain SQL. +It is easier to debug when something is wrong.

But database/sql has its own limit. +For example, it is hard to get primary key after insert/update. +So may be you want to use ORM for those cases. +I hear that go-gorm/gorm, ent/ent is good.

If you want test, just use stretchr/testify.

It is easy to write a suite test, thanks to testify. +Also, for mocking, there are many options out there. +Pick 1 then sleep peacefully.

If need to mock, choose matryer/moq or golang/mock

The first is easy to use but not as powerful as the latter. +If you want to make sure mock func is called with correct times, use the latter.

Example with matryer/moq:

// Only gen mock if source code file is newer than mock file
+// https://jonwillia.ms/2019/12/22/conditional-gomock-mockgen
+//go:generate sh -c "test service_mock_generated.go -nt $GOFILE && exit 0; moq -rm -out service_mock_generated.go . Service"

Be careful with spf13/cast

Don't cast proto enum:

// Bad
+a := cast.ToInt32(servicev1.ReasonCode_ABC)
 
-// Good
-a := int32(servicev1.ReasonCode_ABC)
-

Use stringer if you want your type enum can be print as string

type Drink int
+// Good
+a := int32(servicev1.ReasonCode_ABC)

Use stringer if you want your type enum can be print as string

type Drink int
 
-const (
-	Beer Drink = iota
-	Water
-	OrangeJuice
-)
-
go install golang.org/x/tools/cmd/stringer@latest
+const (
+	Beer Drink = iota
+	Water
+	OrangeJuice
+)
go install golang.org/x/tools/cmd/stringer@latest
 
-# Run inside directory which contains Drink
-stringer -type=Drink
-

Don't waste your time rewrite rate limiter if your use case is simple, use rate or go-redis/redis_rate

rate if you want rate limiter locally in your single instance of service.
redis_rate if you want rate limiter distributed across all your instances of service.

Replace go fmt, goimports with mvdan/gofumpt.

gofumpt provides more rules when format Go codes.

Use golangci/golangci-lint.

No need to say more.
Lint or get the f out!

If you get fieldalignment error, use fieldalignment to fix them.

# Install
+# Run inside directory which contains Drink
+stringer -type=Drink

Don't waste your time rewrite rate limiter if your use case is simple, use rate or go-redis/redis_rate

rate if you want rate limiter locally in your single instance of service. +redis_rate if you want rate limiter distributed across all your instances of service.

Replace go fmt, goimports with mvdan/gofumpt.

gofumpt provides more rules when format Go codes.

Use golangci/golangci-lint.

No need to say more. +Lint or get the f out!

If you get fieldalignment error, use fieldalignment to fix them.

# Install
 go install golang.org/x/tools/go/analysis/passes/fieldalignment/cmd/fieldalignment@latest
 
-# Fix
-fieldalignment -fix ./internal/business/*.go
-

Thanks

Feel free to ask me via email +# Fix +fieldalignment -fix ./internal/business/*.go

Thanks

Feel free to ask me via email Mastodon \ No newline at end of file diff --git a/docs/2022-07-12-uuid-or-else.html b/docs/2022-07-12-uuid-or-else.html index a612303..8e50070 100644 --- a/docs/2022-07-12-uuid-or-else.html +++ b/docs/2022-07-12-uuid-or-else.html @@ -1,2 +1,12 @@ -Index

UUID or else

There are many use cases where we need to use a unique ID.
In my experience, I only encounter 2 cases:

In my Go universe, there are some libs to help us with this:

First use case is trace ID, or context aware ID

The ID is used only for trace and log.
If same ID is generated twice (because maybe the possibility is too small but not 0), honestly I don't care.
When I use that ID to search log, if it pops more than things I care for, it is still no harm to me.

My choice for this use case is rs/xid.
Because it is small (not span too much on log line) and copy friendly.

Second use case is primary key, also hard choice

Why I don't use auto increment key for primary key?
The answer is simple, I don't want to write database specific SQL.
SQLite has some different syntax from MySQL, and PostgreSQL and so on.
Every logic I can move to application layer from database layer, I will.

In the past and present, I use google/uuid, specifically I use UUID v4.
In the future I will look to use segmentio/ksuid and oklog/ulid (trial and error of course).
Both are sortable, but google/uuid is not.
The reason I'm afraid because the database is sensitive subject, and I need more testing and battle test proof to trust those libs.

What else?

I think about adding prefix to ID to identify which resource that ID represents.

Thanks

Feel free to ask me via email +Index

UUID or else

There are many use cases where we need to use a unique ID. +In my experience, I only encounter 2 cases:

In my Go universe, there are some libs to help us with this:

First use case is trace ID, or context aware ID

The ID is used only for trace and log. +If same ID is generated twice (because maybe the possibility is too small but not 0), honestly I don't care. +When I use that ID to search log, if it pops more than things I care for, it is still no harm to me.

My choice for this use case is rs/xid. +Because it is small (not span too much on log line) and copy friendly.

Second use case is primary key, also hard choice

Why I don't use auto increment key for primary key? +The answer is simple, I don't want to write database specific SQL. +SQLite has some different syntax from MySQL, and PostgreSQL and so on. +Every logic I can move to application layer from database layer, I will.

In the past and present, I use google/uuid, specifically I use UUID v4. +In the future I will look to use segmentio/ksuid and oklog/ulid (trial and error of course). +Both are sortable, but google/uuid is not. +The reason I'm afraid because the database is sensitive subject, and I need more testing and battle test proof to trust those libs.

What else?

I think about adding prefix to ID to identify which resource that ID represents.

Thanks

Feel free to ask me via email Mastodon \ No newline at end of file diff --git a/docs/2022-07-19-migrate-to-buf.html b/docs/2022-07-19-migrate-to-buf.html index 28e0fed..d9d96c9 100644 --- a/docs/2022-07-19-migrate-to-buf.html +++ b/docs/2022-07-19-migrate-to-buf.html @@ -1,46 +1,43 @@ -Index

Migrate to buf from prototool

Why? Because prototool is outdated, and can not run on M1 mac.

We need 3 files:

FYI, the libs version I use:

build.go:

//go:build tools
-// +build tools
+Index

Migrate to buf from prototool

Why? Because prototool is outdated, and can not run on M1 mac.

We need 3 files:

FYI, the libs version I use:

build.go:

//go:build tools
+// +build tools
 
-import (
-  _ "github.com/envoyproxy/protoc-gen-validate"
-  _ "github.com/golang/protobuf/protoc-gen-go"
-  _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway"
-  _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger"
-  _ "github.com/kei2100/protoc-gen-marshal-zap/plugin/protoc-gen-marshal-zap"
-)
-

buf.yaml

version: v1
-deps:
-  - buf.build/haunt98/googleapis:b38d93f7ade94a698adff9576474ae7c
-  - buf.build/haunt98/grpc-gateway:ecf4f0f58aa8496f8a76ed303c6e06c7
-  - buf.build/haunt98/protoc-gen-validate:2686264610fc4ad4a9fcc932647e279d
-  - buf.build/haunt98/marshal-zap:2a593ca925134680a5820d3f13c1be5a
-breaking:
-  use:
-    - FILE
-lint:
-  use:
-    - DEFAULT
-

buf.gen.yaml:

version: v1
-plugins:
-  - name: go
-    out: pkg
-    opt:
-      - plugins=grpc
-  - name: grpc-gateway
-    out: pkg
-    opt:
-      - logtostderr=true
-  - name: swagger
-    out: .
-    opt:
-      - logtostderr=true
-  - name: validate
-    out: pkg
-    opt:
-      - lang=go
-  - name: marshal-zap
-    out: pkg
-

Update Makefile:

gen:
+import (
+  _ "github.com/envoyproxy/protoc-gen-validate"
+  _ "github.com/golang/protobuf/protoc-gen-go"
+  _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway"
+  _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger"
+  _ "github.com/kei2100/protoc-gen-marshal-zap/plugin/protoc-gen-marshal-zap"
+)

buf.yaml

version: v1
+deps:
+  - buf.build/haunt98/googleapis:b38d93f7ade94a698adff9576474ae7c
+  - buf.build/haunt98/grpc-gateway:ecf4f0f58aa8496f8a76ed303c6e06c7
+  - buf.build/haunt98/protoc-gen-validate:2686264610fc4ad4a9fcc932647e279d
+  - buf.build/haunt98/marshal-zap:2a593ca925134680a5820d3f13c1be5a
+breaking:
+  use:
+    - FILE
+lint:
+  use:
+    - DEFAULT

buf.gen.yaml:

version: v1
+plugins:
+  - name: go
+    out: pkg
+    opt:
+      - plugins=grpc
+  - name: grpc-gateway
+    out: pkg
+    opt:
+      - logtostderr=true
+  - name: swagger
+    out: .
+    opt:
+      - logtostderr=true
+  - name: validate
+    out: pkg
+    opt:
+      - lang=go
+  - name: marshal-zap
+    out: pkg

Update Makefile:

gen:
   go install github.com/golang/protobuf/protoc-gen-go
   go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
   go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
@@ -49,6 +46,5 @@ plugins:
   go install github.com/bufbuild/buf/cmd/buf@latest
   buf mod update
   buf format -w
-  buf generate
-

Run make gen to have fun of course.

FAQ

Remember grpc-ecosystem/grpc-gateway, envoyproxy/protoc-gen-validate, kei2100/protoc-gen-marshal-zap are optional, so feel free to delete them if you don't use them.

If use vendor:

If you use grpc-gateway:

The last step is delete prototool.yaml.

If you are not migrate but start from scratch:

Thanks

Feel free to ask me via email + buf generate

Run make gen to have fun of course.

FAQ

Remember grpc-ecosystem/grpc-gateway, envoyproxy/protoc-gen-validate, kei2100/protoc-gen-marshal-zap are optional, so feel free to delete them if you don't use them.

If use vendor:

If you use grpc-gateway:

The last step is delete prototool.yaml.

If you are not migrate but start from scratch:

Thanks

Feel free to ask me via email Mastodon \ No newline at end of file diff --git a/docs/2022-07-31-experiment-go.html b/docs/2022-07-31-experiment-go.html index 74de9d8..add21c8 100644 --- a/docs/2022-07-31-experiment-go.html +++ b/docs/2022-07-31-experiment-go.html @@ -1,30 +1,34 @@ -Index

Experiment Go

There comes a time when you need to experiment with new things, a new style, a new approach.
So this post serves as it is named.

Design API by trimming down the interface/struct or whatever

Instead of:

type Client interface {
-    GetUser()
-    AddUser()
-    GetAccount()
-    RemoveAccount()
+Index

Experiment Go

There comes a time when you need to experiment with new things, a new style, a new approach. +So this post serves as it is named.

Design API by trimming down the interface/struct or whatever

Instead of:

type Client interface {
+    GetUser()
+    AddUser()
+    GetAccount()
+    RemoveAccount()
 }
 
-// c is Client
-c.GetUser()
-c.RemoveAccount()
-

Try:

type Client struct {
-    User ClientUser
-    Account ClientAccount
+// c is Client
+c.GetUser()
+c.RemoveAccount()

Try:

type Client struct {
+    User ClientUser
+    Account ClientAccount
 }
 
-type ClientUser interface {
-    Get()
-    Add()
+type ClientUser interface {
+    Get()
+    Add()
 }
 
-type ClientAccount interface {
-    Get()
-    Remove()
+type ClientAccount interface {
+    Get()
+    Remove()
 }
 
-// c is Client
-c.User.Get()
-c.Account.Remove()
-

The difference is c.GetUser() -> c.User.Get().

For example we have client which connect to bank.
There are many functions like GetUser, GetTransaction, VerifyAccount, ...
So split big client to many children, each child handle single aspect, like user or transaction.

My concern is that we replace an interface with a struct which contains multiple interfaces aka children.
I don't know if this is the right call.

This pattern is used by google/go-github.

Find alternative to grpc/grpc-go

Why?
See for yourself.
Also read A new Go API for Protocol Buffers to know why v1.20.0 is v2.

Currently there are some:

Thanks

Feel free to ask me via email +// c is Client +c.User.Get() +c.Account.Remove()

The difference is c.GetUser() -> c.User.Get().

For example we have client which connect to bank. +There are many functions like GetUser, GetTransaction, VerifyAccount, ... +So split big client to many children, each child handle single aspect, like user or transaction.

My concern is that we replace an interface with a struct which contains multiple interfaces aka children. +I don't know if this is the right call.

This pattern is used by google/go-github.

Find alternative to grpc/grpc-go

Why? +See for yourself. +Also read A new Go API for Protocol Buffers to know why v1.20.0 is v2.

Currently there are some:

Thanks

Feel free to ask me via email Mastodon \ No newline at end of file diff --git a/docs/2022-07-31-sql.html b/docs/2022-07-31-sql.html index 7703781..10c7286 100644 --- a/docs/2022-07-31-sql.html +++ b/docs/2022-07-31-sql.html @@ -1,16 +1,27 @@ -Index

SQL

Previously in my fresher software developer time, I rarely write SQL, I always use ORM to wrap SQL.
But time past and too much abstraction bites me.
So I decide to only write SQL from now as much as possible, no more ORM for me.
But if there is any cool ORM for Go, I guess I try.

This guide is not kind of guide which cover all cases.
Just my little tricks when I work with SQL.

Stay away from database unique id

Use UUID instead.
If you can, and you should, choose UUID type which can be sortable.

Stay away from database timestamp

Stay away from all kinds of database timestamps (MySQL timestamp, SQLite timestamp, ...)
Just use int64 then pass the timestamp in service layer not database layer.

Why? Because time and date and location are too much complex to handle.
In my business, I use timestamp in milliseconds.
Then I save timestamp as int64 value to database.
Each time I get timestamp from database, I parse to time struct in Go with location or format I want.
No more hassle!

It looks like this:

[Business] time, data -> convert to unix timestamp milliseconds -> [Database] int64
-

Use index!!!

You should use index for faster query, but not too much.
Don't create index for every fields in table.
Choose wisely!

For example, create index in MySQL:

CREATE INDEX `idx_timestamp`
-    ON `user_upload` (`timestamp`);
-

Be careful with NULL

If compare with field which can be NULL, remember to check NULL for safety.

-- field_something can be NULL
+Index

SQL

Previously in my fresher software developer time, I rarely write SQL, I always use ORM to wrap SQL. +But time past and too much abstraction bites me. +So I decide to only write SQL from now as much as possible, no more ORM for me. +But if there is any cool ORM for Go, I guess I try.

This guide is not kind of guide which cover all cases. +Just my little tricks when I work with SQL.

Stay away from database unique id

Use UUID instead. +If you can, and you should, choose UUID type which can be sortable.

Stay away from database timestamp

Stay away from all kinds of database timestamps (MySQL timestamp, SQLite timestamp, ...) +Just use int64 then pass the timestamp in the service layer, not the database layer.

Why? Because time and date and location are too much complex to handle. +In my business, I use timestamp in milliseconds. +Then I save timestamp as int64 value to database. +Each time I get timestamp from database, I parse to time struct in Go with location or format I want. +No more hassle!

It looks like this:

[Business] time, data -> convert to unix timestamp milliseconds -> [Database] int64

Use index!!!

You should use index for faster query, but not too much. +Don't create index for every fields in table. +Choose wisely!

For example, create index in MySQL:

CREATE INDEX `idx_timestamp`
+    ON `user_upload` (`timestamp`);

Be careful with NULL

If compare with field which can be NULL, remember to check NULL for safety.

-- field_something can be NULL
 
--- Bad
-SELECT *
-FROM table
-WHERE field_something != 1
+-- Bad
+SELECT *
+FROM table
+WHERE field_something != 1
 
--- Good
-SELECT *
-FROM table
-WHERE (field_something IS NULL OR field_something != 1)
-

Need to clarify why this happens? Idk :(

VARCHAR or TEXT

Prefer VARCHAR if you need to query and of course use index, and make sure size of value will never hit the limit.
Prefer TEXT if you don't care, just want to store something.

Be super careful when migrate, update database on production and online!!!

Please read docs about online DDL operations before doing anything online (keeping the database running while updating it at the same time, for example creating an index, ...)

Tools

Thanks

Feel free to ask me via email +-- Good +SELECT * +FROM table +WHERE (field_something IS NULL OR field_something != 1)

Need to clarify why this happens? Idk :(

+VARCHAR or TEXT

Prefer VARCHAR if you need to query and of course use index, and make sure size of value will never hit the limit. +Prefer TEXT if you don't care, just want to store something.

Be super careful when migrate, update database on production and online!!!

Please read docs about online DDL operations before doing anything online (keeping the database running while updating it at the same time, for example creating an index, ...)

Tools

Thanks

Feel free to ask me via email Mastodon \ No newline at end of file diff --git a/docs/2022-08-10-gitignore.html b/docs/2022-08-10-gitignore.html index 7567a39..833f66d 100644 --- a/docs/2022-08-10-gitignore.html +++ b/docs/2022-08-10-gitignore.html @@ -1,20 +1,17 @@ -Index

gitignore

My quick check for .gitignore.

Base

# macOS
+Index

gitignore

My quick check for .gitignore.

Base

# macOS
 .DS_Store
 
-# Windows
-*.exe
+# Windows
+*.exe
 
-# IntelliJ
+# IntelliJ
 .idea/
 
-# VSCode
-.vscode/
-

Go

# Go
-# Test coverage
+# VSCode
+.vscode/

Go

# Go
+# Test coverage
 coverage.out
 
-# Should ignore vendor
-vendor
-

Python

venv
-
Feel free to ask me via email +# Should ignore vendor +vendor

Python

venv
Feel free to ask me via email Mastodon \ No newline at end of file diff --git a/docs/2022-10-26-reload-config.html b/docs/2022-10-26-reload-config.html index eaa79f1..d93e6f4 100644 --- a/docs/2022-10-26-reload-config.html +++ b/docs/2022-10-26-reload-config.html @@ -1,46 +1,47 @@ -Index

Reload config

This serves as design draft of reload config system

@startuml Reload config
+Index

Reload config

This serves as design draft of reload config system

@startuml Reload config
 
-skinparam defaultFontName Iosevka Term SS08
+skinparam defaultFontName Iosevka Term SS08
 
-participant admin
-participant other_service
-participant config_service
-participant storage
+participant admin
+participant other_service
+participant config_service
+participant storage
 
-== Admin handle ==
+== Admin handle ==
 
-admin -> config_service: set/update/delete config
+admin -> config_service: set/update/delete config
 
-config_service -> storage: set/update/delete config
+config_service -> storage: set/update/delete config
 
-== Other service handle ==
+== Other service handle ==
 
-other_service -> other_service: init service
+other_service -> other_service: init service
 
-activate other_service
+activate other_service
 
-other_service -> storage: make connection
+other_service -> storage: make connection
 
-loop
+loop
 
-    other_service -> storage: listen on config change
+    other_service -> storage: listen on config change
 
-    other_service -> other_service: save config to memory
+    other_service -> other_service: save config to memory
 
-end
+end
 
-deactivate other_service
+deactivate other_service
 
-other_service -> other_service: do business
+other_service -> other_service: do business
 
-activate other_service
+activate other_service
 
-other_service -> other_service: get config
+other_service -> other_service: get config
 
-other_service -> other_service: do other business
+other_service -> other_service: do other business
 
-deactivate other_service
+deactivate other_service
 
-@enduml
-

Config storage can be any key value storage or database like etcd, Consul, mySQL, ...

If storage is key value storage, maybe there is API to listen on config change.
Otherwise we should create a loop to get all config from storage for some interval, for example each 5 minute.

Each other_service need to get config from its memory, not hit storage.
So there is some delay between upstream config (config in storage) and downstream config (config in other_service), but maybe we can forgive that delay (???).

Pros:

Cons:

Feel free to ask me via email +@enduml

Config storage can be any key value storage or database like etcd, Consul, mySQL, ...

If storage is key value storage, maybe there is API to listen on config change. +Otherwise we should create a loop to get all config from storage for some interval, for example each 5 minute.

Each other_service need to get config from its memory, not hit storage. +So there is some delay between upstream config (config in storage) and downstream config (config in other_service), but maybe we can forgive that delay (???).

Pros:

Cons:

Feel free to ask me via email Mastodon \ No newline at end of file diff --git a/docs/2022-12-25-archlinux.html b/docs/2022-12-25-archlinux.html index 76e97cc..2747a50 100644 --- a/docs/2022-12-25-archlinux.html +++ b/docs/2022-12-25-archlinux.html @@ -1,126 +1,102 @@ -Index

Install Arch Linux

Install Arch Linux is thing I always want to do for my laptop/PC since I had my laptop in ninth grade.

This is not a guide for everyone, this is just saved for my future self and for anyone who wants to walk in my shoes.

Installation guide

Pre-installation

Check disks carefully:

lsblk
-

USB flash installation medium

Verify the boot mode

Check UEFI mode:

ls /sys/firmware/efi/efivars
-

Connect to the internet

For wifi, use iwd.

Partition the disks

GPT fdisk:

cgdisk /dev/sdx
-

Partition scheme

UEFI/GPT layout:
Mount pointPartitionPartition typeSuggested size
/mnt/efi/dev/efi_system_partitionEFI System Partition512 MiB
/mnt/boot/dev/extended_boot_loader_partitionExtended Boot Loader Partition1 GiB
/mnt/dev/root_partitionRoot Partition

BIOS/GPT layout:
Mount pointPartitionPartition typeSuggested size
BIOS boot partition1 MiB
/mnt/dev/root_partitionRoot Partition

LVM:

# Create physical volumes
+Index

Install Arch Linux

Install Arch Linux is thing I always want to do for my laptop/PC since I had my laptop in ninth grade.

This is not a guide for everyone, this is just saved for my future self and for anyone who wants to walk in my shoes.

Installation guide

Pre-installation

Check disks carefully:

lsblk

USB flash installation medium

Verify the boot mode

Check UEFI mode:

ls /sys/firmware/efi/efivars

Connect to the internet

For wifi, use iwd.

Partition the disks

GPT fdisk:

cgdisk /dev/sdx

Partition scheme

UEFI/GPT layout:
Mount pointPartitionPartition typeSuggested size
/mnt/efi/dev/efi_system_partitionEFI System Partition512 MiB
/mnt/boot/dev/extended_boot_loader_partitionExtended Boot Loader Partition1 GiB
/mnt/dev/root_partitionRoot Partition

BIOS/GPT layout:
Mount pointPartitionPartition typeSuggested size
BIOS boot partition1 MiB
/mnt/dev/root_partitionRoot Partition

LVM:

# Create physical volumes
 pvcreate /dev/sdaX
 
-# Create volume groups
+# Create volume groups
 vgcreate RootGroup /dev/sdaX /dev/sdaY
 
-# Create logical volumes
-lvcreate -l +100%FREE RootGroup -n rootvol
-

Format:

# efi
+# Create logical volumes
+lvcreate -l +100%FREE RootGroup -n rootvol

Format:

# efi
 mkfs.fat -F32 /dev/efi_system_partition
 
-# boot
+# boot
 mkfs.fat -F32 /dev/extended_boot_loader_partition
 
-# root
+# root
 mkfs.ext4 -L ROOT /dev/root_partition
 
-# root with btrfs
+# root with btrfs
 mkfs.btrfs -L ROOT /dev/root_partition
 
-# root on lvm
-mkfs.ext4 /dev/RootGroup/rootvol
-

Mount:

# root
+# root on lvm
+mkfs.ext4 /dev/RootGroup/rootvol

Mount:

# root
 mount /dev/root_partition /mnt
 
-# root with btrfs
+# root with btrfs
 mount -o compress=zstd /dev/root_partition /mnt
 
-# root on lvm
+# root on lvm
 mount /dev/RootGroup/rootvol /mnt
 
-# efi
+# efi
 mount --mkdir /dev/efi_system_partition /mnt/efi
 
-# boot
-mount --mkdir /dev/extended_boot_loader_partition /mnt/boot
-

Installation

pacstrap -K /mnt base linux linux-firmware
+# boot
+mount --mkdir /dev/extended_boot_loader_partition /mnt/boot

Installation

pacstrap -K /mnt base linux linux-firmware
 
-# AMD
+# AMD
 pacstrap -K /mnt amd-ucode
 
-# Intel
+# Intel
 pacstrap -K /mnt intel-ucode
 
-# Btrfs
+# Btrfs
 pacstrap -K /mnt btrfs-progs
 
-# LVM
+# LVM
 pacstrap -K /mnt lvm2
 
-# Text editor
-pacstrap -K /mnt neovim
-

Configure

fstab

genfstab -U /mnt >> /mnt/etc/fstab
-

Chroot

arch-chroot /mnt
-

Time zone

ln -sf /usr/share/zoneinfo/Region/City /etc/localtime
+# Text editor
+pacstrap -K /mnt neovim

Configure

fstab

genfstab -U /mnt >> /mnt/etc/fstab

Chroot

arch-chroot /mnt

Time zone

ln -sf /usr/share/zoneinfo/Region/City /etc/localtime
 
-hwclock --systohc
-

Localization:

Edit /etc/locale.gen:

# Uncomment en_US.UTF-8 UTF-8
-

Generate locales:

locale-gen
-

Edit /etc/locale.conf:

LANG=en_US.UTF-8
-

Network configuration

Edit /etc/hostname:

myhostname
-

Initramfs

Edit /etc/mkinitcpio.conf:

# LVM
-# https://wiki.archlinux.org/title/Install_Arch_Linux_on_LVM#Adding_mkinitcpio_hooks
+hwclock --systohc

Localization:

Edit /etc/locale.gen:

# Uncomment en_US.UTF-8 UTF-8

Generate locales:

locale-gen

Edit /etc/locale.conf:

LANG=en_US.UTF-8

Network configuration

Edit /etc/hostname:

myhostname

Initramfs

Edit /etc/mkinitcpio.conf:

# LVM
+# https://wiki.archlinux.org/title/Install_Arch_Linux_on_LVM#Adding_mkinitcpio_hooks
 HOOKS=(base udev ... block lvm2 filesystems)
 
-# https://wiki.archlinux.org/title/mkinitcpio#Common_hooks
-# Replace udev with systemd
-
mkinitcpio -P
-

Root password

passwd
-

Addition

# NetworkManager
+# https://wiki.archlinux.org/title/mkinitcpio#Common_hooks
+# Replace udev with systemd
mkinitcpio -P

Root password

passwd

Addition

# NetworkManager
 pacman -Syu networkmanager
-systemctl enable NetworkManager.service
+systemctl enable NetworkManager.service
 
-# Bluetooth
+# Bluetooth
 pacman -Syu bluez
-systemctl enable bluetooth.service
+systemctl enable bluetooth.service
 
-# Clock
-timedatectl set-ntp true
-

Boot loader

systemd-boot

GRUB

General recommendations

Always remember to check dependencies when install packages.

System administration

Sudo:

pacman -Syu sudo
+# Clock
+timedatectl set-ntp true

Boot loader

systemd-boot

GRUB

General recommendations

Always remember to check dependencies when install packages.

System administration

Sudo:

pacman -Syu sudo
 
 EDITOR=nvim visudo
-# Uncomment group wheel
+# Uncomment group wheel
 
-# Add user if don't want to use systemd-homed
-useradd -m -G wheel -c "The Joker" joker
+# Add user if don't want to use systemd-homed
+useradd -m -G wheel -c "The Joker" joker
 
-# Or using zsh
-useradd -m -G wheel -s /usr/bin/zsh -c "The Joker" joker
+# Or using zsh
+useradd -m -G wheel -s /usr/bin/zsh -c "The Joker" joker
 
-# Set password
-passwd joker
-

systemd-homed (WIP):

systemctl enable systemd-homed.service
+# Set password
+passwd joker

systemd-homed (WIP):

systemctl enable systemd-homed.service
 
-homectl create joker --real-name="The Joker" --member-of=wheel
+homectl create joker --real-name="The Joker" --member-of=wheel
 
-# Using zsh
-homectl update joker --shell=/usr/bin/zsh
-

Note:
Can not run homectl when install Arch Linux.
Should run on the first boot.

Desktop Environment

Install Xorg:

pacman -Syu xorg-server
-

GNOME

pacman -Syu gnome-shell \
+# Using zsh
+homectl update joker --shell=/usr/bin/zsh

Note: +Can not run homectl when install Arch Linux. +Should run on the first boot.

Desktop Environment

Install Xorg:

pacman -Syu xorg-server

GNOME

pacman -Syu gnome-shell \
 	gnome-control-center gnome-system-monitor \
 	gnome-tweaks gnome-backgrounds gnome-screenshot gnome-keyring gnome-logs \
 	gnome-console gnome-text-editor \
 	nautilus xdg-user-dirs-gtk file-roller evince eog
 
-# Login manager
+# Login manager
 pacman -Syu gdm
-systemctl enable gdm.service
-

KDE (WIP)

pacman -Syu plasma-meta \
+systemctl enable gdm.service

KDE (WIP)

pacman -Syu plasma-meta \
 	kde-system-meta
 
-# Login manager
+# Login manager
 pacman -Syu sddm
-systemctl enable sddm.service
-

List of applications

pacman

Uncomment in /etc/pacman.conf:

# Misc options
+systemctl enable sddm.service

List of applications

pacman

Uncomment in /etc/pacman.conf:

# Misc options
 Color
-ParallelDownloads
-

Pipewire (WIP)

pacman -Syu pipewire wireplumber \
+ParallelDownloads

Pipewire (WIP)

pacman -Syu pipewire wireplumber \
 	pipewire-alsa pipewire-pulse \
-	gst-plugin-pipewire pipewire-v4l2
-

Flatpak (WIP)

pacman -Syu flatpak
-

Improving performance

https://wiki.archlinux.org/index.php/swap#Swap_file

https://wiki.archlinux.org/index.php/swap#Swappiness

https://wiki.archlinux.org/index.php/Systemd/Journal#Journal_size_limit

https://wiki.archlinux.org/index.php/Core_dump#Disabling_automatic_core_dumps

https://wiki.archlinux.org/index.php/Solid_state_drive#Periodic_TRIM

https://wiki.archlinux.org/index.php/Silent_boot

https://wiki.archlinux.org/title/Improving_performance#Watchdogs

https://wiki.archlinux.org/title/PRIME

In the end

This guide is updated regularly I promise.

Feel free to ask me via email + gst-plugin-pipewire pipewire-v4l2

Flatpak (WIP)

pacman -Syu flatpak

Improving performance

https://wiki.archlinux.org/index.php/swap#Swap_file

https://wiki.archlinux.org/index.php/swap#Swappiness

https://wiki.archlinux.org/index.php/Systemd/Journal#Journal_size_limit

https://wiki.archlinux.org/index.php/Core_dump#Disabling_automatic_core_dumps

https://wiki.archlinux.org/index.php/Solid_state_drive#Periodic_TRIM

https://wiki.archlinux.org/index.php/Silent_boot

https://wiki.archlinux.org/title/Improving_performance#Watchdogs

https://wiki.archlinux.org/title/PRIME

In the end

This guide is updated regularly I promise.

Feel free to ask me via email Mastodon \ No newline at end of file diff --git a/docs/2022-12-25-go-buf.html b/docs/2022-12-25-go-buf.html index a56a171..aa5149b 100644 --- a/docs/2022-12-25-go-buf.html +++ b/docs/2022-12-25-go-buf.html @@ -1,2 +1,4 @@ -Index

Integration Go gRPC with Buf

There are 2 questions here.
What is Buf?
And why is Buf?

Feel free to ask me via email +Index

Integration Go gRPC with Buf

There are 2 questions here. +What is Buf? +And why is Buf?

Feel free to ask me via email Mastodon \ No newline at end of file diff --git a/docs/2022-12-25-go-test-asap.html b/docs/2022-12-25-go-test-asap.html index 58b7749..8f17a3a 100644 --- a/docs/2022-12-25-go-test-asap.html +++ b/docs/2022-12-25-go-test-asap.html @@ -1,182 +1,203 @@ -Index

Speed up writing Go test ASAP

Imagine your project currently have 0% unit test code coverage.
And your boss keep pushing it to 80% or even 90%?
What do you do?
Give up?

What if I tell you there is a way?
Not entirely cheating but ... you know, there is always trade off.

If your purpose is to test carefully all path, check if all return is correctly.
Sadly this post is not for you, I guess.
If you only want good number on test coverage, with minimum effort as possible, I hope this will show you some idea you can use :)

In my opinion, unit test is not that important (like must must have).
It just makes sure your code is running exactly as you intend it to.
If you don't think about edge case before, unit test won't help you.

First, rewrite the impossible (to test) out

When I learn programming, I encounter very interesting idea, which become mainly my mindset when I dev later.
I don't recall it clearly, kinda like: "Don't just fix bugs, rewrite it so that kind of bugs will not appear again".
So in our context, there is some thing we hardly or can not write test in Go.
My suggestion is don't use that thing.

In my experience, I can list a few here:

Let the fun (writing test) begin

If you code Go long enough, you know table driven tests and how is that so useful.
You set up test data, then you test.
Somewhere in the future, you change the func, then you need to update test data, then you good!

In simple case, your func only have 2 or 3 inputs so table drive tests is still looking good.
But real world is ugly (maybe not, idk I'm just too young in this industry). Your func can have 5 or 10 inputs, also your func call many third party services.

Imagine having below func to upload image:

type service struct {
-    db DB
-    redis Redis
-    minio MinIO
-    logService LogService
-    verifyService VerifyService
+Index

Speed up writing Go test ASAP

Imagine your project currently have 0% unit test code coverage. +And your boss keep pushing it to 80% or even 90%? +What do you do? +Give up?

What if I tell you there is a way? +Not entirely cheating but ... you know, there is always trade off.

If your purpose is to test carefully all path, check if all return is correctly. +Sadly this post is not for you, I guess. +If you only want good number on test coverage, with minimum effort as possible, I hope this will show you some idea you can use :)

In my opinion, unit test is not that important (like a must-have). +It just makes sure your code is running exactly as you intend it to. +If you don't think about edge cases before, unit test won't help you.

First, rewrite the impossible (to test) out

When I learn programming, I encounter very interesting idea, which become mainly my mindset when I dev later. +I don't recall it clearly, kinda like: "Don't just fix bugs, rewrite it so that kind of bugs will not appear again". +So in our context, there is some thing we hardly or can not write test in Go. +My suggestion is don't use that thing.

In my experience, I can list a few here:

Let the fun (writing test) begin

If you code Go long enough, you know table driven tests and how is that so useful. +You set up test data, then you test. +Somewhere in the future, you change the func, then you need to update test data, then you good!

In simple case, your func only have 2 or 3 inputs so table drive tests is still looking good. +But real world is ugly (maybe not, idk I'm just too young in this industry). Your func can have 5 or 10 inputs, also your func call many third party services.

Imagine having below func to upload image:

type service struct {
+    db DB
+    redis Redis
+    minio MinIO
+    logService LogService
+    verifyService VerifyService
 }
 
-func (s *service) Upload(ctx context.Context, req Request) error {
-    // I simplify by omitting the response, only care error for now
-    if err := s.verifyService.Verify(req); err != nil {
-        return err
+func (s *service) Upload(ctx context.Context, req Request) error {
+    // I simplify by omitting the response, only care error for now
+    if err := s.verifyService.Verify(req); err != nil {
+        return err
     }
 
-    if err := s.minio.Put(req); err != nil {
-        return err
+    if err := s.minio.Put(req); err != nil {
+        return err
     }
 
-    if err := s.redis.Set(req); err != nil {
-        return err
+    if err := s.redis.Set(req); err != nil {
+        return err
     }
 
-    if err := s.db.Save(req); err != nil {
-        return err
+    if err := s.db.Save(req); err != nil {
+        return err
     }
 
-    if err := s.logService.Save(req); err != nil {
-        return err
+    if err := s.logService.Save(req); err != nil {
+        return err
     }
 
-    return nil
-}
-

With table driven test and thanks to stretchr/testify, I usually write like this:

type ServiceSuite struct {
-    suite.Suite
+    return nil
+}

With table driven test and thanks to stretchr/testify, I usually write like this:

type ServiceSuite struct {
+    suite.Suite
 
-    db DBMock
-    redis RedisMock
-    minio MinIOMock
-    logService LogServiceMock
-    verifyService VerifyServiceMock
+    db DBMock
+    redis RedisMock
+    minio MinIOMock
+    logService LogServiceMock
+    verifyService VerifyServiceMock
 
-    s service
+    s service
 }
 
-func (s *ServiceSuite) SetupTest() {
-    // Init mock
-    // Init service
+func (s *ServiceSuite) SetupTest() {
+    // Init mock
+    // Init service
 }
 
-func (s *ServiceSuite) TestUpload() {
-    tests := []struct{
-        name string
-        req Request
-        verifyErr error
-        minioErr error
-        redisErr error
-        dbErr error
-        logErr error
-        wantErr error
+func (s *ServiceSuite) TestUpload() {
+    tests := []struct{
+        name string
+        req Request
+        verifyErr error
+        minioErr error
+        redisErr error
+        dbErr error
+        logErr error
+        wantErr error
     }{
         {
-            // Init test case
+            // Init test case
         }
     }
 
-    for _, tc := range tests {
-        s.Run(tc.name, func(){
-            // Mock all error depends on test case
-            if tc.verifyErr != nil {
-                s.verifyService.MockVerify().Return(tc.verifyErr)
+    for _, tc := range tests {
+        s.Run(tc.name, func(){
+            // Mock all error depends on test case
+            if tc.verifyErr != nil {
+                s.verifyService.MockVerify().Return(tc.verifyErr)
             }
-            // ...
+            // ...
 
-            gotErr := s.service.Upload(tc.req)
-            s.Equal(wantErr, gotErr)
+            gotErr := s.service.Upload(tc.req)
+            s.Equal(wantErr, gotErr)
         })
     }
-}
-

Looks good right?
Be careful with this.
It can go from 0 to 100 ugly real quick.

What if req is a struct with many fields?
So in each test case you need to set up req.
They are almost the same, but with some error case you must alter req.
It's easy to init with the wrong value here (a typo maybe?).
Also all req look similar, kinda duplicated.

tests := []struct{
-        name string
-        req Request
-        verifyErr error
-        minioErr error
-        redisErr error
-        dbErr error
-        logErr error
-        wantErr error
+}

Looks good right? +Be careful with this. +It can go from 0 to 100 ugly real quick.

What if req is a struct with many fields? +So in each test case you need to set up req. +They are almost the same, but for some error cases you must alter req. +It's easy to init with the wrong value here (a typo maybe?). +Also all req look similar, kinda duplicated.

tests := []struct{
+        name string
+        req Request
+        verifyErr error
+        minioErr error
+        redisErr error
+        dbErr error
+        logErr error
+        wantErr error
     }{
         {
-            req: Request {
-                a: "a",
-                b: {
-                    c: "c",
-                    d: {
-                        "e": e
+            req: Request {
+                a: "a",
+                b: {
+                    c: "c",
+                    d: {
+                        "e": e
                     }
                 }
             }
-            // Other fieles
+            // Other fieles
         },
          {
-            req: Request {
-                a: "a",
-                b: {
-                    c: "c",
-                    d: {
-                        "e": e
+            req: Request {
+                a: "a",
+                b: {
+                    c: "c",
+                    d: {
+                        "e": e
                     }
                 }
             }
-            // Other fieles
+            // Other fieles
         },
          {
-            req: Request {
-                a: "a",
-                b: {
-                    c: "c",
-                    d: {
-                        "e": e
+            req: Request {
+                a: "a",
+                b: {
+                    c: "c",
+                    d: {
+                        "e": e
                     }
                 }
             }
-            // Other fieles
+            // Other fieles
         }
-    }
-

What if dependencies of service keep growing?
More mock error to test data of course.

    tests := []struct{
-        name string
-        req Request
-        verifyErr error
-        minioErr error
-        redisErr error
-        dbErr error
-        logErr error
-        wantErr error
-        // Murr error
-        aErr error
-        bErr error
-        cErr error
-        // ...
+    }

What if dependencies of service keep growing? +More mock error to test data of course.

    tests := []struct{
+        name string
+        req Request
+        verifyErr error
+        minioErr error
+        redisErr error
+        dbErr error
+        logErr error
+        wantErr error
+        // Murr error
+        aErr error
+        bErr error
+        cErr error
+        // ...
     }{
         {
-            // Init test case
+            // Init test case
         }
-    }
-

The test file keep growing longer and longer until I feel sick about it.

See tektoncd/pipeline unit test to get a feeling about this.
When I see it, TestPodBuild has almost 2000 lines.

The solution I propose here is simple (absolutely not perfect, but good with my usecase) thanks to stretchr/testify.
I init all default action on success case.
Then I alter request or mock error for unit test to hit on other case.
Remember if a unit test is hit, code coverage is surely increased, and that's my goal.

// Init ServiceSuite as above
+    }

The test file keep growing longer and longer until I feel sick about it.

See tektoncd/pipeline unit test to get a feeling about this. +When I see it, TestPodBuild has almost 2000 lines.

The solution I propose here is simple (absolutely not perfect, but good with my usecase) thanks to stretchr/testify. +I init all default action on success case. +Then I alter request or mock error for unit test to hit on other case. +Remember if unit test is hit, code coverate is surely increaesed, and that my goal.

// Init ServiceSuite as above
 
-func (s *ServiceSuite) TestUpload() {
-    // Init success request
-    req := Request{
-        // ...
+func (s *ServiceSuite) TestUpload() {
+    // Init success request
+    req := Request{
+        // ...
     }
 
-    // Init success action
-    s.verifyService.MockVerify().Return(nil)
-    // ...
+    // Init success action
+    s.verifyService.MockVerify().Return(nil)
+    // ...
 
-    gotErr := s.service.Upload(tc.req)
-    s.NoError(gotErr)
+    gotErr := s.service.Upload(tc.req)
+    s.NoError(gotErr)
 
-    s.Run("failed", func(){
-        // Alter failed request from default
-        req := Request{
-            // ...
+    s.Run("failed", func(){
+        // Alter failed request from default
+        req := Request{
+            // ...
         }
 
-        gotErr := s.service.Upload(tc.req)
-        s.Error(gotErr)
+        gotErr := s.service.Upload(tc.req)
+        s.Error(gotErr)
     })
 
-    s.Run("another failed", func(){
-        // Alter verify return
-        s.verifyService.MockVerify().Return(someErr)
+    s.Run("another failed", func(){
+        // Alter verify return
+        s.verifyService.MockVerify().Return(someErr)
 
 
-        gotErr := s.service.Upload(tc.req)
-        s.Error(gotErr)
+        gotErr := s.service.Upload(tc.req)
+        s.Error(gotErr)
     })
 
-    // ...
-}
-

If you think this is not quick enough, just ignore the response.
You only need to check error or not if you want code coverage only.

So if request change fields or more dependencies, I need to update success case, and maybe add corresponding error case if need.

Same idea but still with table, you can find here Functional table-driven tests in Go - Fatih Arslan.

Feel free to ask me via email + // ... +}

If you think this is not quick enough, just ignore the response. +You only need to check error or not if you want code coverage only.

So if request change fields or more dependencies, I need to update success case, and maybe add corresponding error case if need.

Same idea but still with table, you can find here Functional table-driven tests in Go - Fatih Arslan.

Feel free to ask me via email Mastodon \ No newline at end of file diff --git a/docs/index.html b/docs/index.html index e208d04..c8f34d1 100644 --- a/docs/index.html +++ b/docs/index.html @@ -1,2 +1,2 @@ -Index

Index

This is where I dump my thoughts.

Feel free to ask me via email +Index

Index

This is where I dump my thoughts.

Feel free to ask me via email Mastodon \ No newline at end of file diff --git a/go.mod b/go.mod index 8936569..5e4ea90 100644 --- a/go.mod +++ b/go.mod @@ -3,8 +3,17 @@ module github.com/haunt98/posts-go go 1.18 require ( + github.com/google/go-github/v48 v48.2.0 github.com/tdewolff/minify/v2 v2.12.4 - github.com/yuin/goldmark v1.5.3 + golang.org/x/oauth2 v0.3.0 ) -require github.com/tdewolff/parse/v2 v2.6.4 // indirect +require ( + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/tdewolff/parse/v2 v2.6.4 // indirect + golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect + golang.org/x/net v0.3.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.28.0 // indirect +) diff --git a/go.sum b/go.sum index ae25bf8..9152c62 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,17 @@ github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgk github.com/djherbis/atime v1.1.0/go.mod h1:28OF6Y8s3NQWwacXc5eZTsEsiMzp7LF8MbXE+XJPdBE= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-github/v48 v48.2.0 h1:68puzySE6WqUY9KWmpOsDEQfDZsso98rT6pZcz9HqcE= +github.com/google/go-github/v48 v48.2.0/go.mod 
h1:dDlehKBDo850ZPvCTK0sEqTCVWcrGl2LcDiajkYi89Y= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/matryer/try v0.0.0-20161228173917-9ac251b645a2/go.mod h1:0KeJpeMD6o+O4hW7qJOT7vyQPKrWmj26uf5wMc/IiIs= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/tdewolff/minify/v2 v2.12.4 h1:kejsHQMM17n6/gwdw53qsi6lg0TGddZADVyQOz1KMdE= @@ -10,6 +21,23 @@ github.com/tdewolff/parse/v2 v2.6.4 h1:KCkDvNUMof10e3QExio9OPZJT8SbdKojLBumw8YZy github.com/tdewolff/parse/v2 v2.6.4/go.mod h1:woz0cgbLwFdtbjJu8PIKxhW05KplTFQkOdX78o+Jgrs= github.com/tdewolff/test v1.0.7 h1:8Vs0142DmPFW/bQeHRP3MV19m1gvndjUb1sn8yy74LM= github.com/tdewolff/test v1.0.7/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE= -github.com/yuin/goldmark v1.5.3 h1:3HUJmBFbQW9fhQOzMgseU134xfi6hU+mjWywx5Ty+/M= -github.com/yuin/goldmark v1.5.3/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.3.0 h1:VWL6FNY2bEEmsGVKabSlHu5Irp34xmMRoqb/9lF9lxk= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8= +golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= diff --git a/main.go b/main.go index b38e913..875db40 100644 --- a/main.go +++ b/main.go @@ -1,20 +1,15 @@ package main import ( - "bytes" - "io" + "context" "log" "os" "path/filepath" "strings" "text/template" - "github.com/tdewolff/minify/v2" - minify_css "github.com/tdewolff/minify/v2/css" - minify_html "github.com/tdewolff/minify/v2/html" - "github.com/yuin/goldmark" - gm_extension "github.com/yuin/goldmark/extension" - gm_html "github.com/yuin/goldmark/renderer/html" + "github.com/google/go-github/v48/github" + "golang.org/x/oauth2" ) const ( @@ -37,6 +32,8 @@ type templatePostData struct { } func main() { + ctx := context.Background() + // Cleanup generated path if err := os.RemoveAll(generatedPath); err != nil { log.Fatalln("Failed to remove all", generatedPath, err) @@ -63,20 +60,19 @@ func main() { log.Fatalln("Failed to parse template", err) } - // Prepare parse markdown - gm := goldmark.New( - goldmark.WithExtensions( - gm_extension.GFM, - ), - goldmark.WithRendererOptions( - gm_html.WithHardWraps(), - 
), - ) + // Prepare GitHub + ghAccessTokenBytes, err := os.ReadFile(".github_access_token") + if err != nil { + log.Fatalln("Failed to read file", ".github_access_token", err) + } - // Prepare minify - m := minify.New() - m.AddFunc(mimeTypeHTML, minify_html.Minify) - m.AddFunc(mimeTypeCSS, minify_css.Minify) + ghTokenSrc := oauth2.StaticTokenSource( + &oauth2.Token{ + AccessToken: string(ghAccessTokenBytes), + }, + ) + ghHTTPClient := oauth2.NewClient(ctx, ghTokenSrc) + ghClient := github.NewClient(ghHTTPClient) // Generate post files for _, postFile := range postFiles { @@ -91,6 +87,13 @@ func main() { log.Fatalln("Failed to read file", mdFilename, err) } + ghMarkdown, _, err := ghClient.Markdown(ctx, string(mdFileBytes), &github.MarkdownOptions{ + Mode: "markdown", + }) + if err != nil { + log.Fatalln("Failed to GitHub markdown", err) + } + // Prepare html file htmlFilename := strings.TrimSuffix(postFile.Name(), filepath.Ext(postFile.Name())) + extHTML htmlFilepath := filepath.Join(generatedPath, htmlFilename) @@ -100,45 +103,23 @@ func main() { log.Fatalln("Failed to open file", htmlFilepath, err) } - // Parse markdown - var markdownBuf bytes.Buffer - if err := gm.Convert(mdFileBytes, &markdownBuf); err != nil { - log.Fatalln("Failed to convert markdown", err) + if err := templatePost.Execute(htmlFile, templatePostData{ + Body: ghMarkdown, + }); err != nil { + log.Fatalln("Failed to execute html template", err) } - tmpReader, tmpWriter := io.Pipe() - - // Template - go func() { - if err := templatePost.Execute(tmpWriter, templatePostData{ - Body: markdownBuf.String(), - }); err != nil { - log.Fatalln("Failed to execute html template", err) - } - tmpWriter.Close() - }() - - // Minify - if err := m.Minify(mimeTypeHTML, htmlFile, tmpReader); err != nil { - log.Fatalln("Failed to minify html", err) - } - tmpReader.Close() htmlFile.Close() } - // Copy css file - templateCSSFile, err := os.OpenFile(templateCSSPath, os.O_RDONLY, 0o600) + // Copy css file from templates 
to generated + templateCSSBytes, err := os.ReadFile(templateCSSPath) if err != nil { log.Fatalln("Failed to open file", templateCSSPath, err) } - cssFilename := filepath.Join(generatedPath, cssFilename) - cssFile, err := os.OpenFile(cssFilename, os.O_RDWR|os.O_CREATE, 0o600) - if err != nil { - log.Fatalln("Failed to open file", cssFilename, err) - } - - if err := m.Minify(mimeTypeCSS, cssFile, templateCSSFile); err != nil { - log.Fatalln("Failed to minify css", err) + generatedCSSPath := filepath.Join(generatedPath, cssFilename) + if err := os.WriteFile(generatedCSSPath, templateCSSBytes, 0o600); err != nil { + log.Fatalln("Failed to write file", generatedCSSPath, err) } } diff --git a/templates/post.html b/templates/post.html index 47ee178..7a6b24b 100644 --- a/templates/post.html +++ b/templates/post.html @@ -6,16 +6,21 @@ + - + + Index {{.Body}}